diff --git "a/5300.jsonl" "b/5300.jsonl" new file mode 100644--- /dev/null +++ "b/5300.jsonl" @@ -0,0 +1,644 @@ +{"seq_id":"363652960","text":"def factorial(n):\n \"\"\" Calcula el factorial de n\n\n va desde n hasta el 1\n ej: 4 * 3 * 2 * 1 = 24\n\n primer recorrido\n 4 *\n segundo\n 4 * 3\n tercero\n 4 * 3 * 2\n cuarto \n 4 * 3 * 2 * 1 = 24\n\n Al llamarse asi misma cuando saca el factorial se acumula hasta que le indiquemos cuando pare que en este punto es 1\n\n n int > 0\n return n!(numeros factoriables por el cual llega el numero ej: 1 * 2 * 3 * 4)\n \"\"\"\n print('Valor: ', n)\n print(f'valor multiplicado {n} * {n - 1}: ', n * (n - 1))\n if n == 1:\n return 1\n \n return n * factorial(n - 1)\n\nnumero = int(input('Escribe un entero: '))\n\nprint(factorial(numero))","sub_path":"recursividad/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"286810370","text":"import os\nimport shutil\nimport tempfile\n\nimport pytest\nIS_TRAVIS_TESTING = \"TRAVIS\" in os.environ and os.environ[\"TRAVIS\"] == \"true\"\n\nfrom pressurecooker import utils\nfrom pressurecooker import youtube\n\ntrees = {}\nyt_resources = {}\n\ncc_playlist = 'https://www.youtube.com/playlist?list=PL7m903CwFUgntbjkVMwts89fZq0INCtVS'\nnon_cc_playlist = 'https://www.youtube.com/playlist?list=PLBO8M-O_dTPE51ymDUgilf8DclGAEg9_A'\nsubtitles_video = 'https://www.youtube.com/watch?v=6uXAbJQoZlE'\nsubtitles_zu_video = 'https://www.youtube.com/watch?v=FN12ty5ztAs'\n\n\ndef get_yt_resource(url):\n global yt_resources\n if not url in yt_resources:\n yt_resources[url] = youtube.YouTubeResource(url)\n\n return yt_resources[url]\n\n\ndef test_get_youtube_info():\n yt_resource = get_yt_resource(non_cc_playlist)\n tree = yt_resource.get_resource_info()\n assert tree['id']\n assert tree['kind']\n assert tree['title']\n assert len(tree['children']) == 4\n\n for video in tree['children']:\n assert video['id']\n assert video['kind']\n assert video['title']\n\n\ndef test_warnings_no_license():\n yt_resource = get_yt_resource(non_cc_playlist)\n issues, output_info = yt_resource.check_for_content_issues()\n\n assert len(issues) == 4\n for issue in issues:\n assert 'no_license_specified' in issue['warnings']\n\n\ndef test_cc_no_warnings():\n yt_resource = get_yt_resource(cc_playlist)\n issues, output_info = yt_resource.check_for_content_issues()\n\n # there is one video in this playlist that is not cc-licensed\n assert len(issues) == 1\n for issue in issues:\n assert 'no_license_specified' in issue['warnings']\n\n\n@pytest.mark.skipif(IS_TRAVIS_TESTING, reason=\"Skipping download tests on Travis.\")\ndef test_download_youtube_video():\n download_dir = tempfile.mkdtemp()\n\n try:\n yt_resource = get_yt_resource(subtitles_video)\n info = yt_resource.download(base_path=download_dir)\n assert info\n if info:\n assert 'filename' in info\n assert os.path.exists(info['filename']), 'Filename {} does not exist'.format(info['filename'])\n\n finally:\n shutil.rmtree(download_dir)\n\n\n@pytest.mark.skipif(IS_TRAVIS_TESTING, reason=\"Skipping download tests on Travis.\")\ndef test_download_youtube_playlist():\n download_dir = tempfile.mkdtemp()\n\n try:\n yt_resource = get_yt_resource(cc_playlist)\n info = yt_resource.download(base_path=download_dir)\n assert info is not None\n if info:\n assert not 'filename' in info\n assert 'children' in info\n for child in info['children']:\n assert 'filename' in child\n assert 
os.path.exists(child['filename']), 'Filename {} does not exist'.format(child['filename'])\n\n finally:\n shutil.rmtree(download_dir)\n\n\ndef test_get_subtitles():\n yt_resource = get_yt_resource(subtitles_video)\n info = yt_resource.get_resource_subtitles()\n assert len(info['subtitles']) == 2\n assert 'ru' in info['subtitles']\n assert 'en' in info['subtitles']\n\n\ndef test_non_youtube_url_error():\n url = 'https://vimeo.com/238190750'\n with pytest.raises(utils.VideoURLFormatError):\n youtube.YouTubeResource(url)\n\n\ndef test_subtitles_lang_helpers_compatible():\n \"\"\"\n Usage examples functions `is_youtube_subtitle_file_supported_language` and\n `_get_language_with_alpha2_fallback` that deal with language codes.\n \"\"\"\n yt_resource = get_yt_resource(subtitles_zu_video)\n info = yt_resource.get_resource_subtitles()\n all_subtitles = info['subtitles']\n\n # 1. filter out non-vtt subs\n vtt_subtitles = {}\n for youtube_language, subs in all_subtitles.items():\n vtt_subtitles[youtube_language] = [s for s in subs if s['ext'] == 'vtt']\n\n for youtube_language, sub_dict in vtt_subtitles.items():\n # 2. check compatibility with le-utils language codes (a.k.a. internal representation)\n verdict = youtube.is_youtube_subtitle_file_supported_language(youtube_language)\n assert verdict == True, 'Wrongly marked youtube_language as incompatible'\n # 3. TODO: figure out what to do for incompatible langs\n\n # 4. map youtube_language to le-utils language code (a.k.a. internal representation)\n language_obj = youtube.get_language_with_alpha2_fallback(youtube_language)\n assert language_obj is not None, 'Failed to find matchin language code in le-utils'\n if youtube_language == 'zu':\n assert language_obj.code == 'zul', 'Matched to wrong language code in le-utils'\n\n\ndef test_subtitles_lang_helpers_incompatible():\n \"\"\"\n Ensure `is_youtube_subtitle_file_supported_language` rejects unknown language codes.\n \"\"\"\n verdict1 = youtube.is_youtube_subtitle_file_supported_language('patapata')\n assert verdict1 == False, 'Failed to reject incompatible youtube_language'\n verdict2 = youtube.is_youtube_subtitle_file_supported_language('zzz')\n assert verdict2 == False, 'Failed to reject incompatible youtube_language'\n","sub_path":"tests/test_youtube.py","file_name":"test_youtube.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533650140","text":"def getLCS(X, Y): # X and Y should be the string.\n length1 = len(X)\n length2 = len(Y)\n LCS = [[0] * (length2+1) for _ in range(length1+1)]\n for i in range(1, length1+1):\n for j in range(1, length2+1):\n if X[i-1] == Y[j-1]:\n LCS[i][j] = LCS[i-1][j-1] + 1\n else:\n LCS[i][j] = max(LCS[i-1][j], LCS[i][j-1])\n return LCS\n #return LCS[length1][length2], length1, length2\n \n\ndef findLCS(LCS, X, len1, len2):\n result = []\n def recall(i, j): # i, j should be the LCS' index.\n if LCS[i][j] == 0:\n return result\n if LCS[i][j] == LCS[i-1][j]:\n recall(i-1, j) \n elif LCS[i][j] == LCS[i][j-1]:\n recall(i, j-1)\n elif LCS[i][j] != LCS[i-1][j] and LCS[i][j] != LCS[i][j-1]:\n result.append(X[i-1])\n recall(i-1, j-1)\n recall(len1, len2)\n result.reverse()\n return result","sub_path":"LCS.py","file_name":"LCS.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"498227986","text":"# Runtime: 40ms\n# Your runtime beats 6.74% of python submissions.\n# 
本解效率低是因为按题目要求没有使用乘法、除法和模\n\nclass Solution(object):\n def divide(self, dividend, divisor):\n \"\"\"\n :type dividend: int\n :type divisor: int\n :rtype: int\n \"\"\"\n flag = 1\n if (dividend > 0 and divisor < 0) or (dividend < 0 and divisor > 0): flag = -1\n num1 = abs(dividend)\n num2 = abs(divisor)\n ans = 0\n while num1 >= num2:\n multi = 0\n while num1 >= num2 << multi + 1:\n multi += 1\n num1 -= num2 << multi\n ans += 1 << multi\n return max(-2147483648, min(2147483647, flag * ans))\n","sub_path":"21-30/29_divide_two_integers.py","file_name":"29_divide_two_integers.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45607878","text":"import numpy as np \nimport matplotlib.pyplot as plt \n\nm = 1.0 # 重さ\nk = 1.0 # バネ定数\nc = 1.0 # ダンパ定数\nx = 1.0 # 初期位置\nv = 0 # 初期速度\ndt = 0.1 # 時間刻み\nrecord = []\ntime = np.arange(0, 10, dt)\nfor t in time:\n F = -k * x -c * v \n a = F / m\n v += a * dt\n x += v * dt \n record.append(x)\n\nplt.plot(time, record)\nplt.show()","sub_path":"term_5/interactive_system/1st/No2.py","file_name":"No2.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"334515520","text":"from PIL import Image, ImageDraw\nimport face_recognition\n\ngroup_photo = face_recognition.load_image_file('/home/amartya/Pictures/group1.jpg')\ngroup_photo_location = face_recognition.face_locations(group_photo)\n\npil_img = Image.fromarray(group_photo)\n\n\ncount = 0\nfor face_location in group_photo_location:\n count += 1\n top, right, bottom, left = face_location\n draw = ImageDraw.Draw(pil_img)\n draw.rectangle((left, top, right, bottom), outline=(0, 0, 0), width=4)\n\npil_img.show()\n","sub_path":"FRAmartya/mark_face.py","file_name":"mark_face.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321677161","text":"#!/usr/bin/env python \n# encoding: utf-8 \n\n\"\"\" \n@version: v1.0 \n@author: BaoChengCai \n@contact: baochengcai@lanjingren.com\n@site: http://www.meipian.cn \n@software: PyCharm \n@file: serializers.py \n@time: 2018/12/18 12:25 PM \n\"\"\"\n\n###简洁模式\nfrom rest_framework import serializers\nfrom api.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, Book, UserInfo, DatabaseInfo, WorkSheet\nfrom django.contrib.auth.models import User\n\nclass SnippetSerializer(serializers.ModelSerializer):\n # owner = serializers.ReadOnlyField(source='owner.username')\n class Meta:\n model = Snippet\n fields = ('id', 'title', 'code', 'linenos', 'language', 'style')\n\nclass BookSerializer(serializers.ModelSerializer):\n # created = serializers.DateTimeField(format='%Y-%m-%d %H:%M')\n class Meta:\n model = Book\n fields = \"__all__\"\n\n# class UserInfoSerializer(serializers.ModelSerializer):\n# class Meta:\n# model = User\n# fields = \"__all__\"\n\nclass DatabaseInfoSerializer(serializers.ModelSerializer):\n class Meta:\n model = DatabaseInfo\n fields = ('id', 'db_host', 'db_name', 'db_port', 'author', 'online', 'connect_content')\n\nclass WorkSheetSerializer(serializers.ModelSerializer):\n class Meta:\n model = WorkSheet\n fields = \"__all__\"\n\nif __name__ == \"__main__\":\n pass ","sub_path":"drf_api/api/instance/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"446737321","text":"import pandas as pd\nscore_df = pd.DataFrame([[1,56,66,70], \n [2,90,45,34],\n [3,45,32,55],\n [4,70,77,89],\n [5,56,80,70],\n [6,60,54,55],\n [7,45,70,79],\n [8,34,77,76],\n [9,25,87,60],\n [10,88,40,43]],columns=['student_id','math_score','english_score','chinese_score'])\nscore_df = score_df.set_index('student_id')\nprint(score_df)\n\nscore_mean=score_df.mean(axis=1)\nprint (score_mean)\n\nprint (score_mean[6])\n\nprint (score_mean.median())\n\nscore_apply=score_df.apply(lambda x: x**(0.5)*10)\nprint (score_apply)\n\nprint (score_apply[6])\n\n\nprint(score_apply.mean())\n","sub_path":"homework/D13 pandas.py","file_name":"D13 pandas.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"569062184","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nCopyright: Deutsches Zentrum fuer Luft- und Raumfahrt e.V., 2015 (c)\nContact: daniel.boehnke@dlr.de and jonas.jepsen@dlr.de\n'''\n\nfrom VAMPzero.Handler.Parameter import parameter\n\nclass flightCycles(parameter):\n '''\n Yearly Flight Cycles\n \n :Unit: [#/year]\n\n :Source: TU Berlin - Simplified DOC model, J. Thorbeck (remarks by D. Scholz)\n http://www.fzt.haw-hamburg.de/pers/Scholz/Aero/TU-Berlin_DOC-Method_with_remarks_13-09-19.pdf \n '''\n \n def __init__(self, value=0., unit='#/year', parent='', cpacsPath=''):\n super(flightCycles, self).__init__(value=value, unit=unit, doc=self.__doc__, status='init', parent=parent,\n cpacsPath=cpacsPath)\n \n def calc(self):\n '''\n Calculation method for number of flight cycles per year\n\n :Source: TU Berlin - Simplified DOC model, J. Thorbeck (remarks by D. Scholz)\n http://www.fzt.haw-hamburg.de/pers/Scholz/Aero/TU-Berlin_DOC-Method_with_remarks_13-09-19.pdf \n ''' \n \n tFlight = self.parent.tFlight.getValue()\n \n # Values given in source\n \n POT_pa = 8760.0 # Potential yearly operation time [hr]\n DT_pa = 2748.8 # Yearly forced downtime [hr] -> see source for breakdown\n blockTime = 1.83 # Block time supplement per flight, stat. avg. 
[hr]\n \n # Calculations\n \n flightCycles = (POT_pa - DT_pa) / (tFlight + blockTime)\n \n return self.setValueCalc(flightCycles)\n \n ###################################################################################################\n #EOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFE#\n ###################################################################################################","sub_path":"src/VAMPzero/Component/Main/DOC/flightCycles.py","file_name":"flightCycles.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"117623313","text":"# Create your views here.\nfrom django.http import HttpResponse\nfrom django.template import Context, loader\n\ndef index(request):\n template = loader.get_template(\"myapp/index.html\")\n context = Context({\n 'name': \"Testypie\",\n })\n return HttpResponse(template.render(context))\n","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"532303156","text":"import pytest\nimport requests\nfrom hamcrest import assert_that\nfrom mbtest.imposters import Imposter, Predicate, Response, Stub\nfrom mbtest.matchers import had_request\n\nfrom caia.items.items_job_config import ItemsJobConfig\nfrom caia.items.steps.query_source_url import QuerySourceUrl\n\n\ndef test_valid_response_from_server(mock_server):\n with open(\"tests/resources/items/valid_src_response.json\") as file:\n valid_src_response = file.read()\n\n last_timestamp = \"20200601\"\n current_timestamp = \"20200603\"\n\n # Set up mock server with required behavior\n imposter = Imposter(\n Stub(\n Predicate(path=\"/items\",\n query={\"starttime\": last_timestamp, \"endtime\": current_timestamp},\n operator=\"deepEquals\"),\n Response(body=valid_src_response)\n )\n )\n\n with mock_server(imposter) as server:\n config = {\n 'source_url': f\"{imposter.url}/items\",\n 'storage_dir': '/tmp',\n 'last_success_lookup': 'tests/storage/items/items_last_success.txt'\n }\n job_config = ItemsJobConfig(config, 'test')\n\n query_source_url = QuerySourceUrl(job_config, last_timestamp, current_timestamp, None)\n\n step_result = query_source_url.execute()\n\n assert step_result.was_successful() is True\n assert_that(server, had_request().with_path(\"/items\").and_method(\"GET\"))\n assert valid_src_response == step_result.get_result()\n\n\ndef test_404_response_from_server(mock_server):\n # Set up mock server with required behavior\n imposter = Imposter(Stub(Predicate(path=\"/items\"),\n Response(status_code=404)))\n\n with mock_server(imposter) as server:\n config = {\n 'source_url': f\"{imposter.url}/items\",\n 'storage_dir': '/tmp',\n 'last_success_lookup': 'tests/storage/items/items_last_success.txt'\n }\n job_config = ItemsJobConfig(config, 'test')\n\n last_timestamp = \"20200601\"\n current_timestamp = \"20200603\"\n\n query_source_url = QuerySourceUrl(job_config, last_timestamp, current_timestamp, None)\n\n step_result = query_source_url.execute()\n\n assert step_result.was_successful() is False\n assert_that(server, had_request().with_path(\"/items\").and_method(\"GET\"))\n assert f\"Retrieval of '{imposter.url}/items' failed with a status code of 404\" in \\\n step_result.get_errors()\n\n\ndef test_server_does_not_exist():\n config = {\n 'source_url': \"http://localhost:12345/URL_DOES_NOT_EXIST\",\n 'storage_dir': 
'/tmp',\n 'last_success_lookup': 'tests/storage/items/items_last_success.txt'\n }\n job_config = ItemsJobConfig(config, 'test')\n\n last_timestamp = \"20200601\"\n current_timestamp = \"20200603\"\n\n query_source_url = QuerySourceUrl(job_config, last_timestamp, current_timestamp, None)\n\n with pytest.raises(requests.exceptions.ConnectionError):\n query_source_url.execute()\n","sub_path":"tests/items/steps/query_source_url_test.py","file_name":"query_source_url_test.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"459623546","text":"import collections\nimport pydot_ng as pydot\nfrom fsmPygen import *\nfrom fsmClasses import *\n\ngraph = pydot.graph_from_dot_file('data/graph2.gv')\ndot_edges = graph.get_edges()\n\n\ndef parse_data(edges_p):\n states = {}\n edges = []\n\n for e in edges_p:\n s, d, l = e.get_source(), e.get_destination(), e.get_label()\n\n start = states[s] = states.get(s, State(s))\n end = states[d] = states.get(d, State(d))\n edge = Edge(start, end, l)\n\n start.add_edge(edge)\n edges.append(edge)\n\n return states.values(), edges\n\n\ndef generate_state_table(states, edges):\n d = collections.defaultdict(dict)\n en = set([e.label for e in edges])\n\n for s in states:\n for e in en:\n d[s][e] = s\n\n for e in edges:\n d[e.start][e.label] = e.end\n\n return d\n\n\ndef create_file(states, events, table):\n cfile = create_events(events) + \"\\n\"\n cfile += create_states(states) + \"\\n\"\n cfile += create_fsm_table(table) + \"\\n\"\n\n cfile += 'unsigned long currentMillis, previousMillis = 0, interval;\\n\\n'\n\n cfile += create_poll() + \"\\n\"\n\n cfile += create_actions(states) + \"\\n\"\n cfile += create_eval_state(states) + \"\\n\"\n cfile += create_setup(states[0]) + \"\\n\"\n cfile += create_loop() + \"\\n\"\n\n return cfile\n\n\ndef main():\n s, e = parse_data(dot_edges)\n\n t = generate_state_table(s, e)\n\n with open('test.c', 'w') as f:\n f.write(create_file(s, e, t))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"296407693","text":"# Compile the model\r\nmodel.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')\r\n\r\n# Display model architecture summary\r\nmodel.summary()\r\n\r\n# Calculate pre-training accuracy\r\nscore = model.evaluate(x_test, y_test, verbose=1)\r\naccuracy = 100*score[1]\r\n\r\nprint(\"Pre-training accuracy: %.4f%%\" % accuracy) \r\n","sub_path":"compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333379727","text":"\"\"\"\nThis is a program to recognize lane from video.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport os\nimport cv2\nimport numpy as np\nfrom find_lane import FindLane\nfrom time import sleep\nimport argparse\n\n__author__ = 'ryutaShitomi'\n__version__ = '1.0'\n__date__ = '2018/10'\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-p',\"--path\",\n default=os.path.join(os.pardir, 'test_videos', 'harder_challenge_video.mp4'),\n help=\"video path\")\nparser.add_argument('-o', '--output',\n default=os.path.join(os.pardir, 'test_videos_output', 'output.mp4'),\n help='output path')\nargs = parser.parse_args()\n\nym_per_pix = 30/720\nxm_per_pix 
= 3.7/700\n# Create the class pipeline.FindLane.\nfind_lane = FindLane(ym_per_pix=ym_per_pix, xm_per_pix=xm_per_pix)\nvideo_path = args.path\ncap = cv2.VideoCapture(video_path)\n# get the image width, height and fps.\nwidth = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\nheight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\nfps = cap.get(cv2.CAP_PROP_FPS)\n# Specify extension of video to save.\nfourcc = cv2.VideoWriter_fourcc(*'MP4V')\nprint(fps, width, height)\n# output_path = '../test_videos_output/analyze_' + os.path.basename(video_path)\noutput_path = args.output\nout = cv2.VideoWriter(output_path, fourcc, int(fps), (int(width), int(height)))\nend_flag, frame = cap.read()\nESC_KEY = 27\ncount = 0\n\nsrc = np.float32([[685, 450], [1120, 720], [190, 720], [595, 450]])\ndst = np.float32([[900, 0], [900, 720], [320, 720], [320, 0]])\nvertices = np.array([[(300,height) ,(460, 450), (750, 450), (1000, height)]], dtype=np.int32)\nvertices = np.array([[(0,height) ,(0, 0), (1200, 0), (1200, height)]], dtype=np.int32)\n## for challenge video ##\n# src = np.float32([[730, 480], [1050, 730], [265, 720], [630, 480]])\n# dst = np.float32([[900, 100], [900, 720], [320, 720], [320, 100]])\n# rows = height\n# vertices = np.array([[(200,rows) ,(650, 430), (750, 430), (1130, rows)]], dtype=np.int32)\n##########################\n\n## for harder_challenge ##\nsrc = np.float32([[730, 500], [1030, 720], [190, 720], [525, 500]])\ndst = np.float32([[900, 0], [900, 720], [320, 720], [320, 0]])\nvertices = np.array([[(300,height) ,(460, 450), (750, 450), (1000, height)]], dtype=np.int32)\nvertices = np.array([[(0,height) ,(0, 0), (1200, 0), (1200, height)]], dtype=np.int32)\n##########################\n\n\n\nwarped_size = (int(width), int(height))\nfind_lane.createColorGrad('BGR')\nfind_lane.createWarp(src, dst, warped_size, vertices)\n\n\nwhile end_flag == True:\n out_img = find_lane.pipeline(frame, True)\n ## If you apply sliding window each frame, restore below comment ##\n # find_lane.left_fit = None\n\n cv2.imshow('A', out_img)\n out.write(out_img)\n key = cv2.waitKey(1)\n if key == ESC_KEY:\n break\n # sleep(0.1)\n end_flag, frame = cap.read()\n\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n","sub_path":"py_code/video_main.py","file_name":"video_main.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"223576747","text":"import os\nimport PIL.Image as Image\nimport numpy as np\n\n## Function resize\n## resize img, the largest dimension is maxSize\ndef resizeImg(I, minNet, strideNet, maxSize) :\n\n w, h = I.size\n \n wratio, hratio = w / maxSize, h / maxSize\n resizeRatio = max(wratio, hratio)\n \n w, h= w / resizeRatio, h / resizeRatio\n \n resizeW = round((w - minNet) / strideNet) * strideNet + minNet\n resizeH = round((h - minNet) / strideNet) * strideNet + minNet\n \n \n return I.resize((resizeW-1, resizeH-1))\n \n \ndef getIndexMatchGT(df, imgDir, minNet, strideNet, maxSize, index) : \n imgA = df['source_image'][ index ]\n imgB = df['target_image'][ index ]\n\n IA = Image.open(os.path.join(imgDir, imgA)).convert('RGB')\n wA, hA = IA.size\n IB = Image.open(os.path.join(imgDir, imgB)).convert('RGB')\n wB, hB = IB.size\n\n ## take coordinate and normalize between 0 and 1\n xA = np.array(list(map(float, df['XA'][ index ].split(';')))) \n yA = np.array(list(map(float, df['YA'][ index ].split(';')))) \n xB = np.array(list(map(float, df['XB'][ index ].split(';')))) \n yB = np.array(list(map(float, df['YB'][ index 
].split(';')))) \n \n # compute PCK reference length refPCK (equal to max bounding box side in image_A)\n refPCK = max(xA.max() - xA.min(), yA.max() - yA.min())\n \n xA, yA, xB, yB = xA / wA, yA / hA, xB / wB, yB / hB\n\n IA = resizeImg (IA, minNet, strideNet, maxSize)\n IB = resizeImg (IB, minNet, strideNet, maxSize)\n return IA, IB, xA, yA, xB, yB, refPCK, wA, hA, wB, hB\n \n","sub_path":"evalPascalPckDataloader.py","file_name":"evalPascalPckDataloader.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"267716142","text":"# encoding=utf8\r\nfrom urllib import request,parse\r\nimport re\r\nimport random\r\n# 解码取得url\r\n\r\nwd= {\"kw\":\"欧美街拍吧\"}\r\nwdd = parse.urlencode(wd)\r\nurl = \"http://tieba.baidu.com/f?ie=utf-8&\"\r\nurl = url+wdd\r\nprint(url)\r\n\r\n# 取得user-agent\r\nagent1 = \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36\"\r\nagent2 = \"Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3\"\r\nagent3 = \"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0\"\r\nagent4 = \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727\"\r\nlist1 = [agent1,agent2,agent3,agent4]\r\nagent = random.choice(list1)\r\nheader = {\"User-agent\":agent}\r\n# 发送请求\r\nreq = request.Request(url,headers=header)\r\nreponse = request.urlopen(req).read().decode()\r\nrept = \"bpic=(.*?)\"\r\n# print(reponse)\r\ndata = re.findall(rept,reponse)\r\n# print(data)\r\n\r\nfor i in data:\r\n print(i)","sub_path":"python网络爬虫/个人做的爬虫练习/另外爬取贴吧.py","file_name":"另外爬取贴吧.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"559051064","text":"import pytest\nfrom mock import MagicMock, patch\n\nfrom infrastructure_testing.kms import KMS\n\n\nclass TestMain:\n \"\"\" Test Suite for KMS Utility class \"\"\"\n\n def setup_method(self):\n self.mock_client = MagicMock()\n self.mock_kms = KMS(self.mock_client)\n\n def test_get_keys_returns_keys(self):\n \"\"\" Test Success: Keys returned \"\"\"\n fake_key_details = {'KeyId': 'test-key',\n 'KeyArn': 'test-key-arn'}\n\n fake_key = {'Keys': [fake_key_details]}\n fake_response = [fake_key]\n self.mock_client.get_paginator.return_value.paginate.return_value = \\\n fake_response\n\n sut = self.mock_kms.get_keys()\n\n assert len(sut) == 1\n assert sut == [fake_key_details]\n\n def test_get_keys_raises_excpetion(self):\n \"\"\" Test Failure: Exception is raised if pagination fails \"\"\"\n fake_response = '{\"HttpResponseCode\": 400}'\n self.mock_client.get_paginator.return_value.paginate.return_value = \\\n fake_response\n with pytest.raises(TypeError) as exc_info:\n self.mock_kms.get_keys()\n\n assert 'TypeError' in str(exc_info)\n\n def test_get_key_aliases_raises_correct_exception(self):\n \"\"\" Test Failure: TypeError raised when param is not a list \"\"\"\n fake_key_id = 'test-key'\n err_message = 'param (key_id) type must be a list'\n with pytest.raises(TypeError) as exc_info:\n self.mock_kms.get_key_aliases(fake_key_id)\n\n assert type(exc_info.value) != type(err_message)\n assert str(exc_info.value) == err_message\n\n\nif __name__ == '__main__':\n 
pytest.main()\n","sub_path":"tests/infrastructure_testing/test_kms.py","file_name":"test_kms.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"177821048","text":"modulus = 1000000007\nfact_range = 2000\nfact = [1]\nfact_inverse = [1]\n\ndef bezout(a, b):\n assert((a, b) != (0, 0))\n if a == 0:\n return (0, 1)\n if b == 0:\n return (1, 0)\n if a <= b:\n c = b % a\n x, y = bezout(a, c)\n return (x - (b // a) * y, y)\n y, x = bezout(b, a)\n return (x, y)\n\ndef inverse(x):\n y, _ = bezout(x, modulus)\n return y % modulus\n\nfor i in range(fact_range):\n fact.append(fact[i] * (i + 1) % modulus)\n fact_inverse.append(fact_inverse[i] * inverse(i + 1) % modulus)\n\ndef mod_exp(b, e):\n p = 1\n for i in range(e):\n p = p * b % modulus\n return p\n\ndef choose(a, b):\n if b > a:\n return 0\n return fact[a] * fact_inverse[b] * fact_inverse[a - b] % modulus\n\n","sub_path":"arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"625848440","text":"import os\nimport re\nimport random\nimport sys\n\ncontent = os.listdir()\nfiles = []\noptions = []\n#Here, we make a list of the files with the extensions .txt\ndef append_choices():\n i = 1\n for file in content:\n name, ext = os.path.splitext(file)\n if(ext.lower() == \".txt\"):\n files.append(file)\n options.append(i)\n i+=1\n\ndef print_choices():\n count = 1\n for file in files:\n print(count, \" \", file)\n count+=1\n\n\n#Store the file contents into a dictionary\ndef create_dict(file_name):\n d = {}\n with open(file_name) as f:\n for line in f:\n key = re.split(\"[-:]\",line)[0]\n translations = (re.split(\"[-:]\",line)[1])\n val = [x.strip() for x in translations.split(',')]\n d[(key)] = val\n\n return d\n\n\nappend_choices()\nwhile(True):\n while(True):\n print(\"Vocabulary Program\")\n print(\"Choose a file with the proper number or press Q to quit\")\n print_choices()\n print(\"Q. Quit Program\")\n\n choice = input()\n if(choice.upper() == \"Q\"):\n sys.exit()\n\n if (not choice.isdigit()) or (int(choice) not in options):\n print(\"You must choose one of the valid options Q,\")\n for l in options:\n print(l)\n else:\n break\n\n #QUIZ STARTS HERE IF THE CHOICE IS RIGHT\n if(int(choice) >0 and int(choice) <= len(files) and files[int(choice) - 1] in files):\n txt_file = files[int(choice)-1]\n dictionary = create_dict(txt_file)\n\n while(True):\n num_of_questions = input(\"How many words in your quiz? ==> \")\n if num_of_questions.isdigit():\n if int(num_of_questions) > 0 and int(num_of_questions) <= 10:\n num_of_questions = int(num_of_questions)\n break\n else:\n print(\"Number should be greater than zero and less than or equal to 10\")\n continue\n else:\n print(\"Number must be an integer\")\n continue\n\n\n points = 0; #Keeps track of correct answers #\n questions_list = []\n for x in range(1,num_of_questions+1):\n english, spanish = random.choice(list(dictionary.items()))\n\n #This makes sure that the generated question is random and unique in question set\n while(english in questions_list):\n english, spanish = random.choice(list(dictionary.items()))\n\n questions_list.append(english);\n print(\"#\",x,\" Enter a valid Spanish phrase for \", english)\n answer = input()\n if(answer in spanish):\n points+=1\n if(len(spanish) > 1):\n print(\"Correct. 
You could also have chosen \")\n for item in spanish:\n if(item != answer):\n print(item)\n else:\n print (\"Correct!. Good Work.\")\n else:\n print(\"Incorrect. Valid choice(s) were \")\n for item in spanish:\n print(item,\" \")\n\n grade = (points/num_of_questions) * 100.00\n print(\"You got \",points,\" out of \", num_of_questions,\", which is \", grade, \"%\" )\n else:\n print(\"Wrong choice\")\n","sub_path":"Quiz Program/quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41162313","text":"from django.db import models\nfrom django.utils.timezone import datetime\nfrom django.utils.translation import ugettext_lazy as _\nfrom apps.call_records_app import utils\nfrom apps.call_records_app.models.choices import BOOLEAN_CHOICES, CALL_TYPE\n\n\nclass CallRecord(models.Model):\n \"\"\"\n Call record model\n \"\"\"\n\n timestamp = models.DateTimeField(\n verbose_name=_('call timestamp'),\n null=True, blank=True,\n help_text=_('The timestamp of when the event occurred')\n )\n\n call_type = models.PositiveSmallIntegerField(\n verbose_name=_('call type'), choices=CALL_TYPE,\n help_text=_('Indicate if it is a call start or end record.'),\n )\n\n call_id = models.PositiveIntegerField(\n verbose_name=_('call ID'),\n help_text=_('Unique ID for each call record pair.'),\n )\n\n source = models.CharField(\n max_length=20,\n verbose_name=_('origin phone number'),\n null=True, blank=True,\n validators=[utils.phone_number_validator],\n help_text=_('The subscriber phone number that originated the call.')\n )\n\n destination = models.CharField(\n max_length=20,\n verbose_name=_('destination phone number'),\n blank=True, null=True,\n help_text=_('The phone number receiving the call.')\n )\n\n compromised = models.BooleanField(\n default=False,\n verbose_name=_('compromised'),\n choices=BOOLEAN_CHOICES\n )\n\n created_at = models.DateTimeField(\n auto_now_add=True,\n verbose_name=_('created at')\n )\n\n reference_month = models.PositiveIntegerField(\n null=True, blank=True, verbose_name=_('reference month')\n )\n reference_year = models.PositiveIntegerField(\n null=True, blank=True, verbose_name=_('reference year')\n )\n\n processed_call = models.BooleanField(default=False, verbose_name=_('processed call'))\n\n def __str__(self):\n return _(\n 'call_id: %(call_id)s, type: %(call_type)s, timestamp: %(timestamp)s.') % {\n 'call_id': self.call_id or '',\n 'call_type': self.call_type or '',\n 'timestamp': self.timestamp or ''}\n\n def save(self, *args, **kwargs):\n if self.call_type == 1:\n if self.destination in ['', ' ', None]:\n self.compromised = True\n\n if self.call_type == 2:\n if self.timestamp:\n self.reference_month = self.timestamp.month\n self.reference_year = self.timestamp.year\n else:\n now = datetime.now()\n self.reference_month = now.month\n self.reference_year = now.year\n\n if not self.timestamp:\n self.compromised = True\n\n super().save(*args, **kwargs)\n\n class Meta:\n app_label = 'call_records_app'\n ordering = ['timestamp', ]\n verbose_name_plural = _('call records')\n verbose_name = _('call record')\n","sub_path":"olist/apps/call_records_app/models/CallRecord.py","file_name":"CallRecord.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"252865826","text":"import json\nfrom pprint import pprint\nimport sys\n\nprint(\"This is the name of the script: \", 
sys.argv[0])\nprint(\"Number of arguments: \", len(sys.argv))\nprint(\"The arguments are: \", str(sys.argv))\n\nsvcfile = \"\"\nregfile = \"\"\n\n\nfor arg in sys.argv:\n if arg.startswith(\"--svc=\"):\n svcfile = arg.replace(\"--svc=\", \"\")\n elif arg.startswith(\"--reg=\"):\n regfile = arg.replace(\"--reg=\", \"\")\n elif arg == sys.argv[0]:\n continue\n else:\n print(\"invalid argument \" + arg)\n\n\nprint(regfile)\nprint(svcfile)\n\n\nwith open(regfile) as f:\n regdata = json.load(f)\n\nwith open(svcfile) as f:\n svcdata = json.load(f)\n\nservicefield = 4\n\nsvcservices = []\nfor service in svcdata[\"rows\"]:\n svcservices.append(service[servicefield])\n\nregservices = []\nfor service in regdata[\"rows\"]:\n regservices.append(service[servicefield])\n\nmissingregservices = []\nfor regservice in regservices:\n found = False\n for svcservice in svcservices:\n if(regservice == svcservice):\n found = True\n break\n if not found:\n missingregservices.append(regservice)\n\nmissingsvcservices = []\nfor svcservice in svcservices:\n found = False\n for regservice in regservices:\n if(regservice == svcservice):\n found = True\n break\n if not found:\n missingsvcservices.append(svcservice)\n\npprint(missingregservices)\npprint(missingsvcservices)\n","sub_path":"missingservices.py","file_name":"missingservices.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596237018","text":"# coding=utf-8\n\"\"\"\nCe module comporte les méthodes principales pour jouer.\nC'est dans ce module que nous allons procéder au lancement du jeu\nen utilisant notre api.py.\n\"\"\"\nimport argparse\nfrom api import débuter_partie, jouer_coup\nfrom quoridor import Quoridor, QuoridorError\nfrom quoridorx import QuoridorX\n\n\ndef analyser_commande():\n \"\"\"\n Cette méthode utilise le module argparse (ArgumentParser) pour\n récupérer l'idul du joueur depuis la ligne de commande.\n\n :return: un Namespace contenant les attributs automatique, graphique et idul.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Jeu Quoridor - phase 3\")\n parser.add_argument(\"idul\", help=\"IDUL du joueur.\")\n parser.add_argument(\"-a\", \"--automatique\", action=\"store_true\", dest=\"automatique\",\n help=\"Activer le mode automatique.\")\n parser.add_argument(\"-x\", \"--graphique\", action=\"store_true\", dest=\"graphique\",\n help=\"Activer le mode graphique.\")\n return parser.parse_args()\n\n\ndef jouer_quoridor():\n \"\"\"\n Cette méthode est la première à être appelé et elle analyse la commande entré pour\n nous rediriger vers le bon mode de jeu.\n \"\"\"\n args = analyser_commande()\n automatique = args.automatique\n graphique = args.graphique\n idul = args.idul\n\n id_partie, etat_partie = débuter_partie(idul)\n\n try:\n if graphique:\n partie = QuoridorX(etat_partie[\"joueurs\"], etat_partie[\"murs\"])\n partie.afficher()\n else:\n partie = Quoridor(etat_partie[\"joueurs\"], etat_partie[\"murs\"])\n print(partie, \"\\n\")\n except QuoridorError as quoridor_error:\n print(quoridor_error)\n\n en_jeu = True\n\n while en_jeu:\n if automatique:\n try:\n type_coup, coup_x, coup_y = partie.jouer_coup(1)\n except QuoridorError as quoridor_error:\n print(quoridor_error, \"\\n\")\n else:\n if graphique:\n partie.root.wait_variable(partie.ok_var)\n if partie.ok_var != 0:\n coup = partie.obtenir_coup()\n if len(coup) == 3:\n type_coup, coup_x, coup_y = coup[0], coup[1], coup[2]\n else:\n type_coup = input(\"Veuillez entrer le type 
de coup que vous voulez jouer :\\n\"\n \"D pour déplacer le jeton,\\n\"\n \"MH pour placer un mur horizontal,\\n\"\n \"MV pour placer un mur vertical.\\n\")\n\n coup_x = input(\"Veuillez entrer la position de votre coup en x correspondant à \"\n \"la colonne :\\n\")\n\n coup_y = input(\"Veuillez entrer la position de votre coup en y correspondant à \"\n \"la ligne :\\n\")\n\n try:\n etat_partie = jouer_coup(id_partie, type_coup, (coup_x, coup_y))\n except RuntimeError as runtime_error:\n print(runtime_error, \"\\n\")\n except StopIteration as stop_iteration:\n gagnant = stop_iteration.args[0]\n print(gagnant, \"a gagné!\")\n en_jeu = False\n else:\n partie.update_etat_partie(etat_partie[\"joueurs\"], etat_partie[\"murs\"])\n print(partie.état_partie(), \"\\n\")\n if graphique:\n partie.afficher()\n else:\n print(partie, \"\\n\")\n\n\nif __name__ == '__main__':\n jouer_quoridor()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"633399625","text":"'''\n@author: Dean D'souza\n'''\n# Tokenizer was created in order to handle improper Tokenization of data through the default word_tokenize() function of nltk\n# These issues occurred due to the nature of the typed data\n\n# Importing Required Libraries\nimport re\n\n# Main function to be used for Tokenization (in the context of the data)\ndef pToken(pSent):\n \n # Splitting based on space character\n tok = pSent.split(' ')\n \n # Declaring an empty holder for the tokens\n temp = []\n \n # Iteratively inspecting and properly Tokenizing special conditions\n for t in tok:\n # Proper Tokenization of '&' as the it was translated to '&' while loading\n if re.search('^[A-Za-z0-9\\'\\-]*(&[A-Za-z0-9\\'\\-]*)+$',t):\n wlist = re.findall('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',t)\n dlist = re.findall('&',t)\n i=0\n j=0\n while i2:\n temp.append(re.findall('[A-Za-z0-9\\'\\-]*[\\!@\\*\\?]*',t)[1])\n temp.append('\"')\n elif re.search('^"[A-Za-z0-9\\'\\-]*$',t):\n tlist = re.findall('[A-Za-z0-9\\'\\-]+',t)\n if len(tlist)>1:\n temp. append('\"')\n temp.append(tlist[1])\n else:\n temp.append('\"') \n elif re.search('^[A-Za-z0-9\\'\\-]*[\\!@\\*\\?]*"$',t):\n if re.search('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',t):\n temp.append(re.findall('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',t)[0])\n temp. append('\"')\n # Proper Tokenization of '()'\n elif re.search('\\([@#]?[A-Za-z0-9\\'\\-]*[\\!@\\*\\?]*\\)$',t):\n temp.append('(')\n if len(re.findall('[@#]?[A-Za-z0-9\\'\\-]*[\\!@\\*\\?]*',t))>2:\n temp.append(re.findall('[A-Za-z0-9\\'\\-]*[\\!@\\*\\?]*',t)[1])\n temp.append(')')\n # Proper Tokenization of '('\n elif re.search('^\\([A-Za-z0-9\\'\\-]*[\\!@\\*\\?]*$',t):\n temp. append('(')\n tlist = re.findall('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',t)\n if len(tlist)>=1:\n temp.append(tlist[0])\n # Proper Tokenization of ')'\n elif re.search('^[A-Za-z0-9\\'\\-]*[\\!@\\*\\?]*\\)$',t):\n if re.search('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',t):\n tlist = re.findall('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',t)\n temp.append(re.findall('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',tlist[0])[0])\n temp. append(')')\n # Proper Tokenization of ','\n elif re.search('\\,[A-Za-z0-9\\'\\-]*$',t):\n temp. 
append(',')\n tlist = re.findall('[A-Za-z0-9\\'\\-]+',t)\n if len(tlist)>=1:\n temp.append(tlist[0])\n elif re.search('^[A-Za-z0-9\\'\\-]*[\\!@\\*\\?]*\\,$',t):\n if re.search('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',t):\n tlist = re.findall('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',t)\n temp.append(re.findall('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',tlist[0])[0])\n temp. append(',')\n # Proper Tokenization for text containing '...'\n elif re.search('^[A-Za-z0-9\\'\\-]+(\\.{2,}[A-Za-z0-9]*[\\!@\\*\\?]*)+$',t):\n wlist = re.findall('[A-Za-z0-9\\'\\-]+[\\!@\\*\\?]*',t)\n dlist = re.findall('\\.{2,}',t)\n i=0\n j=0\n while i1:\n temp.append(tlist[1])\n # This portion tries to Tokenize out all emojis\n elif re.search('^[.]+(\\.{2,}[.]*)+$',t):\n wlist = re.findall('[^\\.]+',t)\n dlist = re.findall('\\.{2,}',t)\n i=0\n j=0\n while i 0:\n return elem[0].text\n else:\n return \"\"\n\n def mapDate(self, date):\n \"\"\"\n Map date to formatted value\n \"\"\"\n if not date:\n return date\n return datetime.strptime(date, '%d %b %Y').strftime('%Y-%m-%d')\n\n def mapPhones(self):\n \"\"\"\n Maps collection of phone number data to json array\n \"\"\"\n phones = self.getElementText(constants.detailPhoneXPath)\n return phones.split(',')\n\n def mapFaxes(self):\n \"\"\"\n Maps collection of fax number data to json array\n \"\"\"\n faxes = self.getElementText(constants.detailFaxXPath)\n return faxes.split(',')\n\n def mapCorporateSecretaries(self):\n \"\"\"\n Maps collection of corporate Secretary data to json array\n \"\"\"\n do = True\n corporateSecretaries = []\n index = 1\n while do:\n nameElem = self.browser.find_elements_by_xpath(constants.corpSecNameXPath.format(index))\n if len(nameElem) == 0:\n do = False\n else:\n corporateSecretary = {\n \"name\": nameElem[0].text,\n \"email\": self.browser.find_elements_by_xpath(constants.corpSecEmailXPath.format(index))[0].text,\n \"phone\": self.browser.find_elements_by_xpath(constants.corpSecPhoneXPath.format(index))[0].text\n }\n corporateSecretaries.append(corporateSecretary)\n index = index + 1\n return corporateSecretaries\n\n def mapDirectors(self):\n \"\"\"\n Maps collection of director data to json array\n \"\"\"\n do = True\n directors = []\n index = 1\n while do:\n nameElem = self.browser.find_elements_by_xpath(constants.directorNameXPath.format(index))\n if len(nameElem) == 0:\n do = False\n else:\n director = {\n \"name\": nameElem[0].text,\n \"position\": self.browser.find_elements_by_xpath(constants.directorPositionXPath.format(index))[0].text\n }\n directors.append(director)\n index = index + 1\n # TODO: check if there's a company with more than 10 directors\n return directors\n\n def mapSubsidiaries(self):\n \"\"\"\n Maps collection of subsidiaries data to json array\n \"\"\"\n do = True\n subsidiaries = []\n index = 1\n\n try:\n dropdown = self.browser.find_element_by_xpath(constants.subsidiaryOpXPath)\n dropdown.click()\n time.sleep(self.staleElemWait)\n except NoSuchElementException:\n pass\n\n while do:\n nameElem = self.browser.find_elements_by_xpath(constants.subsidiaryNameXpath.format(index))\n if len(nameElem) == 0:\n do = False\n else:\n subsidiary = {\n \"name\": nameElem[0].text,\n \"type\": self.browser.find_elements_by_xpath(constants.subsidiaryTypeXpath.format(index))[0].text,\n \"total asset\": self.browser.find_elements_by_xpath(constants.subsidiaryTotalXpath.format(index))[0].text,\n \"percentage\": self.browser.find_elements_by_xpath(constants.subsidiaryPercXpath.format(index))[0].text\n }\n subsidiaries.append(subsidiary)\n\n if index == 100:\n 
nextElem = self.browser.find_element_by_id('subsidiaryTable_next')\n if 'disabled' in nextElem.get_attribute('class'):\n return subsidiaries\n nextElem.click()\n time.sleep(self.staleElemWait)\n index = 1\n else:\n index = index + 1\n return subsidiaries\n","sub_path":"crawler/util/companyMapper.py","file_name":"companyMapper.py","file_ext":"py","file_size_in_byte":6101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481400318","text":"###################################################\n# Author : Pierre_Hao\n# Date : 2015/09/23\n# Discription : The main params of Triplet Loss Training\n###################################################\n\nimport numpy as np\n\nclass Params():\n \"\"\"Init the important params of training caffe model\"\"\"\n def __init__(self, batch_size=4):\n ### Caffe params\n self._batch_size = batch_size\n self.gpu_id = 0 # useful only gpu mode\n # This should be the same as your train.prototxt\n self.write_diff = True\n self.margin = 0.20\n self._name_to_top_map = {'data': 0, 'label': 1} \n self._solver_prototxt = 'models/solver.prototxt'\n self._pretrained_model = 'models/res50.caffemodel'# if needn't finetune, set it to None\n self._caffe_root = '/opt/caffe/'\n self._mean = np.float32([128.0, 128.0, 128.0])\n ### Data params\n self._img_root = '/media/F/train_data/guwan/' #for constructing training dataset \n self.USE_FULLPATH = True #in trainset.txt is the full path of image\n self._txt_path = 'image/trainset1.txt'\n ### params for creating trainset\n self._resize = (224,224)#(w,h)\n self._resize_mode = True\n self._pad_mode = False # if only do pad, maybe there will be an error on size\n self._pad_scale = 1.3\n self._input_data_size = self._set_input_size()\n \n def _set_input_size(self,channels=3):\n return (self._batch_size*3,channels,self._resize[0],self._resize[1])\n","sub_path":"python/Triplet_cls/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"613338532","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n# ***********************************************\n# @Time : 23/02/2021 15:14\n# @File : inference.py\n# @email : xiang.chen.17@ucl.ac.uk\n# @author: Xiang Chen\n# ***********************************************\nimport os\nimport pandas as pd\nimport time\nimport numpy as np\nimport torch as t\nimport torch.nn.functional as F\nfrom PIL import Image\nimport torchvision.transforms as transforms\nfrom DFANet import DFANet\nimport cv2\nimport argparse\n\nNUM_CLASSES = 3\nSEQ_TXT = './dataset/frame_name.txt'\nSAVE_DIR = './result_pics/'\nDATASET_PATH = './dataset/test/'\nCLASS_DICT = './dataset/class_dict.csv'\nMODEL_WEIGHTS = './weights/99.pth'\n\n# inference image size (height, width)\nIMG_SIZE = (480, 640)\n#IMG_SIZE = (384, 512)\n\ndef get_arguments():\n '''\n Parse all the arguments provided from the CLI.\n Returns:\n A list of parsed arguments.\n '''\n parser = argparse.ArgumentParser(description='BiseNet inference')\n parser.add_argument('--dataset-path', type=str, default=DATASET_PATH,\n help='Path to dataset files on which inference is performed.')\n parser.add_argument('--model-weights', type=str, default=MODEL_WEIGHTS,\n help='path to the file with model weights.')\n parser.add_argument('--num-classes', type=int, default=NUM_CLASSES,\n help='Number of classes to predict.')\n parser.add_argument('--save-dir', type=str, default=SAVE_DIR,\n help='Where to save 
predicted mask.')\n parser.add_argument('--seq-txt', type=str, default=SEQ_TXT,\n help='Text sprintf to sequence txt file.')\n parser.add_argument('--class-dict', type=str, default=CLASS_DICT,\n help='class color dictionary.')\n return parser.parse_args()\n\n\ndef main():\n if t.cuda.is_available():\n device = t.device('cuda')\n else:\n device = t.device('cpu')\n \"\"\"Create the model and start the evaluation process.\"\"\"\n args = get_arguments()\n\n # create network\n ch_cfg = [[8, 48, 96], [240, 144, 288], [240, 144, 288]]\n net = DFANet(ch_cfg, 64, args.num_classes).to(device)\n checkpoint = t.load(args.model_weights, map_location='cpu') # 如果在cpu上推理需要加上map_location的映射,在GPU上不需要\n net.load_state_dict(checkpoint)\n pd_label_color = pd.read_csv(args.class_dict, sep=',')\n name_value = pd_label_color['name'].values\n num_class = len(name_value)\n colormap = []\n\n # read the label color from the class dict\n for i in range(num_class):\n tmp = pd_label_color.iloc[i]\n color = [tmp['r'], tmp['g'], tmp['b']]\n colormap.append(color)\n\n cm = np.array(colormap).astype('uint8')\n\n # create output folder if it doesn't exist\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n\n # Get number of lines in text file\n num_imgs = sum(1 for line in open(args.seq_txt))\n\n # perform inferences on dataset\n f_id = open(args.seq_txt, 'r')\n\n counter = 1\n sum_times = 0\n\n for line in f_id:\n image_name = line.strip('\\n')\n image_base_name = image_name.split('.')[0]\n # read image\n img_path = os.path.join(args.dataset_path, image_name)\n img = cv2.imread(img_path)\n if img is None:\n break\n img_reverse = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n transform = transforms.Compose([transforms.Resize(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n img_out = transform(img_reverse).unsqueeze(0)\n\n # inference starting...\n start_time = time.time()\n valImg = img_out.to(device)\n out = net(valImg)\n elapsed_time = time.time() - start_time\n sum_times += elapsed_time\n print('Elapsed time: %.04f for image num %03d' % (elapsed_time, counter))\n out = F.log_softmax(out, dim=1)\n pre_label = out.max(dim=1)[1].squeeze().cpu().data.numpy()\n pre = cm[pre_label]\n cv2.imwrite(args.save_dir + image_base_name + '.png', pre)\n counter += 1\n\n f_id.close()\n print('Average time per image: %.5f' % (sum_times / num_imgs))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"DFANet/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"459954799","text":"from Struktura import Struktura\nfrom Player import Player\nfrom Items import Weapon\nfrom Items import Armor\n\nimport Code\n\n\nprint(\"Jesteś gotowy na przygodę?\\n\")\nprint(\"Wybierz klasę (1/2/3)\")\nprint(\"1. Wojownik\")\nprint(\"2. Mag\")\nprint(\"3. 
Łotrzyk\")\n\nwhile True:\n klasa = input(\">>>\")\n\n if klasa in [\"1\", \"2\", \"3\"]:\n break\n\n else:\n print(\"Zła wartość\")\n\nif klasa == \"1\":\n weapon = Weapon(\"Miecz pazia\", 50, 60, 5)\n armor = Armor(\"Zardzewiała zbroja\",10)\n player = Player(weapon,armor)\n\nelif klasa == \"2\":\n weapon = Weapon(\"Sztylet złodziejaszka\", 30, 90, 3)\n armor = Armor(\"Skurzana tunika\", 10)\n player = Player(weapon, armor)\n\nelif klasa == \"3\":\n weapon = Weapon(\"Dębowa różdżka\",70,80,10)\n armor = Armor(\"Stara szata\",10)\n player = Player(weapon, armor)\n\nstru = Struktura(klasa + \".txt\")\n\nCode.generate()\nplayer.sword.append(Weapon(\"kot\", 100, 100, 0))\nwhile True:\n #stru.p_move(player)\n player.attack()\n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460662273","text":"import re\n\n\nclass Solution:\n def solveEquation(self, equation):\n \"\"\"\n :type equation: str\n :rtype: str\n \"\"\"\n sp = equation.split(\"=\")\n leftEquation = sp[0]\n rightEquation = sp[1]\n if leftEquation[0] not in \"+-\":\n leftEquation = \"+\" + leftEquation\n if rightEquation[0] not in \"+-\":\n rightEquation = \"+\" + rightEquation\n\n elementPat = re.compile(\"[\\\\+\\\\-]\\\\d*x?\")\n leftElement = elementPat.findall(leftEquation)\n rightElement = elementPat.findall(rightEquation)\n leftMerge = [0, ] * 2 # [c, x]\n rightMerge = [0, ] * 2\n for e in leftElement:\n e0 = self.transform(e)\n if e0[1] == \"x\":\n leftMerge[1] += e0[0]\n else:\n leftMerge[0] += e0[0]\n\n for e in rightElement:\n e0 = self.transform(e)\n if e0[1] == \"x\":\n rightMerge[1] += e0[0]\n else:\n rightMerge[0] += e0[0]\n\n if leftMerge[1] == rightMerge[1]:\n return \"Infinite solutions\" if leftMerge[0] == rightMerge[0] else \"No solution\"\n else:\n leftMerge[1] -= rightMerge[1]\n rightMerge[0] -= leftMerge[0]\n return \"x=\" + str(rightMerge[0] // leftMerge[1])\n\n def transform(self, s0):\n negFlag = True if s0[0] == \"-\" else False\n if \"x\" in s0:\n typex = \"x\"\n if len(s0) == 2:\n coe = 1\n else:\n coe = int(s0[1:len(s0) - 1])\n else:\n typex = \"c\" # const value\n coe = int(s0[1:])\n coe = -coe if negFlag else coe\n return coe, typex # (coe, type)\n\nif __name__ == \"__main__\":\n s = Solution()\n equ = \"x+5-3+x=6+x-2\"\n equ = \"x=3x\"\n equ = \"2x+3x-6x=x+2\"\n equ = \"x=x+2\"\n equ = \"5=6\"\n print(s.solveEquation(equ))","sub_path":"exercise/leetcode/python_src/by2017_Sep/Leet640.py","file_name":"Leet640.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"89935487","text":"# external imports\nfrom sqlalchemy import event\n# local imports\nfrom nautilus.network import dispatch_action\n\nclass CRUDNotificationCreator:\n \"\"\"\n This mixin class provides basic crus event publishing when the model\n is mutated, following nautilus conventions.\n \"\"\"\n\n\n nautilus_base = True # required to prevent self-application on creation\n\n @classmethod\n def add_listener(cls, db_event, action_type):\n # on event, dispatch the appropriate action\n @event.listens_for(cls, db_event)\n def dispatchCRUDAction(mapper, connection, target):\n \"\"\" notifies the network of the new user model \"\"\"\n dispatch_action(\n action_type='{}_{}'.format(cls.__name__.lower(), type),\n payload=target.__json__(),\n )\n\n\n @classmethod\n def onCreation(cls):\n # perform the intended behavior\n 
super().onCreation()\n # add the crud action emitters\n cls.add_listener('after_insert', 'create_success')\n cls.add_listener('after_delete', 'delete_success')\n cls.add_listener('after_update', 'update_success')\n","sub_path":"nautilus/models/mixins/crudNotificationCreator.py","file_name":"crudNotificationCreator.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"97032306","text":"import os\r\n\r\ndef AddTo_ServerList(serverID: str):\r\n \"\"\"\r\n Cette fonction reçoit en argument l'id du serveur et l'ajoute à la liste des serveurs\r\n s'il n'y est pas déjà inscrit.\r\n\r\n Return : void\r\n \"\"\"\r\n #Path\r\n path_list = \"data/servers/list.gbot\"\r\n path_serverCount = \"data/servers/scount.gbot\"\r\n\r\n #Variables\r\n tryNumber = 0\r\n serverCount = 0\r\n id_match = False #Cherche si le serveur a déjà été saisi\r\n first_read = '' #récupère la première ligne du fichier\r\n\r\n #Fichiers\r\n O_list = open(path_list, 'a+') #Création de la liste si elle n'existe pas\r\n O_list.close()\r\n O_scount = open(path_serverCount, 'a+') #Création du compteur de serveur s'il n'existe pas\r\n O_scount.close()\r\n\r\n #Recup du nombre de serveurs\r\n O_scount = open(path_serverCount, 'r')\r\n is_serverCount_empty = os.stat(path_serverCount).st_size #Vérifie si le fichier est vide ou pas\r\n if(is_serverCount_empty == 0): #Si le fichier est vide\r\n O_scount.close()\r\n\r\n #Ouverture en mode écriture\r\n O_scount = open(path_serverCount, 'w')\r\n O_scount.write('0') #Init à 0\r\n O_scount.close()\r\n else: #S'il n'est pas vide\r\n O_scount = open(path_serverCount, 'r')\r\n O_scount.seek(0) #On se place au début\r\n\r\n serverCount = O_scount.read() #On récupère le nombre de serveurs\r\n O_scount.close()\r\n \r\n O_scount.close()\r\n\r\n O_list = open(path_list, 'r')\r\n is_list_empty = os.stat(path_list).st_size #Vérifie si le fichier est vide ou pas\r\n if(is_list_empty == 0): #Si le fichier est vide on met un ID direct\r\n O_list.close()\r\n\r\n #Ouverture en append (c'est une liste on veut pas tout reset)\r\n O_list = open(path_list, 'a')\r\n O_list.write(serverID)\r\n O_list.write('\\n')\r\n O_list.close()\r\n\r\n #Incrémentation\r\n serverCount = int(serverCount) + 1\r\n #On MàJ le fichier servercount\r\n O_scount = open(path_serverCount, 'w')\r\n O_scount.write(str(serverCount))\r\n O_scount.close()\r\n else: #Si le fichier n'est pas vide\r\n O_list.close()\r\n\r\n while(tryNumber <= int(serverCount) and id_match == False): #Tant qu'on n'arrive pas à la fin du fichier\r\n O_list = open(path_list, 'r')\r\n O_list.seek(0)\r\n first_read = O_list.readline() #Lecture de la première ligne et on la teste\r\n\r\n if(first_read == serverID+'\\n'): #Si dès la première ligne le serveurID est inscrit, on va pas plus loin\r\n id_match = True\r\n \r\n for line in O_list: #On teste tout le fichier, et si on trouve une ligne avec la valeur serverID on stop\r\n if(line == serverID+'\\n'):\r\n id_match = True\r\n \r\n if(tryNumber == int(serverCount) and id_match == False): #Si on arrive à la fin du fichier et qu'il n'y a toujours pas eu de match on inscrit l'id du serveur\r\n O_list.close()\r\n\r\n #Ouverture en append pour AJOUTER un nouvel id\r\n O_list = open(path_list, 'a')\r\n O_list.write(serverID)\r\n O_list.write('\\n')\r\n O_list.close()\r\n\r\n #Incrémentation du compteur\r\n serverCount = int(serverCount) + 1\r\n\r\n #On MàJ le fichier compteur\r\n O_scount = open(path_serverCount, 'w')\r\n 
O_scount.write(str(serverCount))\r\n O_scount.close()\r\n \r\n tryNumber += 1","sub_path":"fnct/getservers.py","file_name":"getservers.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211403229","text":"import numpy as np\nimport theano\n\n\ndef init_params(rows, cols, method='xavier', mean=0., std=1., tensor=True, seed=None):\n \"\"\"Initialize the weight and bias matrices based on probabilistic distribution.\n \n :param rows: Size of the input, number of rows to be generated.\n :type rows: int\n :param cols: Size of the output, number of columns to be generated.\n :type cols: int\n :param method: Distibution to use to generated the weights, defaults to 'xavier'.\n :type method: str, optional\n :param mean: Mean for the distribution to be generated, defaults to 0.\n :type mean: float, optional\n :param std: Standard deviation for the distribution to be generated, defaults to 1.\n :type std: float, optional\n :param tensor: Needs to return a theano friendly-format, defaults to True.\n :type tensor: bool, optional\n :param seed: Seed to be set for randomness, defaults to None.\n :type seed: int, optional\n \n :raises ValueError: `method` needs to be 'ones', 'zeros', 'uniform', 'xavier' or 'normal', see below for details.\n \n :method: * **Zeros**: :math:`W_j = \\\\vec{0}`\n * **Ones**: :math:`W_j = \\\\vec{1}`\n * **Uniform**: :math:`W_j \\\\sim \\\\mathcal{U} _{\\\\left[0, 1 \\\\right)}`\n * **Xavier**: :math:`W_j \\\\sim \\\\mathcal{U}\\\\left[ -\\\\frac{\\\\sqrt{6}}{\\\\sqrt{n_j + n_{j+1}}}, \\\\frac{\\\\sqrt{6}}{\\\\sqrt{n_j + n_{j+1}}} \\\\right]`\n * **Normal**: :math:`W_j \\\\sim \\\\mathcal{N}(0, 1)`\n\n :references: * Neuneier, Ralph, and Hans Georg Zimmermann. \"`How to train neural networks `_\" In Neural networks: tricks of the trade, pp. 373-423. Springer, Berlin, Heidelberg, 1998.\n * Glorot, Xavier, and Yoshua Bengio. \"`Understanding the difficulty of training deep feedforward neural networks `_\" In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pp. 249-256. 2010.\n :return: Weight or bias matrix for initiating ML optimization\n :rtype: numpy.array\n \"\"\"\n\n # Set random state\n rdm = np.random.RandomState(seed) if seed is not None else np.random\n\n # Get the weights\n if method.upper() == 'ONES':\n W = np.ones((rows, cols))\n if method.upper() == 'ZEROS':\n W = np.zeros((rows, cols))\n elif method.upper() == 'UNIFORM':\n W = rdm.rand(rows, cols)\n elif method.upper() == 'XAVIER':\n W = rdm.uniform(low = -np.sqrt(6. / (rows + cols)),\n high = np.sqrt(6. 
/ (rows + cols)),\n size = (rows, cols))\n elif method.upper() == 'NORMAL':\n W = rdm.normal(mean, std, rows * cols).reshape((rows, cols))\n else:\n raise ValueError('Weight initialization method not valid.')\n\n if tensor:\n return np.asarray(W, dtype=theano.config.floatX)\n else:\n return W\n\n","sub_path":"statinf/ml/initializations.py","file_name":"initializations.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135902276","text":"\n\nfrom xai.brain.wordbase.nouns._formula import _FORMULA\n\n#calss header\nclass _FORMULAS(_FORMULA, ):\n\tdef __init__(self,): \n\t\t_FORMULA.__init__(self)\n\t\tself.name = \"FORMULAS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"formula\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_formulas.py","file_name":"_formulas.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626034134","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport cherrypy\nimport ezjailwrapper\nfrom jinja2 import Environment, FileSystemLoader\nimport os.path\nimport re\n\nenv = Environment(loader=FileSystemLoader('templates'))\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n\n\n\ndef cmdexec(func):\n def wrapper(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except ezjailwrapper.CommandExecuteException as ex:\n cherrypy.session['error_message'] = ex.error_msg\n raise cherrypy.HTTPRedirect(\"index\")\n\n return wrapper\n\n\n\nclass EzjailWeb:\n \n @cherrypy.expose\n def index(self):\n jails = ezjailwrapper.list()\n tmpl = env.get_template('index.html')\n error_message = cherrypy.session.pop('error_message', None)\n return tmpl.render(jails=jails, error_message=error_message)\n\n\n @cherrypy.expose\n def new(self):\n tmpl = env.get_template('new.html')\n return tmpl.render()\n\n @cherrypy.expose\n @cmdexec\n def create(self, ipaddress, hostname):\n\n errors = {}\n if re.match(\"^[0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}$\", ipaddress) == None:\n errors['ipaddress'] = 'Invalid IP Address'\n\n if re.match(\"^[0-9a-zA-Z-\\\\._]+$\", hostname) == None:\n errors['hostname'] = 'Invalid Hostname'\n\n if len(errors) > 0:\n tmpl = env.get_template('new.html')\n return tmpl.render(errors=errors, hostname=hostname, ipaddress=ipaddress)\n\n ezjailwrapper.create(hostname, ipaddress)\n\n raise cherrypy.HTTPRedirect(\"index\")\n\n\n @cherrypy.expose\n @cmdexec\n def start(self, jailname):\n ezjailwrapper.start(jailname)\n raise cherrypy.HTTPRedirect(\"index\")\n\n\n @cherrypy.expose\n @cmdexec\n def stop(self, jailname):\n ezjailwrapper.stop(jailname)\n raise cherrypy.HTTPRedirect(\"index\")\n\n\ncherrypy.quickstart(EzjailWeb(), config=\"ezjailweb.conf\")\n\n","sub_path":"ezjailweb.py","file_name":"ezjailweb.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"166463531","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nimport subprocess\nimport numpy as np\nimport pandas as pd\nimport sys\nfrom tqdm import tqdm\nsys.path.append('/ref/analysis/pipelines/')\nimport pysam\nimport kang\nimport math\nfile_bam = sys.argv[1] #'intron3000.merge.sorted.bam'\nfile_fa = sys.argv[2] #'Creinhardtii_281_v5.0.fa'\nfile_pk = sys.argv[3] # '/ref/analysis/pipelines/pandas_df/Creinhardtii_281_v5.5.gene.gff3.pandas.df.pk'\ndicHD2seq = 
kang.Fasta2dic(file_fa)\n\ndef get_block(array,depth_cut=0):\n lim_len_block = 100 # size of read fragment\n #depth_cut = 0 # ... 10. ... .. .. ..\n block_list = []\n #print(len(np.shape(array)))\n if len(np.shape(array)) == 1:\n rows = 1\n block = []\n for n,j in enumerate(array):\n if j > depth_cut:\n block.append(n)\n else:\n if len(block) > lim_len_block:\n block_list.append([block[0],block[-1]])\n block = []\n else:\n block = []\n if block != []:\n block_list.append([block[0],block[-1]])\n else: \n rows, columns = np.shape(array) \n for i in range(rows):\n earray = array[i]\n block = []\n for n,j in enumerate(earray):\n if j > depth_cut:\n block.append(n)\n else:\n if len(block) > lim_len_block:\n block_list.append([i,block[0],block[-1]])\n block = []\n else:\n block = []\n return block_list\n\n\nrows = len(dicHD2seq.keys())\nchromosomes = dicHD2seq.keys()\nchromosomes.sort()\ndicN2chr = dict(enumerate(chromosomes))\ndicChr2N = {b:a for a,b in dicN2chr.iteritems()}\ncolumns = max([len(x) for x in dicHD2seq.values()])-1\nprint(rows,columns)\ncontinuity_matrix = np.zeros([rows,columns],dtype=np.int)\nmatch_matrix = np.zeros([rows,columns],dtype=np.int)\n#Outfile = open('chromosome.map.txt','w')\n#for a,b in dicChr2N.iteritems():\n# print(a,b,sep='\\t',file=Outfile)\n\n\nprint('start loop')\n\nsamfile = pysam.Samfile( file_bam, \"rb\" )\nit = samfile.fetch()\nfor line in tqdm(it):#$open('temp.sam.cut'): # should be changed to zero base map\n # Check qual\n\n if line.is_proper_pair == False:\n continue\n if line.is_duplicate == True:\n continue\n if line.is_qcfail == True:\n continue\n if line.is_secondary == True:\n continue\n\n # Check qual end\n chromosome = line.reference_name\n startpos = line.reference_start # zero based\n fragmentsize = line.tlen\n qname = line.qname\n echr = dicChr2N[chromosome]\n cigars = line.cigartuples\n adding_len = 0 \n for o,l in cigars: # operation, length\n if o == 0:\n match_matrix[echr,startpos+adding_len:startpos+adding_len+l] = 1\n adding_len += l \n else:\n adding_len += l\n \n if line.mpos - startpos > 0 :\n continuity_matrix[echr,startpos:line.mpos] += 1 # list characteristic can utillize fragment size itself.\n else:\n continuity_matrix[echr,startpos:startpos+line.reference_length] += 1 # minus 1 for continuity value -> removed for this time just coverage purpose\n\n #if fragmentsize > 200:\n # pass\n #else: continue\n #endpos = startpos + fragmentsize # minus 1 for continuity value -> removed for this time just coverage purpose\n #\n #continuity_matrix[echr,startpos:endpos] += 1 # list characteristic can utillize fragment size itself.\n\narray_contiguity = continuity_matrix\n\ndf_gff_cre = pd.read_pickle(file_pk)\ndic = {'mRNA' : [],\n 'length' : [],\n 'total.depth': [],\n 'ratio.depth': [],\n 'coverage (1x)' : [],\n 'coverage (10x)' : [],\n 'coverage (30x)' : [],\n 'match' : [],\n 'match.ratio' :[]\n }\ngenelist = set([x for x,y in df_gff_cre.index])\nfor genename in tqdm(genelist):\n try:\n if math.isnan(float(genename)):\n continue\n except ValueError:\n pass\n df = df_gff_cre.loc[genename]\n mask = (df[2]=='CDS')\n for ix in set(df[mask].index):\n df_mRNA = df[mask].loc[ix]\n if isinstance(df_mRNA, pd.Series):\n transcript_name = df_mRNA['transcriptname']\n else:\n transcript_name = df_mRNA['transcriptname'][0]\n try:\n chromosome = df_mRNA[0].values[0]\n except AttributeError:\n chromosome = df_mRNA[0]\n array = df_mRNA[[3,4]].values\n try:\n r,c = np.shape(array)\n if c != 2 :\n print('?!')\n exit()\n covered_array = []\n matched_array = 
[]\n            for i in range(r):\n\n                left = array[i,:][0] #int(df_mRNA[3])\n                right = array[i,:][1] #int(df_mRNA[4])\n                echr = dicChr2N[chromosome]\n                contiguity = list(array_contiguity[echr][left-1:right]) # continuity value requires minus 1 from right pos\n                matched = list(match_matrix[echr][left-1:right])\n                covered_array += contiguity\n                matched_array += matched\n        except ValueError:\n            left = array[0]\n            right = array[1]\n            echr = dicChr2N[chromosome]\n            covered_array = list(array_contiguity[echr][left-1:right]) # continuity value requires minus 1 from right pos\n            matched_array = list(match_matrix[echr][left-1:right])\n        covered_array = np.array(covered_array)\n        length = len(covered_array)\n        dic['mRNA'].append(transcript_name)\n        dic['length'].append(length)\n        dic['total.depth'].append(sum(covered_array))\n        dic['ratio.depth'].append(float(sum(covered_array))/float(length))\n        dic['coverage (1x)'].append(len((covered_array >= 1).nonzero()[0])/float(length))\n        dic['coverage (10x)'].append(len((covered_array >= 10).nonzero()[0])/float(length))\n        dic['coverage (30x)'].append(len((covered_array >= 30).nonzero()[0])/float(length))\n        dic['match'].append(sum(matched_array))\n        dic['match.ratio'].append(float(sum(matched_array))/float(length))\n\ndf_cont = pd.DataFrame(dic)\n\n#mask = (df_cont['coverage (1x)'] > 0.8) & (df_cont['match.ratio'] > 0.6)\n#df_cont_cov = df_cont[mask]\ndf_cont_cov = df_cont\nmatrix = df_cont_cov.sort_values(by='total.depth',ascending=False)[['mRNA','coverage (1x)','coverage (10x)','coverage (30x)','ratio.depth','total.depth','match','match.ratio']].values\nnp.savetxt(file_bam+'.transcripts.all.txt',matrix,fmt='%s',delimiter='\\t')\n#Outfile = open(file_bam+'.all.txt','w')\n#print(file_bam, '\\t'.join(map(str,array)),sep='\\t',file=Outfile)\n","sub_path":"py/bam2coveredalltranscripts.py","file_name":"bam2coveredalltranscripts.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"195659726","text":"#ladder\n#Austin C 11/15/18\n#calculates the length of a ladder\n\nimport math\n\ndef main():\n    print(\"this program calculates the size of a ladder\")\n    h = float(input(\"What is the height of the ladder: \"))\n    x = float(input(\"What is the angle of the ladder (degrees): \"))\n    # ladder length from height and angle: L = h / sin(angle)\n    angle = math.radians(x)\n    L = h / math.sin(angle)\n    print(\"The length of the ladder is: \", L)\nmain()\n","sub_path":"python-1.3/ladder-radians#10.py","file_name":"ladder-radians#10.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"45037193","text":"from django.urls import path, include\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n path('', views.StartView.as_view(), name='start'),\r\n path('register', views.UserFormView.as_view(), name=\"register\"),\r\n path('login', views.log_in, name='login'),\r\n path('logout', views.logout, name='logout'),\r\n path('create', views.PostAddView.as_view(), name='create'),\r\n path('update//', views.PostUpdateView.as_view(), name='update'),\r\n path('delete//', views.PostDeleteView.as_view(), name='delete')\r\n]","sub_path":"Week10/Blog/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"350116251","text":"import cPickle as pickle\nimport pymongo\nimport os\nimport time\nfrom pymongo import MongoClient\n\n\ndef retrieve_cluster_map(coll):\n doi_to_cluster = {}\n t0 = time.time()\n for idx, doc in enumerate(coll.find()):\n doi_to_cluster[doc['doi']] = doc['cluster_id']\n if idx % 1e6 == 0 and idx > 0:\n print('docs retrieved: {}'.format(idx))\n s = 'time elapsed to retrive & copy cluster map: {}'\n print(s.format(time.time() - t0))\n return doi_to_cluster\n\n\ndef load_cluster_labels():\n labels = []\n for filename in os.listdir('./sparse'):\n if 'label' not in filename:\n continue\n with open('./sparse/' + filename) as f:\n A = pickle.load(f)\n labels += A\n return set(labels)\n\n\ndef create_can_citations(coll, citations_coll, doi_to_cluster, labels):\n batch = []\n N = 0\n t0 = time.time()\n # for idx, doc in enumerate(citations_coll.find()):\n with open('./citations_compressed.txt', 'r') as f:\n for idx, line in enumerate(f):\n _, tar, src = line.strip().split('\\t')\n # src = doi_to_cluster[src]\n # if doc['doi'] not in doi_to_cluster:\n # continue\n # src = doi_to_cluster[doc['doi']]\n # tar = doc['cluster_id']\n if src not in doi_to_cluster:\n continue\n src = doi_to_cluster[src]\n if src not in labels or tar not in labels:\n continue\n # if src not in batch:\n batch.append({\n 'src': src,\n 'tar': tar\n })\n # else:\n # batch[src].append(tar)\n if idx % 100e3 == 0:\n N += len(batch)\n coll.insert_many(batch)\n batch = []\n t1 = time.time()\n s = 'time elapsed: {} | total count: {} | idx: {}'\n print(s.format(t1 - t0, N, idx))\n t0 = time.time()\n # insert remaining\n print('inserting remaining batch')\n coll.insert_many(batch)\n print('building index on src')\n coll.create_index([('src', pymongo.ASCENDING)])\n\n\nif __name__ == '__main__':\n\n client = MongoClient('localhost', 27017)\n db = client['citeseerx']\n clusters_coll = db['clusters']\n citations_coll = db['citations']\n\n print('retrieving doi_to_cluster map')\n doi_to_cluster = retrieve_cluster_map(clusters_coll)\n\n print('loading cluster labels')\n labels = load_cluster_labels()\n\n print('creating can_citations collection')\n can_citations_coll = db['can_citations']\n create_can_citations(can_citations_coll,\n citations_coll,\n doi_to_cluster,\n labels)\n","sub_path":"scripts/create_can_citations.py","file_name":"create_can_citations.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644861343","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\"\"\"\nAuthor\t:\n\nDate\t:\n\nBrief\t: \n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom . import Layer\nfrom . 
import WindowAlignmentLayer\n\nclass EmbeddingLayer(Layer):\n \"\"\"EmbeddingLayer\"\"\"\n def __init__(self, vocab_size, emb_size, name=\"embedding\", \n initializer=None, **kwargs):\n Layer.__init__(self, name, **kwargs) \n self._emb_size = emb_size\n if not initializer:\n initializer = tf.contrib.layers.variance_scaling_initializer()\n self._W = self.get_variable(name + '_W', shape=[vocab_size, emb_size],\n initializer=initializer)\n\n def _forward(self, seq):\n return tf.nn.embedding_lookup(self._W, seq)\n\n\nclass WordContextRegionEmbeddingLayer(EmbeddingLayer):\n \"\"\"WordContextRegionEmbeddingLayer\"\"\"\n def __init__(self, vocab_size, emb_size, win_size, \\\n win_merge_fn=None, \\\n name=\"word_context_region_embedding\", \\\n initializer=None, \\\n **kwargs):\n Layer.__init__(self, name, **kwargs) \n self._emb_size = emb_size\n self._win_size = win_size\n self._win_merge_fn = win_merge_fn\n if not initializer:\n initializer = tf.contrib.layers.variance_scaling_initializer()\n self._K = self.get_variable(name + '_K', shape=[vocab_size, win_size, emb_size],\n initializer=initializer)\n super(WordContextRegionEmbeddingLayer, self).__init__(vocab_size, emb_size, name,\n initializer, **kwargs)\n\n def _forward(self, seq):\n # Window alignment embedding\n win_aligned_seq = WindowAlignmentLayer(self._win_size)(seq)\n win_aligned_emb = super(WordContextRegionEmbeddingLayer, self)._forward(win_aligned_seq)\n\n win_radius = self._win_size / 2\n trimed_seq = seq[:, win_radius: seq.get_shape()[1] - win_radius]\n context_unit = tf.nn.embedding_lookup(self._K, trimed_seq)\n\n projected_emb = win_aligned_emb * context_unit\n return self._win_merge_fn(projected_emb, axis=2)\n\n\nclass ContextWordRegionEmbeddingLayer(EmbeddingLayer):\n \"\"\"ContextWordRegionEmbeddingLayer\"\"\"\n def __init__(self, vocab_size, emb_size, win_size, \n win_merge_fn=None,\n name=\"embedding\",\n initializer=None, **kwargs):\n super(ContextWordRegionEmbeddingLayer, self).__init__(vocab_size * win_size, emb_size, name,\n initializer, **kwargs)\n self._win_merge_fn = win_merge_fn\n self._word_emb = tf.get_variable(name + '_wordmeb', shape=[vocab_size, emb_size], \n initializer=initializer)\n self._unit_id_bias = np.array([i * vocab_size for i in range(win_size)])\n self._win_size = win_size\n\n def _win_aligned_units(self, seq):\n \"\"\"\n _win_aligned_unit\n \"\"\"\n win_aligned_seq = WindowAlignmentLayer(self._win_size)(seq)\n win_aligned_seq = win_aligned_seq + self._unit_id_bias\n win_aligned_unit = super(ContextWordRegionEmbeddingLayer, self)._forward(win_aligned_seq)\n return win_aligned_unit\n \n def _forward(self, seq):\n \"\"\"forward\n \"\"\"\n win_radius = self._win_size / 2\n word_emb = tf.nn.embedding_lookup(self._word_emb, \\\n tf.slice(seq, \\\n [0, win_radius], \\\n [-1, tf.cast(seq.get_shape()[1] - 2 * win_radius, tf.int32)]))\n word_emb = tf.expand_dims(word_emb, 2)\n win_aligned_unit = self._win_aligned_units(seq)\n embedding = win_aligned_unit * word_emb\n embedding = self._win_merge_fn(embedding, axis=2)\n return embedding\n\n\ndef main():\n \"\"\"main\"\"\"\n pass\n\nif '__main__' == __name__:\n main()\n\n","sub_path":"lib/tedll/layers/embedding_layer.py","file_name":"embedding_layer.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"353908348","text":"# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos\n# is the index of a leaf with a possibly out-of-order value. 
Restore the\n# heap invariant.\ndef _siftdown(heap, startpos, pos):\n    newitem = heap[pos]\n    # Follow the path to the root, moving parents down until finding a place\n    # newitem fits.\n    while pos > startpos:\n        parentpos = (pos - 1) >> 1\n        parent = heap[parentpos]\n        if cmp_lt(newitem, parent):\n            heap[pos] = parent\n            pos = parentpos\n            continue\n        break\n    heap[pos] = newitem\n","sub_path":"test_segment_base/heapq_0.py","file_name":"heapq_0.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"42492990","text":"class Solution(object):\n    def isIsomorphic(self, s, t):\n        \"\"\"\n        :type s: str\n        :type t: str\n        :rtype: bool\n        \"\"\"\n        self.smap={}\n        self.tmap={}\n        # strings of different lengths cannot be isomorphic\n        if(len(s) != len(t)):\n            return False\n        for each in range(0,len(s)):\n            schar = s[each]\n            tchar = t[each]\n            if(schar in self.smap.keys()):\n                if(self.smap[schar] != tchar):\n                    return False\n            else:\n                self.smap[schar] = tchar\n            if(tchar in self.tmap.keys()):\n                if(self.tmap[tchar] != schar):\n                    return False\n            else:\n                self.tmap[tchar] = schar\n        return True\n","sub_path":"205-isomorphicStrings.py","file_name":"205-isomorphicStrings.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"125586182","text":"import pygame\nimport pygame.sprite\n\nimport spritesheet\n\n\n\nclass Frog(pygame.sprite.Sprite):\n    def __init__(self):\n\n        ss = spritesheet.load()\n\n        super(Frog,self).__init__()\n\n        self.images = ss.images_at(\n            (\n                (1,611,80,80),\n                (77,613,80,80),\n                (181,617,80,80),\n                (296,608,80,80)\n            ),\n            colorkey=(255,255,255)\n        )\n        self.rect = self.images[0].get_rect()\n        self.ni = 0\n\n    def image(self):\n        self.ni=(self.ni + 1) % len(self.images)\n        return self.images[self.ni]\n\n","sub_path":"game/frog.py","file_name":"frog.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"95627808","text":"from state import new_state\nimport numpy as np\nimport pickle\n\nopened = 0\nQ = np.zeros((18,30,21,2))\n\ndef FlappyPolicy(state, screen):\n\n    global opened\n    global Q\n\n    if not opened:\n        file = open(\"Qtrained\",'rb')\n        Q = pickle.load(file)\n        opened = 1\n\n    s = new_state(state)\n    action = np.argmax(Q[s[0],s[1],s[2]][:])\n    return action*119\n\n\n","sub_path":"simonet/FlappyAgent.py","file_name":"FlappyAgent.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"40722756","text":"from HexDecConverter import AHexDecConverter\n\nimport math\n\nclass ADrawingPad:\n    '''\n    A class that takes encoded hexadecimal parameters\n    and outputs commands for a drawing pad program.\n    '''\n    # Minimum and Maximum Color Values\n    kMinMaxColorValues = (0, 255)\n\n    # Minimum and Maximum Coordinate Point Values\n    # ((x-min, x-max), (y-min, y-max))\n    kMinMaxCoordinatePointValues = ((-8192, 8191), (-8192, 8191))\n\n    # Opcode dictionary. 
\n # Values are the translated command and number of byte args\n kCodebook = {\n \"F0\": (\"CLR\", 0),\n \"80\": (\"PEN\", 2),\n \"A0\": (\"C0\", 8),\n \"C0\": (\"MV\", 4)\n }\n\n # Minimum and Maximum decoded values from bytes\n kByteValueBounds = (-8192, 8191)\n\n def __init__(self):\n # I recognize that a lot of these also fall under the \"Clear()\" method,\n # but I put them here again for readability\n\n # Our byte encoder/decoder class from alpc1\n self.hexDecConverter = AHexDecConverter()\n\n self.currentPoint = (0, 0) # Current pen coordinates\n self.lastPoint = (0, 0) # last pen coordinates\n self.penUp = True # If the pen is currently up or not\n self.outOfBounds = False # If we're out of bounds\n self.penColor = (0, 0, 0, 255) # The pen's current color\n\n # List of all commands since the program was started\n self.commandList = [] \n\n # List of commands from the last time Action() was run\n self.currentCommandList = []\n\n def Action(self, hexString):\n '''\n Parses hexString for opcodes, then translates the opcodes \n and their arguments from encoded hexadecimal values to\n commands for a drawing pad program.\n\n Returns the stringified representation of the parsed commandList\n or an error message.\n '''\n hexStringLength = len(hexString)\n\n # If more than one char in hexString\n if hexStringLength > 1:\n # If an odd number of chars\n if hexStringLength % 2 > 0:\n hexString = hexString[:-1]\n # Split the list into bytecodes\n hexList = [\n hexString[i] + hexString[i+1] \n for i in range(0, len(hexString), 2)\n ]\n else:\n return(\"Not enough arguments given.\")\n\n # Resets Current Command List\n self.currentCommandList = []\n\n # Current Opcode\n currentCode = \"\"\n\n # While there are codes in hexList\n while len(hexList) > 0:\n # Args to send with our command\n hexArgs = []\n\n try:\n # Keep popping until we get a valid opcode\n while currentCode not in self.kCodebook:\n currentCode = hexList.pop(0)\n except:\n pass # End of list\n\n # If our command is CLR, don't collect arguments\n if currentCode != \"F0\":\n try:\n # Keep popping until there's another opcode in the queue\n while hexList[0] not in self.kCodebook:\n hexArgs.append(hexList.pop(0))\n except:\n pass # End of list\n\n # Build Our Command Out\n self.__BuildCommand(currentCode, hexArgs)\n\n currentCode = \"\"\n\n # If at least one valid command was parsed\n if len(self.currentCommandList) > 0:\n # Add currentCommandList to commandList\n self.commandList += self.currentCommandList\n else:\n return \"No valid commands were parsed\"\n\n # Returns our command list\n return self.GetCommandString()\n\n def GetCommandString(self, current = True):\n '''\n Returns stringified commandList or \n currentCommandList based on \"current\"\n '''\n # if current is True, send currentCommandList. Else send the full one.\n listToSend = self.currentCommandList if current else self.commandList\n\n return \";\\n\".join(listToSend) + \";\"\n\n def __BuildCommand(self, command, hexArgs=[]):\n '''\n Sends command and args out to relevant method. 
\n \n Returns True if successful.\n '''\n # If command is \"CLR\"\n if command == \"F0\":\n self.__Clear()\n\n # For all other commands\n else:\n # Decodes our args\n decodedArgs = self.__InterpretCodes(hexArgs)\n\n # Remove all invalid arguments\n decodedCommands = self.__RemoveInvalidCodes(command, decodedArgs)\n\n # If we had valid commands after all\n if decodedCommands != []:\n # If command is \"PEN\"\n if command == \"80\":\n # Only grab the first (and only) argument\n penCommand = decodedCommands[0]\n\n self.__SetPenUp(penCommand)\n # If command is \"C0\"\n elif command == \"A0\":\n # Convert colorCodes to a tuple\n colorCodes = tuple(decodedCommands)\n\n self.__SetColor(colorCodes)\n # If command is \"MV\"\n elif command == \"C0\":\n numberOfCommands = len(decodedCommands)\n\n # Re-organizes our arguments into coordinate pairs\n coordinates = [\n (decodedCommands[i], decodedCommands[i+1]) \n for i in range(0, numberOfCommands, 2)\n ]\n\n self.__MovePen(coordinates)\n\n return True\n\n def __Clear(self, sendToCommandList=True):\n '''\n Clears the current settings, making the current point (0,0), \n setting the pen to the \"up\" position,setting outOfBounds to False, \n and changing the color to (0,0,0,255) (black). \n\n Also appends \"CLR\" to currentCommandList if sendToCommandList == True.\n '''\n self.currentPoint = (0, 0) # Current pen coordinates\n self.lastPoint = (0, 0) # last pen coordinates\n self.penUp = True # If the pen is currently up or not\n self.outOfBounds = False # If we're out of bounds\n self.penColor = (0, 0, 0, 255) # The pen's current color\n\n # Sending \"CLR\" to the commandList is optional, but default\n if sendToCommandList:\n self.currentCommandList.append(\"CLR\")\n\n return True\n\n def __SetPenUp(self, numCode):\n '''\n Sets penUp to True or False depending on numCode.\n\n Appends \"PEN {UP/DOWN}\" to currentCommandList depending on the code.\n '''\n currentPenUp = self.penUp\n\n # penUp is True if numCode was decoded as 0\n self.penUp = True if numCode == 0 else False\n\n # Don't add \"PEN UP/DOWN\" to command list if nothing has changed.\n if currentPenUp != self.penUp:\n upOrDown = \"UP\" if numCode == 0 else \"DOWN\"\n self.currentCommandList.append(\"PEN \" + upOrDown)\n\n return True\n\n def __SetColor(self, colorCodes):\n '''\n Sets the current color based on the codes given. \n\n Appends \"C0 {r} {g} {b} {a}\" to currentCommandList.\n\n Returns True if successful or False if a number\n is outside of our color range.\n '''\n minColor = self.kMinMaxColorValues[0]\n maxColor = self.kMinMaxColorValues[1]\n\n for colorCode in colorCodes:\n # If color code is not between our min and max values (0, 255)\n if not (minColor <= colorCode <= maxColor):\n return False\n\n # Sets the color\n self.penColor = colorCodes\n colorValueString = \" \".join(str(i) for i in colorCodes)\n\n # Appends command to our list\n self.currentCommandList.append(\"C0 \" + colorValueString)\n\n return True\n\n def __MovePen(self, coordinatePairsList):\n ''' \n Moves pen based on the coordinates decoded from the given hexArgs. \n \n Appends \"PEN {UP/DOWN}\" to currentCommandList as necessary. 
\n\n Also checks/sets outOfBounds and appends \"MV ({x}, {y})\" \n to currentCommandList as necessary.\n\n Returns True if successful\n '''\n # Building our command\n currentCommand = \"MV\"\n\n # Set our last point to\n self.lastPoint = self.currentPoint\n\n # Iterates through all of our coordinate pairs\n for coordinatePair in coordinatePairsList:\n currentlyOutOfBounds = self.outOfBounds\n\n # Gets weighted coordinatePair\n weightedCoordinates = self.__WeighCoordinates(\n coordinatePair\n )\n\n # Sets currentPoint to the absolutePoint\n self.currentPoint = (coordinatePair[0] + self.currentPoint[0], \n coordinatePair[1] + self.currentPoint[1])\n\n # If the weighted point differs from the absolute point\n if weightedCoordinates != self.currentPoint:\n self.outOfBounds = True\n else:\n self.outOfBounds = False\n\n # If going out of or coming back in bounds\n if self.outOfBounds != currentlyOutOfBounds:\n # If the pen isn't already up\n if not self.penUp:\n # If now out of bounds\n if self.outOfBounds:\n # Append coordinates to our current command\n currentCommand += \" \" + str(weightedCoordinates)\n\n self.currentCommandList.append(currentCommand)\n self.currentCommandList.append(\"PEN UP\")\n else:\n # Coordinates upon re-entry\n reEntryCoordinates = self.__WeighCoordinates(\n self.lastPoint, False\n )\n # Append coordinates to our current command\n currentCommand += \" \" + str(reEntryCoordinates)\n\n self.currentCommandList.append(currentCommand)\n self.currentCommandList.append(\"PEN DOWN\")\n \n currentCommand = \"MV\"\n \n # If in bounds and the pen is down, \n # append weighted coordinates to current command\n if not self.outOfBounds and not self.penUp:\n currentCommand += \" \" + str(weightedCoordinates)\n\n # Set last point to current point\n self.lastPoint = self.currentPoint\n\n # If the pen isn't down or we're out of bounds\n if self.penUp or self.outOfBounds:\n currentCommand += \" \" + str(weightedCoordinates)\n \n # If we have coordinates in it, add the command to command list\n if currentCommand != \"MV\":\n self.currentCommandList.append(currentCommand)\n\n return True\n\n def __WeighCoordinates(self, \n unweightedCoordinates, testCurrentPoint=True):\n ''' \n Modifies coordinates based on kMinMaxCoordinatePointValues\n and returns the result.\n '''\n minMaxValues = self.kMinMaxCoordinatePointValues\n\n # Absolute coordinates\n absoluteX = unweightedCoordinates[0] + self.currentPoint[0]\n absoluteY = unweightedCoordinates[1] + self.currentPoint[1]\n\n # Setting up our weighted coordinates\n weightedX = absoluteX\n weightedY = absoluteY\n weightedCoordinates = [weightedX, weightedY]\n\n # If we have a vertical or horizontal line between our coordinates\n if (absoluteX == self.currentPoint[0] or \n absoluteY == self.currentPoint[1]):\n # If x is out of bounds in the negative direction\n if absoluteX < minMaxValues[0][0]:\n weightedCoordinates[0] = minMaxValues[0][0]\n # If x is out of bounds in the positive direction\n elif absoluteX > minMaxValues[0][1]:\n weightedCoordinates[0] = minMaxValues[0][1]\n # If y is out of bounds in the negative direction\n if absoluteY < minMaxValues[1][0]:\n weightedCoordinates[1] = minMaxValues[1][0]\n # If y is out of bounds in the positive direction\n elif absoluteY > minMaxValues[1][1]:\n weightedCoordinates[1] = minMaxValues[1][1]\n # If it's a diagonal line\n else:\n currentPoint = self.currentPoint\n\n unweightedX = unweightedCoordinates[0]\n unweightedY = unweightedCoordinates[1]\n\n weightedX = absoluteX\n weightedY = absoluteY\n\n 
try:\n # Get our tangent\n tangent = ((unweightedY) / (unweightedX))\n except: # Can't divide by 0\n tangent = 1\n\n # If x is out of bounds in the negative direction\n if absoluteX < minMaxValues[0][0]:\n weightedX = minMaxValues[0][0]\n if testCurrentPoint:\n weightedY = math.ceil(currentPoint[0] + \n (minMaxValues[0][0] - currentPoint[0]) * tangent)\n else:\n weightedY = math.ceil((minMaxValues[0][0] - \n currentPoint[0]) * tangent * 2)\n # If x is out of bounds in the positive direction\n elif absoluteX > minMaxValues[0][1]:\n weightedX = minMaxValues[0][1]\n if testCurrentPoint:\n weightedY = math.ceil(currentPoint[0] + \n (minMaxValues[0][1] - currentPoint[0]) * tangent)\n else:\n weightedY = math.ceil((minMaxValues[0][1] - \n currentPoint[0]) * tangent * 2)\n # If y is out of bounds in the negative direction\n elif absoluteY < minMaxValues[1][0]:\n weightedY = minMaxValues[1][0]\n \n try:\n weightedX = math.ceil(currentPoint[1] + \n (minMaxValues[1][0] - currentPoint[1]) / tangent)\n except: # Can't divide by 0\n weightedX = weightedX\n # If y is out of bounds in the positive direction\n elif absoluteY > minMaxValues[1][1]:\n weightedY = minMaxValues[1][1]\n \n try:\n weightedX = math.ceil(currentPoint[1] + \n (minMaxValues[1][1] - currentPoint[1]) / tangent)\n except: # Can't divide by 0\n weightedX = weightedX\n\n weightedCoordinates = [weightedX, weightedY]\n \n return tuple(weightedCoordinates)\n\n def __InterpretCodes(self, hexCodes):\n ''' \n Interprets hexCodes and returns a list of decoded decimal numbers.\n '''\n numberOfCodes = len(hexCodes)\n\n # Will hold all of our decoded decimal arguments\n decodedCommands = []\n\n # If we have an odd number of codes\n if numberOfCodes % 2 > 0 and numberOfCodes > 0:\n hexCodes.pop()\n numberOfCodes = len(hexCodes)\n\n # If we have two or more codes\n if numberOfCodes > 0:\n # Get a list of decoded arguments using our pairs\n decodedCommands = [\n self.hexDecConverter.Decode(hexCodes[i], hexCodes[i+1]) \n for i in range(0, numberOfCodes, 2) \n ]\n \n return decodedCommands\n\n def __RemoveInvalidCodes(self, command, decimalArgs):\n ''' \n Removes invalid codes from decimalArgs and returns the updated list.\n\n If the number of invalid codes is smaller than required for the \n command, returns an empty list.\n '''\n # The number of hexadecimal arguments we need for each command\n numArgsRequired = int(self.kCodebook[command][1] / 2)\n argListLength = len(decimalArgs)\n\n # If command is \"MV\"\n if command == \"C0\":\n # For each decoded argument\n for i, dArg in enumerate(decimalArgs):\n # if out of our specified range of values (-8192, 8191)\n if not (self.kByteValueBounds[0] <= \n dArg <= self.kByteValueBounds[1]):\n # Get a new list with only valid coordinate values\n decimalArgs = decimalArgs[:i]\n argListLength = len(decimalArgs)\n break\n\n # Our coordinates must be in multiples of four\n argsTooMany = argListLength % numArgsRequired\n\n # Pop the last few codes if necessary to have our multiples of four\n for i in range(argsTooMany):\n decimalArgs.pop()\n\n # If we end up with fewer than 4 args altogether, empty the list\n if len(decimalArgs) < numArgsRequired:\n decimalArgs = []\n # For all other commands\n else:\n # We must have no more than the required number of arguments\n argsTooMany = argListLength - numArgsRequired\n # Pop any extra\n for i in range(argsTooMany):\n decimalArgs.pop()\n \n # Iterate through each argument\n for dArg in decimalArgs:\n # If any of them are outside of our decimal range\n if not 
(self.kByteValueBounds[0] <= \n                    int(dArg) <= self.kByteValueBounds[1]):\n                    # Empty the list\n                    decimalArgs = []\n                    break\n\n        return decimalArgs\n    ","sub_path":"DrawingPad.py","file_name":"DrawingPad.py","file_ext":"py","file_size_in_byte":17603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"527191686","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport uuid\nimport cv2\nimport logging.config\nimport numpy as np\nimport pandas as pd\nfrom util import *\nfrom sklearn import cluster\n\ndef main(_path):\n    conf_path = os.path.join(os.path.dirname(__file__), 'conf')\n    logging_config_path = os.path.join(conf_path, 'logging.conf')\n    logging.config.fileConfig(logging_config_path)\n    logger = logging.getLogger(\"root\")\n\n    img_path = _path\n\n    logger.info(\"img_path: %s\" % img_path)\n\n    # Read the image; bmp, jpg, png, tiff and other common formats are supported\n    img_src = cv2.imread(img_path)\n    logger.info(\"Image loaded: %s\" % img_path)\n    height_src, width_src, dim_src = img_src.shape\n    width = BlockConf().resize[0]\n    height = int(height_src * width / width_src)\n\n    # Center point of the image\n    center_point = Point(width / 2, height / 2)\n\n    # Resize the image\n    logger.info(\"Resizing image\")\n    size = (width, height)\n    img_resize = cv2.resize(img_src, size, interpolation=cv2.INTER_CUBIC)\n    logger.info(\"Image resized to: (%s, %s)\" % (width, height))\n\n    # Convert to an HSV image\n    logger.info(\"Extracting the green regions of the image\")\n    HSV = cv2.cvtColor(img_resize, cv2.COLOR_BGR2HSV)\n\n    # Extract the green regions\n    LowerGreen = np.array([34, 15, 46])\n    UpperGreen = np.array([107, 255, 255])\n    mask = cv2.inRange(HSV, LowerGreen, UpperGreen)\n    img = img_resize.copy()\n    img[mask != 0] = [255, 255, 255]\n    # words_img = cv2.bitwise_xor(img, cv2.bitwise_not(img), mask=mask)\n    filtered_area = mask\n    logger.info(\"Green region extraction finished\")\n    # cv2.imwrite(\"filtered_area.jpg\", img)\n    # cv2.imshow(\"Image\", img)\n    # cv2.waitKey(0)\n\n    # Find contours\n    binary, contours, hierarchy = cv2.findContours(filtered_area, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    # Show all contours\n    # cv2.drawContours(img, contours, -1, (0, 0, 255), 2)\n    # cv2.imwrite(\"empty.jpg\", img)\n    # cv2.imshow(\"Image\", img)\n    # cv2.waitKey(0)\n\n    # Minimum bounding area\n    min_area = width * height * 0.5\n    contour = findAppositeContour(contours, min_area)\n    logger.info(\"Contour extraction finished: %s\" % len(contour))\n\n    # Create a blank image used to draw the contour that was found\n    if len(contour) == 0:\n        logger.error(\"No valid contour found\")\n        exit(1)\n    empty_img = np.zeros(img.shape, np.uint8)\n    cv2.drawContours(empty_img, [contour], -1, (255, 255, 255), 2)\n\n    # Show the valid contour\n    # cv2.imwrite(\"effective_contour.jpg\", empty_img)\n    # cv2.imshow(\"Image\", empty_img)\n    # cv2.waitKey(0)\n\n    # Hough line detection\n    logger.info(\"Running Hough transform line detection\")\n    gray = cv2.cvtColor(empty_img, cv2.COLOR_BGR2GRAY)\n    lines = cv2.HoughLines(gray, 1, np.pi / 180, 160)\n\n    # Reduce to a 2-D array\n    lines = lines[:, 0, :]\n\n    # Should check that exactly four lines were found\n    logger.info(\"Detected %s lines\" % len(lines))\n    logger.info(\"Detected lines: %s\" % lines)\n    # lines = lines[lines[:, 1] < np.pi / 2]\n\n    # Prepare the line data for clustering: train_lines[dis, x, y]\n    lines[:, 1][lines[:, 0] < 0] = lines[:, 1][lines[:, 0] < 0] + np.pi\n    lines[:, 0] = np.abs(lines[:, 0])\n    train_lines = np.c_[lines[:, 0] / height, np.cos(lines[:, 1]), np.sin(lines[:, 1])]\n\n    # Cluster the line data\n    labels = cluster.KMeans(n_clusters=4, random_state=170).fit_predict(train_lines)\n    logger.info(\"Line cluster labels: %s\" % labels)\n\n    # Compute the mean line of each cluster\n    lines = [pd.DataFrame(lines[labels == i]).agg({0: 'mean', 1: middle_degree}) for i in range(4)]\n    logger.info(\"Lines after clustering: %s\" % lines)\n\n    # Wrap the (rho, theta) lines into point objects\n    my_points = parseLines(lines, center_point, img)\n    # cv2.imwrite(\"line_img.jpg\", img)\n    # cv2.imshow(\"Image\", img)\n    # cv2.waitKey(0)\n\n    # Pick the four vertices of the rectangle\n    logger.info(\"Computed intersection points: %s\" % my_points)\n    my_points.sort(key=lambda _: _.dis)\n    vertexes = my_points[:4]\n    logger.info(\"Selected rectangle vertices: %s\" % my_points)\n    orderVertexes(vertexes)\n    logger.info(\"Vertices ordered top-left, top-right, bottom-left, bottom-right: %s\" % my_points)\n\n    # Perspective correction\n    std_shape = BlockConf().resize\n    src = np.float32([[v.x, v.y] for v in vertexes])\n    dst = np.float32([[0, 0], [std_shape[0], 0], [0, std_shape[1]], [std_shape[0], std_shape[1]]])\n    M = cv2.getPerspectiveTransform(src, dst)\n\n    # Used for character segmentation and scoring\n    res = cv2.warpPerspective(img, M, std_shape)\n\n    # Used to mark the scores and the failing characters\n    img_score_mark = cv2.warpPerspective(img_resize, M, std_shape)\n\n    # Segmentation\n    words = cutWords(res)\n    (row, col) = BlockConf().shape\n    for i in range(row):\n        for j in range(col):\n            cv2.imwrite(\"img/%s-%s.jpg\" % (i, j), words[i, j])\n\n    # Mark the failing characters\n    mean_score = markWorse(img_score_mark, words)\n\n    # Save the result image\n    dir_path = os.path.join(os.path.dirname(__file__), 'result')\n    file_name = \"%s.jpg\" % uuid.uuid1()\n    result_path = os.path.join(dir_path, file_name)\n    if not os.path.exists(dir_path):\n        os.makedirs(dir_path)\n    cv2.imwrite(result_path, img_score_mark)\n    print(os.path.join(\"result\", file_name + \":\" + str(mean_score)))\n\n\n\nif __name__ == '__main__':\n    main(sys.argv[1])  # main requires the image path as its argument\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"636200352","text":"#Union and Intersection of Two Linked Lists\n#Your task for this problem is to fill out the union and intersection functions. \n# The union of two sets A and B is the set of elements which are in A, in B, or in both A and B. \n# The intersection of two sets A and B, denoted by A ∩ B, is the set of all objects that are members of both the sets A and B.\n\n#You will take in two linked lists and return a linked list that is composed of either the union or intersection, respectively. 
\n# Once you have completed the problem you will create your own test cases and perform your own run time analysis on the code.\n\n#We have provided a code template below, you are not required to use it:\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n def __repr__(self):\n return str(self.value)\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def __str__(self):\n cur_head = self.head\n out_string = \"\"\n while cur_head:\n out_string += str(cur_head.value) + \" -> \"\n cur_head = cur_head.next\n return out_string\n\n\n def append(self, value):\n\n if self.head is None:\n self.head = Node(value)\n return\n\n node = self.head\n while node.next:\n node = node.next\n\n node.next = Node(value)\n\n def size(self):\n size = 0\n node = self.head\n while node:\n size += 1\n node = node.next\n\n return size\n\ndef union(llist_1, llist_2):\n \n current1 = llist_1.head\n current2 = llist_2.head\n\n #: create a set from the linked list\n set1 = set()\n while current1:\n set1.add(current1.value)\n current1 = current1.next\n \n #print(f'set1={set1}')\n\n set2 = set()\n while current2:\n set2.add(current2.value)\n current2 = current2.next\n\n #print(f'set2={set2}')\n\n #: now combind two sets\n union_set = set1.union(set2)\n #print(f'union_set={union_set}')\n\n #: turn the set back to linkedlist\n union_llist = LinkedList()\n\n for item in union_set:\n union_llist.append(item)\n\n return union_llist\n\n\n \ndef intersection(llist_1, llist_2):\n\n current1 = llist_1.head\n current2 = llist_2.head\n\n #: create a set from the linked list\n set1 = set()\n while current1:\n set1.add(current1.value)\n current1 = current1.next\n \n #print(f'set1={set1}')\n\n set2 = set()\n while current2:\n set2.add(current2.value)\n current2 = current2.next\n\n #: create intersection linked list\n intersection_linkedlist = LinkedList()\n\n for item in set1:\n if item in set2:\n intersection_linkedlist.append(item)\n\n return intersection_linkedlist\n\n\n# Test case 1\n\nlinked_list_1 = LinkedList()\nlinked_list_2 = LinkedList()\n\nelement_1 = [3,2,4,35,6,65,6,4,3,21]\nelement_2 = [6,32,4,9,6,1,11,21,1]\n\nfor i in element_1:\n linked_list_1.append(i)\n\nfor i in element_2:\n linked_list_2.append(i)\n\nprint (union(linked_list_1,linked_list_2))\nprint (intersection(linked_list_1,linked_list_2))\n\n# Test case 2\n\nlinked_list_3 = LinkedList()\nlinked_list_4 = LinkedList()\n\nelement_1 = [3,2,4,35,6,65,6,4,3,23]\nelement_2 = [1,7,8,9,11,21,1]\n\nfor i in element_1:\n linked_list_3.append(i)\n\nfor i in element_2:\n linked_list_4.append(i)\n\nprint (union(linked_list_3,linked_list_4))\nprint (intersection(linked_list_3,linked_list_4))\n\n\n# Test case 3: edge case\n\nlinked_list_5 = LinkedList()\nlinked_list_6 = LinkedList()\n\nelement_1 = []\nelement_2 = [1,3,5]\n\nfor i in element_1:\n linked_list_5.append(i)\n\nfor i in element_2:\n linked_list_6.append(i)\n\nprint (union(linked_list_5,linked_list_6))\nprint (intersection(linked_list_5,linked_list_6))\n\n\n# Test case 4: edge case\n\nlinked_list_7 = LinkedList()\nlinked_list_8 = LinkedList()\n\nelement_1 = []\nelement_2 = []\n\nfor i in element_1:\n linked_list_7.append(i)\n\nfor i in element_2:\n linked_list_8.append(i)\n\nprint (union(linked_list_7,linked_list_8))\nprint 
(intersection(linked_list_7,linked_list_8))\n","sub_path":"P1_DataStructure/Problem_6_UnionIntersect.py","file_name":"Problem_6_UnionIntersect.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"156911103","text":"# coding=utf-8\nimport types\n\n\"\"\"\nUtility for parsing configuration files\n\"\"\"\n\n\nclass ConfigParser(dict):\n    def __init__(self, path):\n        self.path = path\n        super(ConfigParser, self).__init__()\n\n        d = types.ModuleType('config')\n        d.__file__ = self.path\n        try:\n            with open(self.path) as config_file:\n                exec(compile(config_file.read(), self.path, 'exec'), d.__dict__)\n        except IOError as e:\n            raise e\n\n        for key in dir(d):\n            if key.isupper():\n                self[key] = getattr(d, key)\n","sub_path":"echecs_hall/app/extensions/config_parser.py","file_name":"config_parser.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"184837542","text":"import torch\nfrom torch.utils.data import TensorDataset\n\n\nTAG_TO_INDEX = {\n    'B-cause': 1,\n    'I-cause': 2,\n    'B-effect': 3,\n    'I-effect': 4,\n    'O': 5,\n    # 'M': 6\n    # 'B-trigger': 5,\n    # 'I-trigger': 6,\n    # 'B-strength': 7,\n    # 'I-strength': 8\n}\nINDEX_TO_TAG = { TAG_TO_INDEX[key] : key for key in TAG_TO_INDEX }\nNUM_TAGS = len(TAG_TO_INDEX)\n\n\ndef build_dataset_for_bert(raw_dataset, token_to_id, tag_pad_idx=-1, total_length=None, device='cpu'):\n    # raw_dataset: dict, dataset in json format\n    # token_to_id: dict, token -> id\n    all_tokens = []\n    all_tags = []\n    lengths = []\n    for sample in raw_dataset:\n        tokens = [token_to_id[token] if token in token_to_id else token_to_id['[UNK]'] for token in sample['tokens']]\n        tags = [TAG_TO_INDEX[tag] for tag in sample['tags']]\n        tokens = tokens[:(total_length-2)]\n        tags = tags[:(total_length-2)]\n        # add CLS and SEP\n        tokens = [token_to_id['[CLS]']] + tokens + [token_to_id['[SEP]']]\n        tags = [tag_pad_idx] + tags + [tag_pad_idx] # the output for CLS and SEP will be ignored, because their ground-truth tag is PAD\n        lengths.append(len(tokens))\n        all_tokens.append(tokens)\n        all_tags.append(tags)\n\n\n    if total_length is None:\n        total_length = max(lengths)\n\n    assert total_length >= max(lengths)\n    for tokens in all_tokens:\n        while len(tokens) < total_length:\n            tokens.append(token_to_id['[PAD]'])\n\n    for tags in all_tags:\n        while len(tags) < total_length:\n            tags.append(tag_pad_idx)\n\n    all_tokens = torch.LongTensor(all_tokens).to(device)\n    all_tags = torch.LongTensor(all_tags).to(device)\n    lengths = torch.LongTensor(lengths).to(device)\n    dataset = TensorDataset(all_tokens, all_tags, lengths)\n    # print(dataset[0])\n    # exit(0)\n    return dataset\n\n\ndef build_dataset_for_bert_crf(raw_dataset, token_to_id, total_length=None, device='cpu'):\n    all_tokens = []\n    all_tags = []\n    all_crf_tags = []\n    lengths = []\n\n    for sample in raw_dataset:\n        tokens = [token_to_id[token] if token in token_to_id else token_to_id[\"\"] for token in sample[\"tokens\"]]\n        # tokens = tokens[:(total_length-2)]\n        # add CLS and SEP\n        tokens = [token_to_id['']] + tokens + [token_to_id['']]\n        # adjust tags to CRF format\n        # END aligns with [SEP], but no token aligns with [CLS]\n        # len(tags) = len(tokens) - 1\n        num_tags = NUM_TAGS + 2 # add START and END\n        start_tag_idx = num_tags - 2\n        end_tag_idx = num_tags - 1\n        tags = [TAG_TO_INDEX[tag] for tag in sample['tags']]\n        # tags = tags[:(total_length-2)]\n\n        crf_tags = [start_tag_idx * num_tags + tags[0]]\n\n        for i in range(len(tags) - 1):\n            
crf_tags.append(tags[i] * num_tags + tags[i + 1])\n crf_tags.append(tags[-1] * num_tags + end_tag_idx)\n # add to dataset\n lengths.append(len(tokens))\n all_tokens.append(tokens)\n all_tags.append(tags)\n all_crf_tags.append(crf_tags)\n\n if total_length is None:\n total_length = max(lengths)\n assert total_length >= max(lengths)\n for tokens in all_tokens:\n while len(tokens) < total_length:\n tokens.append(token_to_id[''])\n for tags in all_tags:\n while len(tags) < total_length - 2:\n tags.append(end_tag_idx)\n for crf_tags in all_crf_tags:\n while len(crf_tags) < total_length - 1:\n crf_tags.append(end_tag_idx * num_tags + end_tag_idx)\n\n all_tokens = torch.LongTensor(all_tokens).to(device)\n all_tags = torch.LongTensor(all_tags).to(device)\n all_crf_tags = torch.LongTensor(all_crf_tags).to(device)\n lengths = torch.LongTensor(lengths).to(device)\n dataset = TensorDataset(all_tokens, all_tags, all_crf_tags, lengths)\n return dataset\n","sub_path":"BertCRF/bio_dataset.py","file_name":"bio_dataset.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"512598889","text":"import partitions\nimport array\n\ndeck = ([4]*9)\ndeck.append(16)\n\ndeck = array.array('i', deck)\n\nd = 0\n\nfor i in range(0, 10):\n\n # Dealer showing\n\n deck[i] = deck[i]-1\n\n p = 0\n for j in range(0, 10):\n deck[j] = deck[j]-1\n n = partitions.partitions(deck, j+1)\n #print('Starting with ', j, n)\n deck[j] = deck[j]+1\n p += n\n\n print('Dealer showing ', i,' partitions =',p)\n d += p\n\n deck[i] = deck[i]+1\n\nprint('Total partitions =',d)\n","sub_path":"blackjack/cython-array/outcomes.py","file_name":"outcomes.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321210660","text":"\nclass Solution:\n def defangIPaddr(self, address: str) -> str:\n\n res = \"\"\n\n for s in address:\n if s == '.':\n res = res + \"[.]\"\n else:\n res += s\n\n return res\n \n\naddress = \"1.1.1.1\"\n\ns = Solution()\nprint(s.defangIPaddr(address))","sub_path":"defangIPaddr.py","file_name":"defangIPaddr.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176723979","text":"from path import data_path\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import balanced_accuracy_score, classification_report, accuracy_score, recall_score, precision_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.dummy import DummyClassifier\nfrom afinn import Afinn\nfrom wordcloud import WordCloud, STOPWORDS\nimport matplotlib.pyplot as plt\nimport statistics\nimport seaborn as sns\n\n\ndf = pd.read_csv(data_path)\n\n'''\n-------------------- Preprocessing -------------------\n'''\n\n# Keep only speeches from 2015 and later\ndf['Dato'] = df['Dato'].str.slice(start = 6, stop = 10)\ndf['Dato'] = df['Dato'].astype(int)\ndf = df[df['Dato'] > 2014]\n\n# Unique speakers\nspeakers = df['Title']\nunique_speakers = df['Title'].unique()\n\n# Filter out formanden\ndf = df[~df.Title.isin(['formand', 'Henrik Dam Kristensen', 'Leif Mikkelsen', 'Næstformand', 'Pia Kjærsgaard', 
'næstformand'])]\n\n# Replace nan, party names, etc.\ndf['text'] = df.text.str.replace('nan' , '')\ndf['text'] = df.text.str.replace('tikke' , 'tak')\ndf['text'] = df.text.str.replace('matiasse' , 'mathias')\ndf['text'] = df.text.str.replace('alternativ' , '')\ndf['text'] = df.text.str.replace('dansk folkeparti' , '')\ndf['text'] = df.text.str.replace('liberal alliance' , '')\ndf['text'] = df.text.str.replace('radikal venstre' , '')\ndf['text'] = df.text.str.replace('radikal' , '')\ndf['text'] = df.text.str.replace('socialdemokrati' , '')\ndf['text'] = df.text.str.replace('socialistisk folkeparti' , '')\ndf['text'] = df.text.str.replace('det konservativ folkeparti' , '')\ndf['text'] = df.text.str.replace('venstre' , '')\ndf['text'] = df.text.str.replace('enhedslisten' , '')\ndf['text'] = df.text.str.replace('enhedslist' , '')\ndf['text'] = df.text.str.replace('konservativ folkeparti' , '')\ndf['text'] = df.text.str.replace('sk' , '')\n\n# Remove all speeches shorter than 80 characters\ndf=df[(df.text.astype(str).str.len()>80)]\n\n# How many speeches each speaker has, how many parties that are etc.\ncounts = df['Title'].value_counts()\ndf['Parti'].unique()\ndf['Parti'].value_counts()\n\n# Standardize party names and include the 9 biggest (Nye Borgerlige not annotated)\ndf.loc[df['Parti'] == 'Fælles', 'Name']\ndf.loc[df['Name'] == 'Pernille Vermund', 'Parti']\n\ndf['Parti'] = df['Parti'].replace({'Dansk': 'Dansk_Folkeparti'})\ndf['Parti'] = df['Parti'].replace({'DF': 'Dansk_Folkeparti'})\ndf['Parti'] = df['Parti'].replace({'Radikale': 'Radikale_Venstre'})\ndf['Parti'] = df['Parti'].replace({'Socialistisk': 'Socialistisk_Folkeparti'})\ndf['Parti'] = df['Parti'].replace({'Liberal': 'Liberal_Alliance'})\ndf['Parti'] = df['Parti'].replace({'V': 'Venstre'})\ndf['Parti'] = df['Parti'].replace({'Socialdemokraterne': 'Socialdemokratiet'})\ndf['Parti'] = df['Parti'].replace({'S': 'Socialdemokratiet'})\ndf['Parti'] = df['Parti'].replace({'Det': 'Det_Konservative_Folkeparti'})\ndf['Parti'] = df['Parti'].replace({'Konservative': 'Det_Konservative_Folkeparti'})\n\ndf = df.loc[df['Parti'].isin(['Dansk_Folkeparti', 'Radikale_Venstre', 'Venstre', 'Socialdemokratiet', 'Socialistisk_Folkeparti', 'Liberal_Alliance', 'Det_Konservative_Folkeparti', 'Enhedslisten', 'Alternativet'])]\n\n# Remove NaN from text and Parti and check that it worked\ndf = df.dropna(subset=['text', 'Parti'])\ndf['text'].isna().values.sum()\n\n\n'''\n-------------------- Binary Classification -------------------\n'''\n\ndf['Parti_b'] = df['Parti'].copy()\n\n# Binary Classification liberal/socialist\ndf['Parti_b'] = df['Parti_b'].replace({'Det_Konservative_Folkeparti': 0})\ndf['Parti_b'] = df['Parti_b'].replace({'Dansk_Folkeparti': 0})\ndf['Parti_b'] = df['Parti_b'].replace({'Radikale_Venstre': 0})\ndf['Parti_b'] = df['Parti_b'].replace({'Venstre': 0})\ndf['Parti_b'] = df['Parti_b'].replace({'Socialdemokratiet': 1})\ndf['Parti_b'] = df['Parti_b'].replace({'Socialistisk_Folkeparti': 1})\ndf['Parti_b'] = df['Parti_b'].replace({'Liberal_Alliance': 0})\ndf['Parti_b'] = df['Parti_b'].replace({'Enhedslisten': 1})\ndf['Parti_b'] = df['Parti_b'].replace({'Alternativet': 1})\n\n# TfidfVectorizer\nvectorizer_tfidf = TfidfVectorizer(sublinear_tf=True, \n max_df=0.3,\n min_df=100,\n lowercase=True,\n stop_words=None, \n max_features=20000,\n tokenizer=None,\n ngram_range=(1,4)\n )\n\n# Count vectorizer\nvectorizer_count = CountVectorizer(ngram_range =(1,4), max_features=20000)\n\n# Split data in train and test\nX_train, X_test, y_train, y_test = 
train_test_split(df['text'],df['Parti_b'],random_state=0)\n\n# Vectorize train, count\nvect_count = vectorizer_count.fit(X_train)\nX_train_vectorized_count = vect_count.transform(X_train)\n\n# Vectorize train, tf-idf\nvect_tfidf = vectorizer_tfidf.fit(X_train)\nX_train_vectorized_tfidf = vect_tfidf.transform(X_train)\n\n# Fit model, count\nclf = MultinomialNB(alpha = 0.1)\nclf.fit(X_train_vectorized_count, y_train)\npred_count = clf.predict(vect_count.transform(X_test))\nprint(\" Count, Balanced accuracy score = \" + str(balanced_accuracy_score(y_test, pred_count)))\nprint(\" Count Accuracy score = \" + str(accuracy_score(y_test, pred_count)))\nprint(\" Count, Precision = \" + str(precision_score(y_test, pred_count)))\nprint(\" Count, Recall = \" + str(recall_score(y_test, pred_count)))\n\n# Fit model, tfidf\nclf = MultinomialNB(alpha = 0.1)\nclf.fit(X_train_vectorized_tfidf, y_train)\npred_tfidf = clf.predict(vect_tfidf.transform(X_test))\nprint(\" Tf-idf, Balanced accuracy score = \" + str(balanced_accuracy_score(y_test, pred_tfidf)))\nprint(\" Tf-idf, Accuracy score = \" + str(accuracy_score(y_test, pred_tfidf)))\nprint(\" Tf-idf, Precision = \" + str(precision_score(y_test, pred_tfidf)))\nprint(\" Tf-idf, Recall = \" + str(recall_score(y_test, pred_tfidf)))\n\n# Binary, count, acurracy: 69.3, balanced accuracy: 69.3, precision: 68.5, recall 71.7\n# Binary, tdidf, acurracy: 65.5, balanced accuracy: 65.5, precision 65.2, recall 67.1\n\n'''\n-------------------- Multilabel Classification -------------------\n'''\n\n# Give each party a number\nle = preprocessing.LabelEncoder()\ntarget = le.fit_transform(df['Parti'])\nle_name_mapping = dict(zip(le.classes_, le.transform(le.classes_)))\nprint(le_name_mapping)\n\n# Split data in train and test\nX_train, X_test, y_train, y_test = train_test_split(df['text'],df['Parti'],random_state=0)\n\n# Vectorize train, count\nvect_count = vectorizer_count.fit(X_train)\nX_train_vectorized_count = vect_count.transform(X_train)\n\n# Vectorize train, tf-idf\nvect_tfidf = vectorizer_tfidf.fit(X_train)\nX_train_vectorized_tfidf = vect_tfidf.transform(X_train)\n\n# Fit model, count\nclf = LogisticRegression(random_state=0, class_weight = 'balanced', max_iter=1000).fit(X_train_vectorized_count, y_train)\npred_count_balanced = clf.predict(vect_count.transform(X_test))\nprint(\" Count, Balanced accuracy score = \" + str(balanced_accuracy_score(y_test, pred_count_balanced)))\nprint(\" Count Accuracy score = \" + str(accuracy_score(y_test, pred_count_balanced)))\nreport_count = classification_report(y_test, pred_count_balanced)\n\n# Fit model, tfidf\nclf = LogisticRegression(random_state=0, class_weight = 'balanced', max_iter=1000).fit(X_train_vectorized_tfidf, y_train)\npred_tfidf_balanced = clf.predict(vect_tfidf.transform(X_test))\nprint(\" Tf-idf, Balanced accuracy score = \" + str(balanced_accuracy_score(y_test, pred_tfidf_balanced)))\nprint(\" Tf-idf, Accuracy score = \" + str(accuracy_score(y_test, pred_tfidf_balanced)))\nreport_tfidf = classification_report(y_test, pred_tfidf_balanced)\n\n# count balanced accuracy: 42\n# count accuracy: 43.5\n# tf-idf balanced accuracy: 43.5\n# tf-idf accuracy: 41.5\n\n'''\n-------------------- Baseline Models -------------------\n'''\n\ndummy_clf = DummyClassifier(strategy=\"most_frequent\")\ndummy_clf.fit(X_train, y_train)\nDummyClassifier(strategy='uniform')\ndummy_clf.predict(X_train)\ndummy_clf.score(X_test, y_test)\n\n# Multilabel\n# Most frequent: 19.7 %\n# Stratified: 14.2 %\n# Uniform: 11.3\n\n# Binary 50.46 
%\n\n\n'''\n------------------- Feature importance -----------------\n'''\n\ndef print_top10(vectorizer, clf, class_labels):\n \"\"\"Prints features with the highest coefficient values, per class\"\"\"\n feature_names = vectorizer.get_feature_names()\n for i, class_label in enumerate(class_labels):\n top10 = np.argsort(clf.coef_[i])[-10:]\n print(\"%s: %s\" % (class_label,\n \", \".join(feature_names[j] for j in top10)))\n\nprint_top10(vectorizer_tfidf, clf, le_name_mapping)\nprint_top10(vectorizer_count, clf, le_name_mapping)\n\n\n'''\n-------------------- Sentiment scores -------------------\n'''\n\n# Calculate sentiment scores\nafinn = Afinn(language='da')\ndf['sentiment'] = df['text'].apply(afinn.score)\ndf['sentiment_category'] = ['positive' if score > 0 \n else 'negative' if score < 0 \n else 'neutral' \n for score in df['sentiment']]\n\n# Print mean sentiment score per party\nfor parti in df.Parti.unique():\n print(parti + \": \" + str(statistics.mean(df.sentiment[df.Parti == parti])))\n\n# Plot sentiment scores\ndf_ny = df.copy()\ndf_ny['Parti'] = df_ny['Parti'].replace({'Dansk_Folkeparti': 'DF'})\ndf_ny['Parti'] = df_ny['Parti'].replace({'Radikale_Venstre': 'RV'})\ndf_ny['Parti'] = df_ny['Parti'].replace({'Socialistisk_Folkeparti': 'SF'})\ndf_ny['Parti'] = df_ny['Parti'].replace({'Liberal_Alliance': 'LA'})\ndf_ny['Parti'] = df_ny['Parti'].replace({'Socialdemokratiet': 'S'})\ndf_ny['Parti'] = df_ny['Parti'].replace({'Det_Konservative_Folkeparti': 'Kons.'})\ndf_ny['Parti'] = df_ny['Parti'].replace({'Enhedslisten': 'Enh.'})\ndf_ny['Parti'] = df_ny['Parti'].replace({'Alternativet': 'Alt.'})\ndf_ny['Parti'] = df_ny['Parti'].replace({'Venstre': 'V'})\n\npalette ={\"DF\":\"steelblue\",\"RV\":\"deeppink\",\"SF\":\"salmon\", \"LA\":\"orange\", \"S\":\"firebrick\", \"Kons.\":\"darkgreen\", \"Enh.\":\"crimson\", \"Alt.\":\"chartreuse\", \"V\":\"cornflowerblue\"}\nsns.set(style=\"whitegrid\")\ns = sns.stripplot(x=\"Parti\", y=\"sentiment\", data=df_ny, palette = palette, jitter=True)\nfig = s.get_figure()\nfig.savefig(\"sentiment_scores.png\")\n\n# Plot sentiment categories for each party\nfc = sns.factorplot(x=\"Parti\", hue=\"sentiment_category\", \n data=df_ny, kind=\"count\",\n palette={\"negative\": \"#FE2020\", \n \"positive\": \"#BADD07\", \n \"neutral\": \"#68BFF5\"})\n\nfc.savefig(\"sentiment_category.png\")\n\n# Create separate dataframes for socialist and liberal parties\nliberal = df[(df['Parti'] == 'Venstre') | (df['Parti']=='Det_Konservative_Folkeparti')| (df['Parti']=='Liberal_Alliance')| (df['Parti']=='Dansk_Folkeparti')| (df['Parti']=='Radikale_Venstre')]\nliberal = liberal.dropna(subset=['text', 'Parti'])\n\nsocialist = df[(df['Parti'] == 'Socialdemokratiet') | (df['Parti']=='Socialistisk_Folkeparti')| (df['Parti']=='Enhedslisten')| (df['Parti']=='Alternativet')]\nsocialist = socialist.dropna(subset=['text', 'Parti'])\n\n# Mean sentiment for liberal and socialist\nstatistics.mean(liberal.sentiment)\nstatistics.mean(socialist.sentiment)\n\n\n'''\n-------------------- Word clouds -------------------\n'''\n\n# Liberal word cloud\ntext = liberal.text.values\nwordcloud = WordCloud(\n width = 3000,\n height = 2000,\n background_color = 'white',\n colormap= \"Blues\",\n stopwords = None).generate(str(text))\nfig = plt.figure(\n figsize = (40, 30),\n facecolor = 'k',\n edgecolor = 'k')\nplt.imshow(wordcloud, interpolation = 'bilinear')\nplt.axis('off')\nplt.tight_layout(pad=0)\nplt.show()\n\n# Socialist word cloud\ntext = socialist.text.values\nwordcloud = WordCloud(\n width = 
3000,\n height = 2000,\n background_color = 'white',\n colormap= \"Reds\",\n stopwords = None).generate(str(text))\nfig = plt.figure(\n figsize = (40, 30),\n facecolor = 'k',\n edgecolor = 'k')\nplt.imshow(wordcloud, interpolation = 'bilinear')\nplt.axis('off')\nplt.tight_layout(pad=0)\nplt.show()\n\n\n\n\n\n\n\n\n\n","sub_path":"code_NLP_AmalieSoerensen.py","file_name":"code_NLP_AmalieSoerensen.py","file_ext":"py","file_size_in_byte":12168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"148429728","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#------------------------------------------------------------------------------#\n#\n# Scripts for processing of WRF and CAMx files to PALM dynamic driver\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n# Copyright 2018-2021 Institute of Computer Science\n# of the Czech Academy of Sciences, Prague\n# Authors: Krystof Eben, Jaroslav Resler, Pavel Krc\n#\n#------------------------------------------------------------------------------#\n'''Configuration module.\n\nConfiguration options are sourced into this module's globals.\n'''\n\nimport os.path\nfrom pathlib import Path\nimport inspect\n\n\n# Just for PyCharm and similar IDEs to allow autocompletion from config values\nif False:\n from palm_dynamic_defaults import *\n from palm_dynamic_init import *\n\ndef configure(configname):\n global dir_scripts\n # get path of the palm_dynamic script to source default config and init\n dir_scripts = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n print('Running palm_dynamic from:', dir_scripts)\n # use config defaults\n configdefaultsfile = os.path.join(dir_scripts, \"palm_dynamic_defaults.py\")\n print('Default case config: ', configdefaultsfile)\n # user config file is located in configurations directory\n configfile = os.path.join(os.path.abspath(Path(dir_scripts).parent), \"configurations\", configname + '.conf')\n print('User case config:', configfile)\n # initialization of standard parameters done in script\n standardinitfile = os.path.join(dir_scripts, \"palm_dynamic_init.py\")\n print('Standard initialization: ', standardinitfile)\n # check existence of the supplied config file\n if not os.path.isfile(configfile):\n print(\"Config file \" + configfile + \" does not exists!\")\n print_help()\n exit(2)\n # read default config values\n exec(open(configdefaultsfile).read(), globals())\n # read user configuration\n exec(open(configfile).read(), globals())\n # perform the standard initialization\n exec(open(standardinitfile).read(), globals())\n","sub_path":"wrf2palm/WRF_interface_debug/dynamic/palm_dynamic_config.py","file_name":"palm_dynamic_config.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"287260755","text":"from django.shortcuts import render, redirect\nfrom django.utils.decorators import 
method_decorator\nfrom account.views import LoginView\nfrom account.decorators import login_required_content\nfrom account.models import Account\nfrom .models import Contact\nfrom .forms import ContactForm\n# Create your views here.\ndef contact_complete(request):\n return render(request, 'contact_complete.html')\n\n@login_required_content\ndef content(request, *args, **kwargs):\n return render(request, 'content.html', { 'email': request.session.get('user') })\n\n@login_required_content\ndef contact(request, *args, **kwargs):\n if not request.session.get('user'):\n return redirect('/login')\n\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n user_id = request.session.get('user')\n account = Account.objects.get(email=user_id)\n\n contact = Contact()\n contact.title = form.cleaned_data['title']\n contact.contents = form.cleaned_data['contents']\n contact.writer = account\n contact.save()\n\n return redirect('/contact_complete')\n else:\n form = ContactForm()\n return render(request, 'contact.html', {'form': form, 'email': request.session.get('user')})\n","sub_path":"4. Django_Framework/Django_Web/content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462866938","text":"'''\r\nScraping Numbers from HTML using BeautifulSoup In this assignment you will write a Python program similar to http://www.py4e.com/code3/urllink2.py. The program will use urllib to read the HTML from the data files below, and parse the data, extracting numbers and compute the sum of the numbers in the file.\r\n'''\r\n\r\nfrom urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\nimport ssl\r\nimport re\r\n\r\nctx = ssl.create_default_context()\r\nctx.check_hostname = False\r\nctx.verify_mode = ssl.CERT_NONE\r\n\r\nurl = input('Enter - ')\r\nhtml = urlopen(url, context=ctx).read()\r\nsoup = BeautifulSoup(html, \"html.parser\")\r\n\r\nslist=list()\r\nv=0\r\ntags = soup('span')\r\nfor tag in tags:\r\n tag=tag.decode().split()\r\n num1=tag[1]\r\n x=re.findall('[0-9]+',num1)\r\n for num in x:\r\n if num==None:\r\n continue\r\n else:\r\n \t v=v+1\r\n \t num=int(num)\r\n \t slist.append(num)\r\ns=sum(slist)\r\nprint('Sum=',s)\r\n","sub_path":"Using Python to Access Web Data/Scraping HTML Data with BeautifulSoup.py","file_name":"Scraping HTML Data with BeautifulSoup.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"39711804","text":"\"\"\"\r\nClone of 2048 game.\r\n\"\"\"\r\n\r\nimport poc_2048_gui\r\nimport random\r\n\r\n# Directions, DO NOT MODIFY\r\nUP = 1\r\nDOWN = 2\r\nLEFT = 3\r\nRIGHT = 4\r\n\r\n# Offsets for computing tile indices in each direction.\r\n# DO NOT MODIFY this dictionary.\r\nOFFSETS = {UP: (1, 0),\r\n DOWN: (-1, 0),\r\n LEFT: (0, 1),\r\n RIGHT: (0, -1)}\r\n\r\ndef merge(line):\r\n \"\"\"\r\n Helper function that merges a single row or column in 2048\r\n \"\"\"\r\n nline=[]\r\n itr=0\r\n while itr < len(line):\r\n #making the new list\r\n nline.append(0)\r\n itr += 1\r\n\r\n\r\n key=0\r\n for dummy_j in line:\r\n #inserting elements\r\n if dummy_j != 0:\r\n nline[key] = dummy_j\r\n key += 1\r\n\r\n\r\n task=0\r\n while task < len(line)-1:\r\n #shifting\r\n if(nline[task] == 0):\r\n break;\r\n if nline[task] == nline[task+1]:\r\n nline[task]*=2\r\n nline[task+1]=0\r\n task+=2\r\n else:\r\n task+=1\r\n\r\n\r\n fline=[]\r\n itf=0\r\n 
while itf < len(nline):\r\n #final list\r\n fline.append(0)\r\n itf += 1\r\n\r\n kar=0\r\n for dummy_j in nline:\r\n #shifting into final list\r\n if dummy_j != 0:\r\n fline[kar] = dummy_j\r\n kar += 1\r\n\r\n\r\n return fline\r\n\r\nclass TwentyFortyEight:\r\n \"\"\"\r\n Class to run the game logic.\r\n \"\"\"\r\n\r\n def __init__(self, _gridheight_, _gridwidth_):\r\n # replace with your code\r\n self._gridheight_=_gridheight_\r\n self._gridwidth_=_gridwidth_\r\n self._grid_={}\r\n\r\n up_list = [(0, col) for col in range(self._gridwidth_)]\r\n down_list = [(self._gridheight_-1, col) for col in range(self._gridwidth_)]\r\n left_list = [(row, 0) for row in range(self._gridheight_)]\r\n right_list = [(row, self._gridwidth_-1) for row in range(self._gridheight_)]\r\n self._tiles_ = {UP:up_list, DOWN:down_list, LEFT:left_list, RIGHT:right_list}\r\n\r\n self.reset()\r\n\r\n def reset(self):\r\n \"\"\"\r\n Reset the game so the grid is empty except for two\r\n initial tiles.\r\n \"\"\"\r\n # replace with your code\r\n\r\n self._grid_=[[0 for col in range(self._gridwidth_)] for row in range(self._gridheight_)]\r\n self.new_tile()\r\n self.new_tile()\r\n\r\n def __str__(self):\r\n \"\"\"\r\n Return a string representation of the grid for debugging.\r\n \"\"\"\r\n string=\"\"\r\n\r\n for row in range(self._gridheight_):\r\n string+=\"[\"\r\n for col in range(self._gridwidth_):\r\n if col == self._gridwidth_-1:\r\n string += str(self._grid_[row][col])\r\n else:\r\n string += str(self._grid_[row][col])+\",\"\r\n string+=\"]\"+\"\\n\"\r\n return string\r\n\r\n def get__gridheight_(self):\r\n \"\"\"\r\n Get the height of the board.\r\n \"\"\"\r\n return self._gridheight_\r\n\r\n def get__gridwidth_(self):\r\n \"\"\"\r\n Get the width of the board.\r\n \"\"\"\r\n\r\n return self._gridwidth_\r\n\r\n def move(self, direction):\r\n \"\"\"\r\n Move all tiles in the given direction and add\r\n a new tile if any tiles moved.\r\n \"\"\"\r\n change_tile= False\r\n\r\n for itr in self._tiles_[direction]:\r\n temp_list=[]\r\n\r\n\r\n curr_row=itr[0]\r\n curr_col=itr[1]\r\n\r\n\r\n while 0 <= curr_row < self._gridheight_ and 0 <= curr_col < self._gridwidth_:\r\n\r\n temp_list.append(self._grid_[curr_row][curr_col])\r\n curr_row += OFFSETS[direction][0]\r\n curr_col += OFFSETS[direction][1]\r\n\r\n\r\n temp_list=merge(temp_list)\r\n\r\n curr_row = itr[0]\r\n curr_col = itr[1]\r\n index = 0\r\n\r\n while 0 <= curr_row < self._gridheight_ and 0 <= curr_col < self._gridwidth_:\r\n if self._grid_[curr_row][curr_col] != temp_list[index]:\r\n change_tile = True\r\n\r\n self._grid_[curr_row][curr_col] = temp_list[index]\r\n curr_row += OFFSETS[direction][0]\r\n curr_col += OFFSETS[direction][1]\r\n index += 1\r\n\r\n if change_tile == True:\r\n self.new_tile()\r\n\r\n def new_tile(self):\r\n \"\"\"\r\n Create a new tile in a randomly selected empty\r\n square. 
The tile should be 2 90% of the time and\r\n 4 10% of the time.\r\n \"\"\"\r\n val=1\r\n\r\n new_row = random.randrange(0,self._gridheight_)\r\n new_col = random.randrange(0,self._gridwidth_)\r\n\r\n while val != 0:\r\n new_row=random.randrange(0,self._gridheight_)\r\n new_col=random.randrange(0,self._gridwidth_)\r\n val=self._grid_[new_row][new_col]\r\n\r\n num=random.random()\r\n\r\n if num < 0.1:\r\n self._grid_[new_row][new_col]=4\r\n else:\r\n self._grid_[new_row][new_col]=2\r\n\r\n\r\n\r\n def set_tile(self, row, col, value):\r\n \"\"\"\r\n Set the tile at position row, col to have the given value.\r\n \"\"\"\r\n self._grid_[row][col]=value\r\n\r\n def get_tile(self, row, col):\r\n \"\"\"\r\n Return the value of the tile at position row, col.\r\n \"\"\"\r\n return self._grid_[row][col]\r\n\r\n\r\npoc_2048_gui.run_gui(TwentyFortyEight(4, 4))\r\n","sub_path":"2048.py","file_name":"2048.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343975820","text":"#-*- coding: utf-8 -*-\r\n'''\r\n@author: matsuyamanori@gmail.com\r\n'''\r\n\r\nimport urllib\r\nimport time\r\nimport json\r\nimport threading\r\nfrom ws4py.client.threadedclient import WebSocketClient\r\nfrom tplight import KL130\r\n\r\nclass SmartLight(WebSocketClient):\r\n def __init__(self, url, lightIP):\r\n print('*** __init__')\r\n self.url = url\r\n self.count = 0\r\n self.lastMessage = None\r\n self.light = KL130(lightIP)\r\n self.light.ransition_period = 0\r\n self.light.off()\r\n super().__init__(url, protocols=['http-only', 'chat'])\r\n\r\n def go(self):\r\n print('*** go')\r\n t = threading.Thread(target=self.go_sub)\r\n t.start()\r\n\r\n def go_sub(self):\r\n print('*** go_sub')\r\n self.connect()\r\n # self.run_forever\r\n \r\n def opened(self):\r\n print(\"*** opened\")\r\n\r\n def stop(self):\r\n print('*** stop')\r\n self.close()\r\n \r\n def closed(self, code, reason=None):\r\n print('*** closed:' + str(code) + ':' + reason)\r\n if code == 1006 :\r\n raise TimeoutError(str(code) + ':' + reason)\r\n \r\n def received_message(self, message):\r\n print('*** received_message')\r\n if len(message.data) > 0: \r\n self.count += 1\r\n self.lastMessage = json.loads(message.data)\r\n print(self.lastMessage)\r\n try:\r\n switch = self.lastMessage[1]['value']['value']['LightSwitchState']\r\n color = self.lastMessage[1]['value']['value']['LightColor']\r\n self.lightControl(switch, color)\r\n except (KeyError, TypeError) as e:\r\n print('*** received_message:except:' + str(self.lastMessage))\r\n print(e)\r\n\r\n def unhandled_error(self, error):\r\n print('*** unhandled_error:', error)\r\n \r\n def showLastMessage(self):\r\n print('*** showLastMessage')\r\n last = self.lastMessage\r\n print(str(self.count) + ':' + json.dumps(last, ensure_ascii=False))\r\n \r\n def lightControl(self, switch, color):\r\n print('*** lightSwith')\r\n if switch == False:\r\n self.light.off()\r\n else :\r\n self.light.on()\r\n if color == 'White' :\r\n self.light.temperature = 9000\r\n elif color == 'Red' :\r\n for _ in range(10):\r\n self.light.hsb = (10, 100, 100) \r\n time.sleep(0.5)\r\n self.light.hsb = (0, 0, 0) \r\n time.sleep(0.5)\r\n self.light.hsb = (10, 100, 100) \r\n \r\n \r\nif __name__ == '__main__':\r\n lightIP = '192.168.1.9'\r\n url = 'wss://aitc2.dyndns.org'\r\n query = '/openmasami/sample01/read/path/1F/居間/照明'\r\n agent = [ ('AGENTID', 'SmartLight') ]\r\n endPoint = url + urllib.parse.quote(query.encode('utf-8')) + '?' 
+ urllib.parse.urlencode(agent)\r\n    print(endPoint)\r\n    while True:\r\n        try:\r\n            smartLight = SmartLight(endPoint, lightIP)\r\n            smartLight.go()\r\n            while True:\r\n                input()\r\n                smartLight.showLastMessage()\r\n        except TimeoutError as e:\r\n            print('*** TimeoutError:', e) \r\n        except KeyboardInterrupt:\r\n            smartLight.lightControl(False, 'XXX')\r\n            smartLight.stop()\r\n            exit(1)\r\n\r\n\r\n","sub_path":"SmartLight.py","file_name":"SmartLight.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"578323912","text":"import turtle\nimport time\n\n# A Python program is a series of commands executed from top to bottom\n# Program flow can be controlled with loops and if statements\n# You don't have to do everything yourself: import modules instead\n# Functions help you reuse code and make programs easier to understand and maintain\n# Variables store information for later use\n\n\nboxsize = 200\ncaught = False\nscore = 0\n\n\n# functions that are called on keypresses\ndef up():\n    mouse.forward(10)\n    checkbound()\n\n\ndef left():\n    mouse.left(45)\n\n\ndef right():\n    mouse.right(45)\n\n\ndef back():\n    mouse.backward(10)\n    checkbound()\n\n\ndef quitTurtles():\n    window.bye()\n\n\n# stop the mouse from leaving the square set by box size\ndef checkbound():\n    global boxsize\n    if mouse.xcor() > boxsize:\n        mouse.goto((boxsize, mouse.ycor()))\n    if mouse.xcor() < -boxsize:\n        mouse.goto((-boxsize, mouse.ycor()))\n    if mouse.ycor() > boxsize:\n        mouse.goto((mouse.xcor(), boxsize))\n    if mouse.ycor() < -boxsize:\n        mouse.goto((mouse.xcor(), -boxsize))\n\n\n# set up screen\nwindow = turtle.Screen()\nmouse = turtle.Turtle()\ncat = turtle.Turtle()\n\nmouse.penup()\nmouse.goto(100, 100)\n\n# add key listeners\n# when one of these keys is pressed, the matching function is executed\nwindow.onkeypress(up, \"Up\")\nwindow.onkeypress(left, \"Left\")\nwindow.onkeypress(right, \"Right\")\nwindow.onkeypress(back, \"Down\")\nwindow.onkeypress(quitTurtles, \"Escape\")\n\ndifficulty = window.numinput(\"Difficulty\",\n                             \"Enter a difficulty from easy (1) to hard (5) \",\n                             minval=1, maxval=5)\nwindow.listen()\n# main loop\n# note how it changes with difficulty\n# keep looping while the mouse has not been caught by the cat\nwhile not caught:\n    cat.setheading(cat.towards(mouse))\n    cat.forward(8 + difficulty)\n    score = score + 1\n    if cat.distance(mouse) < 5:\n        caught = True\n    time.sleep(0.2 - (0.01 * difficulty))\nwindow.textinput(\"Game Over\", \"Well done. You scored: \" + str(score * difficulty))\nwindow.bye()\n\n","sub_path":"turtle/catandmouse/catandmouse.py","file_name":"catandmouse.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"376864795","text":"def threeSumClosest(nums, target):\n    sm = 99999\n    i = 0\n    nums.sort()\n    cl = 999999\n    while i < len(nums):\n        x = i+1\n        y = len(nums) - 1\n        while x < len(nums) and y > x:\n            val = nums[x] + nums[y] + nums[i]\n            if abs(target - val) < cl:\n                sm = val\n                cl = abs(target - val)\n            elif val < target:\n                x += 1\n            else:\n                y -= 1\n        i += 1\n    print(sm)\n\n\nthreeSumClosest([-1, 2, 1, -4], 1)\n","sub_path":"Algorithm-Python/LeetCode/threeSumClosest.py","file_name":"threeSumClosest.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"165137878","text":"from utlis import *\nimport cv2\n\nw,h = 480,360\npid = [0.5, 0.5, 0]\npError = 0\nstartCounter = 0 #0 for flight, 1 for no flight\n\nmyDrone = initializeTello()\n\nwhile True:\n\n## Flight\n    if startCounter == 0: \n        myDrone.takeoff()\n        startCounter = 1\n\n    ## Step 1\n    img = telloGetFrame(myDrone, w, h)\n    ## Step 2\n    img, info = findFace(img)\n    print(info[0][0])\n    ## Step 3\n    
pError = trackFace(myDrone, info, w, pid, pError)\n\n    cv2.imshow('Image', img)\n    if cv2.waitKey(1) & 0xff == ord('q'):\n        myDrone.land()\n        break  # stop the control loop once the drone has landed","sub_path":"My Drone/Face Tracking/FaceTrackingTello.py","file_name":"FaceTrackingTello.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"214312880","text":"import argparse\n\nimport cv2\nimport os\nimport logging\nimport json\n\nimport numpy as np\n\nfrom generator import BatchGenerator\nfrom preprocessing import TrassirRectShapesAnnotations\n\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef _main_(args):\n    config_path = args.conf\n    jitter = float(args.jitter)\n\n    with open(config_path) as config_buffer:\n        config = json.load(config_buffer)\n\n    validation_datasets = [{**ds, 'path': os.path.join(config['train']['images_dir'], ds['path'])}\n                           for ds in config['train']['train_datasets']]\n\n    trassir_annotation = TrassirRectShapesAnnotations([], validation_datasets, config['model']['labels'], config['model']['skip_labels'])\n    trassir_annotation.load()\n    trassir_annotation.print_statistics()\n    validation = trassir_annotation.get_validation_instances(config['train']['verifiers'],\n                                                             config['model']['max_box_per_image'])\n    print('Val len: ', len(validation))\n\n    generator = BatchGenerator(\n        instances=validation,\n        anchors=config['model']['anchors'],\n        labels=config['model']['labels'],\n        downsample=32,\n        max_box_per_image=config['model']['max_box_per_image'],\n        batch_size=config['train']['batch_size'],\n        min_net_size=config['model']['min_input_size'],\n        max_net_size=config['model']['max_input_size'],\n        shuffle=True,\n        jitter=jitter,\n        norm=None,\n        advanced_aug=True\n    )\n\n    for i in range(len(generator)):\n        for image in generator[i][0][0]:\n            cv2.imshow('image', cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_RGB2BGR))\n            key = cv2.waitKeyEx(0) & 0xFF\n            if key == 27:\n                return\n\n\nif __name__ == '__main__':\n    argparser = argparse.ArgumentParser(\n        description='Batch generator test')\n\n    argparser.add_argument(\n        '-c',\n        '--conf',\n        default='config.json',\n        help='path to configuration file')\n    argparser.add_argument(\n        '-j',\n        '--jitter',\n        default=0.0,\n        help='augmentation strength')\n\n    args = argparser.parse_args()\n    _main_(args)\n","sub_path":"generator_test.py","file_name":"generator_test.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"571495022","text":"'''\nCreated on May 30, 2016\n\n@author: ubuntu\n'''\n\nfrom Device import Device\nimport paho.mqtt.client as mqtt\n\n\n\nclass Remote(Device):\n    '''\n    classdocs\n    '''\n    \n    temperature = '0'\n    control_temperature = '0'\n    \n    def __init__(self):\n        super(Remote,self).__init__(\"tmp0000001\")\n        \n\n    def subscribe(self):\n        def on_connect(client, userdata, rc):\n            print(\"Connected with result code \"+str(rc))\n\n            client.subscribe(self._subscribe_queue)\n\n        def on_message(client, userdata, msg):\n            print (\"Topic: \", msg.topic+\"\\nMessage: \"+str(msg.payload))\n            \n            split = msg.payload.decode('utf-8').split(';')\n            \n            if (split[1] == self.get_serial()):\n                if (split[0] == \"set\"):\n                    if (split[1] == self.get_serial()):\n                        Remote.control_temperature = split[2]\n                        print(\"Temperature set to \" + Remote.control_temperature)\n                else:\n                    print(\"Not the right SERIAL\")\n        \n        \n        client = mqtt.Client()\n        client.on_connect = on_connect\n        client.on_message = on_message\n        client.connect(self._broker, self._port, 60)\n        
client.loop_forever()\n\nif __name__ == '__main__':\n    remote = Remote()\n    remote.subscribe()","sub_path":"Room - aioCOAP/Remote.py","file_name":"Remote.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"554064084","text":"import rospy\nfrom yaw_controller import YawController\nfrom pid import PID\nfrom lowpass import LowPassFilter as LPF\n\nGAS_DENSITY = 2.858\nONE_MPH = 0.44704\n\n\nclass Controller(object):\n    def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit, accel_limit,\n                 wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):\n\n        self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)\n        \n        kp = 0.3\n        ki = 0.1\n        kd = 0.0\n        mn = 0.0\n        mx = 0.9\n        self.throttle_controler = PID(kp, ki, kd, mn, mx)\n        \n        tau = 0.5\n        ts = 0.02\n        self.vel_lpf = LPF(tau, ts)\n        \n        self.vehicle_mass=vehicle_mass\n        self.fuel_capacity=fuel_capacity\n        self.brake_deadband=brake_deadband\n        self.decel_limit=decel_limit\n        self.accel_limit=accel_limit\n        self.wheel_radius=wheel_radius\n        \n        self.wheel_base=wheel_base\n        self.steer_ratio=steer_ratio\n        self.max_lat_accel=max_lat_accel\n        self.max_steer_angle=max_steer_angle\n        \n        self.last_time = rospy.get_time()\n\n\n    def control(self, curr_vel, dbw_enabled, linear_vel, angular_vel):\n        if not dbw_enabled:\n            self.throttle_controler.reset()\n            return 0.0, 0.0, 0.0\n        \n        curr_vel = self.vel_lpf.filt(curr_vel)\n        \n        steering = self.yaw_controller.get_steering(linear_vel, angular_vel, curr_vel)\n        \n        vel_error = linear_vel - curr_vel\n        self.last_vel = curr_vel\n        \n        curr_time = rospy.get_time()\n        sample_time = curr_time - self.last_time\n        self.last_time = curr_time\n        \n        throttle = self.throttle_controler.step(vel_error, sample_time)\n        \n        brake = 0.0\n        if linear_vel < 0.05 and curr_vel < 0.05:\n            throttle = 0.0\n            brake = 700.0\n        elif throttle < 0.1 and vel_error < 0:\n            throttle = 0.0\n            decel = max(vel_error, self.decel_limit)\n            #decel = self.decel_limit\n            brake = abs(decel) * self.vehicle_mass * self.wheel_radius\n        \n        return throttle, brake, steering\n","sub_path":"ros/src/twist_controller/twist_controller.py","file_name":"twist_controller.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"548773224","text":"from keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\nfrom keras.optimizers import SGD, Adadelta\nfrom keras.metrics import categorical_accuracy\n\n# Model definition\nclassifier = Sequential()\n\n# Step 1 - Convolution\n# \"same\" results in padding the input such that the output has the same length as the original input\nclassifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), padding='same', activation = 'elu'))\nclassifier.add(Conv2D(32, (3, 3), activation='elu'))\n\n# Step 2 - Pooling \nclassifier.add(MaxPooling2D(pool_size = (2, 2)))\n\n# Step 3 - Dropout\nclassifier.add(Dropout(0.25))\n\n# Second convolution layer \nclassifier.add(Conv2D(64, (3, 3), padding='same', activation = 'elu'))\nclassifier.add(Conv2D(64, (3, 3), activation='elu'))\nclassifier.add(MaxPooling2D(pool_size = (2, 2)))\nclassifier.add(Dropout(0.25))\n\n# Third convolution layer\nclassifier.add(Conv2D(64, (3, 3), padding='same', activation='elu'))\nclassifier.add(Conv2D(64, (3, 3), activation='elu'))\nclassifier.add(MaxPooling2D(pool_size=(2, 
2)))\nclassifier.add(Dropout(0.25))\n\n# Flattening\nclassifier.add(Flatten())\n\n# Fully connected layers \nclassifier.add(Dense(512, activation = 'tanh'))\nclassifier.add(Dropout(0.5))\nclassifier.add(Dense(128, activation = 'tanh'))\nclassifier.add(Dropout(0.45))\nclassifier.add(Dense(64, activation = 'tanh'))\nclassifier.add(Dropout(0.4))\nclassifier.add(Dense(units = 4, activation = 'softmax'))\n\ngld = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n\n# Compile the CNN\nclassifier.compile(\n    optimizer = 'adadelta', \n    loss = 'categorical_crossentropy', \n    #metrics = ['accuracy'])\n    metrics=['accuracy', categorical_accuracy]\n)\n\n# Fitting the CNN to the images \nfrom keras.preprocessing.image import ImageDataGenerator\n\n# Augmentation configuration\ntrain_data = ImageDataGenerator(\n    rescale = 1./255,\n    rotation_range=180.,\n    shear_range = 0.2,\n    zoom_range = 0.2,\n    horizontal_flip = True\n)\n\ntest_data = ImageDataGenerator(\n    rescale = 1./255\n)\n\ntraining_set = train_data.flow_from_directory(\n    'dataset/traning_set',\n    target_size = (64, 64),\n    batch_size = 32,\n    class_mode = 'categorical'\n)\n\ntest_set = test_data.flow_from_directory(\n    'dataset/test_set',\n    target_size = (64, 64),\n    batch_size = 32,\n    class_mode = 'categorical'\n)\n\n# Fit the model\nhistory = classifier.fit_generator(\n    training_set,\n    steps_per_epoch = len(training_set), #4233\n    epochs = 100,\n    validation_data = test_set,\n    validation_steps = len(test_set) #1060\n)\n\n# Save the model\nclassifier.save('drinks_100e.h5')\n\n\nimport matplotlib.pyplot as plt\n\n# Loss Curves\nplt.figure(figsize=[8,6])\nplt.plot(history.history['loss'],'r',linewidth=3.0)\nplt.plot(history.history['val_loss'],'b',linewidth=3.0)\nplt.legend(['Training loss', 'Validation Loss'],fontsize=18)\nplt.xlabel('Epochs ',fontsize=16)\nplt.ylabel('Loss',fontsize=16)\nplt.title('Loss Curves',fontsize=16)\n \n# Accuracy Curves\nplt.figure(figsize=[8,6])\nplt.plot(history.history['acc'],'r',linewidth=3.0)\nplt.plot(history.history['val_acc'],'b',linewidth=3.0)\nplt.legend(['Training Accuracy', 'Validation Accuracy'],fontsize=18)\nplt.xlabel('Epochs ',fontsize=16)\nplt.ylabel('Accuracy',fontsize=16)\nplt.title('Accuracy Curves',fontsize=16)\n\nplt.show()\n","sub_path":"drinks/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"221471568","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n\nimport os\nimport sys\nsys.path.extend([os.path.dirname(os.path.abspath(__file__))])\nimport cv2\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nimport tensorflow as tf\nimport utils\nfrom OneEuroFilter import OneEuroFilter\n\n\nclass VNectEstimator:\n    # the side length of the bounding box\n    _box_size = 368\n    # this factor indicates that the input box size is 8 times the side length of the output heatmaps\n    _hm_factor = 8\n    # number of the joints to be detected\n    _joints_num = 21\n    # parent joint indexes of each joint (for plotting the skeleton lines)\n    _joint_parents = [16, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14, 1, 4, 7, 10, 13]\n\n    def __init__(self, plot=True, T=False):\n        print('Initializing VnectEstimator...')\n        # whether plot 2d and 3d animation\n        self.plot = plot\n        # whether apply transposed matrix (when camera is flipped)\n        self.T = T\n        # the ratio factors to scale the input image crops, no more than 1.0\n        self.scales = [1]  # or [1, 0.7] to be 
consistent with the author when training\n # initialize one euro filters for all the joints\n config_2d = {\n 'freq': 120,\n 'mincutoff': 1.7,\n 'beta': 0.3,\n 'dcutoff': 1.0\n }\n config_3d = {\n 'freq': 120,\n 'mincutoff': 0.8,\n 'beta': 0.4,\n 'dcutoff': 1.0\n }\n self.filter_2d = [(OneEuroFilter(**config_2d), OneEuroFilter(**config_2d)) for _ in range(self._joints_num)]\n self.filter_3d = [(OneEuroFilter(**config_3d), OneEuroFilter(**config_3d), OneEuroFilter(**config_3d))\n for _ in range(self._joints_num)]\n # load pretrained VNect model\n self.sess = tf.Session()\n saver = tf.train.import_meta_graph('../models/tf_model/vnect_tf.meta' if os.getcwd().endswith('src') else\n './models/tf_model/vnect_tf.meta')\n saver.restore(self.sess, tf.train.latest_checkpoint('../models/tf_model/'if os.getcwd().endswith('src') else\n './models/tf_model/'))\n graph = tf.get_default_graph()\n self.input_crops = graph.get_tensor_by_name('Placeholder:0')\n self.heatmap = graph.get_tensor_by_name('split_2:0')\n self.x_heatmap = graph.get_tensor_by_name('split_2:1')\n self.y_heatmap = graph.get_tensor_by_name('split_2:2')\n self.z_heatmap = graph.get_tensor_by_name('split_2:3')\n\n if self.plot:\n self.ax_3d = plt.axes(projection='3d')\n plt.ion()\n self.ax_3d.clear()\n plt.show()\n print('Initialization done.')\n\n def __call__(self, img_input):\n t = time.time()\n img_input = np.transpose(img_input, axes=[1, 0, 2]).copy() if self.T else img_input\n img_batch = self._gen_input_batch(img_input, self._box_size, self.scales)\n # inference\n hm, xm, ym, zm = self.sess.run([self.heatmap, self.x_heatmap, self.y_heatmap, self.z_heatmap],\n {self.input_crops: img_batch})\n # average scale outputs\n hm_size = self._box_size // self._hm_factor\n hm_avg = np.zeros((hm_size, hm_size, self._joints_num))\n xm_avg = np.zeros((hm_size, hm_size, self._joints_num))\n ym_avg = np.zeros((hm_size, hm_size, self._joints_num))\n zm_avg = np.zeros((hm_size, hm_size, self._joints_num))\n for i in range(len(self.scales)):\n rescale = 1.0 / self.scales[i]\n scaled_hm = utils.img_scale(hm[i, :, :, :], rescale)\n scaled_x_hm = utils.img_scale(xm[i, :, :, :], rescale)\n scaled_y_hm = utils.img_scale(ym[i, :, :, :], rescale)\n scaled_z_hm = utils.img_scale(zm[i, :, :, :], rescale)\n mid = [scaled_hm.shape[0] // 2, scaled_hm.shape[1] // 2]\n hm_avg += scaled_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,\n mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]\n xm_avg += scaled_x_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,\n mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]\n ym_avg += scaled_y_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,\n mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]\n zm_avg += scaled_z_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,\n mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]\n hm_avg /= len(self.scales)\n xm_avg /= len(self.scales)\n ym_avg /= len(self.scales)\n zm_avg /= len(self.scales)\n joints_2d = utils.extract_2d_joints_from_heatmap(hm_avg, self._box_size, self._hm_factor)\n joints_3d = utils.extract_3d_joints_from_heatmap(joints_2d, xm_avg, ym_avg, zm_avg, self._box_size,\n self._hm_factor)\n joints_2d, joints_3d = self._joint_filter(joints_2d, joints_3d)\n # if self.T:\n # joints_2d = joints_2d[:, ::-1]\n # joints_3d = joints_3d[:, [1, 0, 2]]\n print('FPS: {:>2.2f}'.format(1 / (time.time() - t)))\n\n if self.plot:\n # 2d plotting\n frame_square = utils.img_scale_squareify(img_input, self._box_size)\n frame_square = utils.draw_limbs_2d(frame_square, joints_2d, self._joint_parents)\n 
cv2.imshow('2D Prediction', frame_square)\n # 3d plotting\n self.imshow_3d(self.ax_3d, joints_3d, self._joint_parents)\n\n return joints_2d, joints_3d\n\n @staticmethod\n def _gen_input_batch(img_input, box_size, scales):\n # any input image --> sqrared input image acceptable for the model\n img_square = utils.img_scale_squareify(img_input, box_size)\n # generate multi-scale input batch\n input_batch = []\n for scale in scales:\n img = utils.img_scale_padding(img_square, scale) if scale < 1 else img_square\n input_batch.append(img)\n # input image range: [0, 255) --> [-0.4, 0.6)\n input_batch = np.asarray(input_batch, dtype=np.float32) / 255 - 0.4\n return input_batch\n\n def _joint_filter(self, joints_2d, joints_3d):\n for i in range(self._joints_num):\n joints_2d[i, 0] = self.filter_2d[i][0](joints_2d[i, 0], time.time())\n joints_2d[i, 1] = self.filter_2d[i][1](joints_2d[i, 1], time.time())\n\n joints_3d[i, 0] = self.filter_3d[i][0](joints_3d[i, 0], time.time())\n joints_3d[i, 1] = self.filter_3d[i][1](joints_3d[i, 1], time.time())\n joints_3d[i, 2] = self.filter_3d[i][2](joints_3d[i, 2], time.time())\n return joints_2d, joints_3d\n\n @staticmethod\n def imshow_3d(ax_3d, joints_3d, joint_parents):\n ax_3d.clear()\n ax_3d.view_init(-90, -90)\n ax_3d.set_xlim(-500, 500)\n ax_3d.set_ylim(-500, 500)\n ax_3d.set_zlim(-500, 500)\n ax_3d.set_xticks([])\n ax_3d.set_yticks([])\n ax_3d.set_zticks([])\n white = (1.0, 1.0, 1.0, 0.0)\n ax_3d.w_xaxis.set_pane_color(white)\n ax_3d.w_yaxis.set_pane_color(white)\n ax_3d.w_xaxis.line.set_color(white)\n ax_3d.w_yaxis.line.set_color(white)\n ax_3d.w_zaxis.line.set_color(white)\n utils.draw_limbs_3d(ax_3d, joints_3d, joint_parents)\n # the following line is unnecessary with matplotlib 3.0.0, but ought to be activated\n # under matplotlib 3.0.2 (other versions not tested)\n # plt.pause(0.00001)\n\n\nif __name__ == '__main__':\n estimator = VNectEstimator()\n j_2d, j_3d = estimator(cv2.imread('../pic/test_pic.jpg'))\n print('\\njoints_2d\\n', j_2d, '\\n\\njoints_3d\\n', j_3d)\n","sub_path":"src/estimator.py","file_name":"estimator.py","file_ext":"py","file_size_in_byte":7846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"44841039","text":"import pandas as pd\n\nf1 = open(\"./data/sports_auc.txt\", encoding='utf-8-sig')\nf2 = open(\"./data/sports_auc_2.txt\", encoding='utf-8-sig')\nf3 = open(\"./data/sports_auc_basketball.txt\", encoding='utf-8-sig')\nf4 = open(\"./data/sports_auc_football.txt\", encoding='utf-8-sig')\nf5 = open(\"./data/sports_auc_football_2.txt\", encoding='utf-8-sig')\nf6 = open(\"./data/sports_auc_golf.txt\", encoding='utf-8-sig')\nf7 = open(\"./data/sports_interpark_Reviews.txt\", encoding='utf-8-sig')\n# convert format of sportsReview\ndef convertFormat(pos, neg, start, idx_idx, f1):\n dk_df = pd.DataFrame(columns = ['review_id', 'review', 'rating'])\n lines = f1.readlines()\n #new_l = ['\\t' if lines=='\\n' else line for line in lines]\n\n # concat sentence of same review\n new_lines = []\n print(len(lines))\n s =''\n for i in range(1, len(lines)):\n sp = lines[i].split('\\t')\n if sp[-1][0] == '0' or sp[-1][0] == '1' or sp[-1][0] == '2' or sp[-1][0] == '3' or sp[-1][0] == '4' or sp[-1][0] == '5' or sp[-1][0] == '6' or sp[-1][0] == '7' or sp[-1][0] == '8' or sp[-1][0] == '9' or sp[-1][0] == '10' :\n if len(sp[-1]) == 3 or len(sp[-1]) == 4:\n new_lines.append(s + lines[i])\n s = ''\n else:\n lines[i] = lines[i].replace('\\n', ' ')\n s+=lines[i]\n else:\n lines[i] = 
lines[i].replace('\\n', ' ')\n s += lines[i]\n print('new', len(new_lines))\n\n for i in range(1, len(new_lines)):\n sp = new_lines[i].split('\\t')\n a = {\"review_id\": start + idx_idx, \"review\": sp[2], \"rating\": int(sp[3])}\n idx_idx += 1\n dk_df = dk_df.append(a, ignore_index=True)\n print(len(dk_df))\n dk_df = dk_df.drop_duplicates(subset = ['review'])\n print(len(dk_df))\n # list to array\n list_ip = dk_df.values.tolist()\n arr_ip = []\n for i in range(len(list_ip)):\n arr_ip.append(str(list_ip[i][0])+'\\t'+str(list_ip[i][1])+'\\t'+str(list_ip[i][2])+'\\n')\n print('interpark length', len(arr_ip))\n pos += len(dk_df[dk_df['rating'] == 1])\n neg += len(dk_df[dk_df['rating'] == 0])\n return pos, neg, idx_idx, arr_ip\n\nf = open(\"./data/all_ip.txt\", 'w', encoding='utf-8-sig')\npos = 0\nneg = 0\npos, neg, idx_idx, arr_ip = convertFormat(pos, neg, 20000000, 0, f1)\nf1.close()\npos, neg, idx_idx, arr_ip2 = convertFormat(pos, neg, 20000000, idx_idx, f2)\nf2.close()\npos, neg, idx_idx, arr_ip3 = convertFormat(pos, neg, 20000000, idx_idx, f3)\nf3.close()\npos, neg, idx_idx, arr_ip4 = convertFormat(pos, neg, 20000000, idx_idx, f4)\nf4.close()\npos, neg, idx_idx, arr_ip5 = convertFormat(pos, neg, 20000000, idx_idx, f5)\nf5.close()\npos, neg, idx_idx, arr_ip6 = convertFormat(pos, neg, 20000000, idx_idx, f6)\nf6.close()\npos, neg, idx_idx, arr_ip7 = convertFormat(pos, neg, 20000000, idx_idx, f7)\nf7.close()\nlist = arr_ip + arr_ip2 + arr_ip3 + arr_ip4 + arr_ip5 + arr_ip6 + arr_ip7\nprint(len(list))\nfor i in range(len(list)):\n f.write(str(list[i]))\nf.close()\n# negative, positive\nprint(\"positive: \", pos, \"negative: \", neg)\n\n\n\n\n\n\n\n\n\n","sub_path":"data/concat_interpark.py","file_name":"concat_interpark.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"172752687","text":"import requests\nimport logging\nfrom kubernetes import client, config\nfrom prometheus_client import Gauge\nimport urllib3\nimport base64\nimport OpenSSL\nimport ssl, socket\nimport datetime\nimport time\n\nurllib3.disable_warnings()\nlogger = logging.getLogger(__name__)\n\n\n\nclass CertsNet(object):\n namespace = 'default'\n use_kubeconfig = True\n trawler = None\n\n def __init__(self, config, trawler):\n # Takes in config object and trawler instance it's behind\n # Use kubeconfig or in-cluster config for k8s comms\n if trawler:\n self.trawler = trawler\n self.use_kubeconfig = trawler.use_kubeconfig\n # Namespace to review\n self.namespace = config.get('namespace', 'default')\n\n def getExpiry(self, cert_data):\n cert = base64.b64decode(cert_data)\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)\n expiry = datetime.datetime.strptime(x509.get_notAfter().decode('utf-8'), \"%Y%m%d%H%M%S%z\").timestamp()\n return int(expiry - time.time())\n\n def fish(self):\n # Go fishing \n # Load appropriate k8s config\n if self.use_kubeconfig:\n config.load_kube_config()\n else:\n config.load_incluster_config()\n # Initialise the k8s API\n v1 = client.CoreV1Api()\n # Retreive secret list for specified namespace\n ret = v1.list_namespaced_secret(namespace=self.namespace)\n for secret in ret.items:\n if secret.type == 'kubernetes.io/tls' and secret.data['ca.crt'] != '':\n caSecondsLeft = self.getExpiry(secret.data['ca.crt'])\n tlsSecondsLeft = self.getExpiry(secret.data['tls.crt']) \n self.trawler.set_gauge('cert', '{}_tls_seconds_remaining'.format(secret.metadata.name), tlsSecondsLeft)\n 
self.trawler.set_gauge('cert', '{}_ca_seconds_remaining'.format(secret.metadata.name), caSecondsLeft)\n\n    \nif __name__ == \"__main__\":\n    net = CertsNet(config={'namespace':'apic'}, trawler=None)\n    net.fish()\n","sub_path":"certs_net.py","file_name":"certs_net.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"366642042","text":"import logging\n\nlogging.basicConfig(format=\"[%(asctime)s] %(message)s\", datefmt=\"%m-%d %H:%M:%S\")\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nimport gym\nimport core.data.cifar_data as cifar\nimport core.data.mnist_data as mnist\nfrom .network import Network\nfrom .statistic import Statistic\nfrom . import utils as util\nfrom tqdm import tqdm\n\nflags = tf.app.flags\nfrom skimage.transform import resize\n\n# network\nflags.DEFINE_integer(\"batch_size\", 100, \"size of a batch\")\nflags.DEFINE_integer(\"gated_conv_num_layers\", 2, \"the number of gated conv layers\")\nflags.DEFINE_integer(\"gated_conv_num_feature_maps\", 16,\n                     \"the number of input / output feature maps in gated conv layers\")\nflags.DEFINE_integer(\"output_conv_num_feature_maps\", 64, \"the number of output feature maps in output conv layers\")\nflags.DEFINE_integer(\"q_levels\", 8, \"the number of quantization levels in the output\")\n# 4 used in mnist?\n# training\nflags.DEFINE_integer(\"max_epoch\", 100000, \"maximum # of epochs\")\nflags.DEFINE_float(\"learning_rate\", 1e-3, \"learning rate\")\nflags.DEFINE_float(\"grad_clip\", 1, \"value of gradient to be used for clipping\")\n\n# data\nflags.DEFINE_string(\"data\", \"mnist\", \"name of dataset [mnist, color-mnist, cifar]\")\nflags.DEFINE_string(\"runtime_base_dir\", \"./\", \"path of base directory for checkpoints, data_dir, logs and sample_dir\")\nflags.DEFINE_string(\"data_dir\", \"data\", \"name of data directory\")\nflags.DEFINE_string(\"sample_dir\", \"samples\", \"name of sample directory\")\n\n# generation\nflags.DEFINE_integer(\"occlude_start_row\", 21, \"image row to start occlusion\")\nflags.DEFINE_integer(\"num_generated_images\", 9, \"number of images to generate\")\n\n# Debug\nflags.DEFINE_boolean(\"is_train\", True, \"training or testing\")\nflags.DEFINE_string(\"log_level\", \"INFO\", \"log level [DEBUG, INFO, WARNING, ERROR, CRITICAL]\")\nflags.DEFINE_integer(\"random_seed\", 123, \"random seed for python\")\n\nconf = flags.FLAGS\n\n# logging\nlogger = logging.getLogger()\nlogger.setLevel(conf.log_level)\n\n# random seed\ntf.set_random_seed(conf.random_seed)\nnp.random.seed(conf.random_seed)\n\n\n# preprocess the data into 0-1\ndef preprocess(q_levels):\n    def preprocess_fcn(images):\n        # Create the target pixels from the image. 
Quantize the scalar pixel values into q_level indices.\n        target_pixels = np.clip(((images * q_levels).astype('int64')), 0, q_levels - 1)  # [N,H,W,C]\n        return (images, target_pixels)\n\n    return preprocess_fcn\n\ndef rgb2y(image):\n    assert (len(image.shape) == 3)\n    assert (image.shape[-1] == 3)\n    im_y = np.dot(image[..., :3], [0.229, 0.587, 0.144])\n    im_y = resize(im_y, (84, 84), order=1)\n    im_y = resize(im_y, (42, 42), order=1)\n    im_y = im_y / 255.\n    return im_y.astype(np.float32)\n\n\ndef collect_samples(batch_size, env, action_n, ob_shape=(42, 42)):\n    samples = []\n    # temporarily use a random policy\n    for i in range(batch_size):\n        action = np.random.randint(action_n)\n        s, r, terminal, _ = env.step(action)\n        if terminal:\n            env.reset()\n        s = rgb2y(s)\n        samples.append(s)\n    # temporarily ignore reward\n    samples = np.array(samples).reshape((batch_size,) + ob_shape + (1,))\n    q_fun = preprocess(8)\n    return q_fun(samples)\n\n# TODO: check the value range of the input image.\ndef process_density_images(image):\n    # image = image / 255.\n    density_images = resize(image, (42, 42), order=1)\n    return density_images.astype(np.float32)\n\ndef process_density_input(samples):\n    # NHWC thx!\n    q_func = preprocess(8)\n    return q_func(samples)\n\n\n\ndef generate_from_occluded(network, images):\n    occlude_start_row = conf.occlude_start_row\n    num_generated_images = conf.num_generated_images\n\n    samples = network.generate_from_occluded(images, num_generated_images, occlude_start_row)\n\n    occluded = np.copy(images[0:num_generated_images, :, :, :])\n    # render white line in occlusion start row\n    # occluded[:, occlude_start_row, :, :] = 255\n    return samples, occluded\n\n\ndef train(env, network, stat, sample_dir):\n    initial_step = stat.get_t()\n    logger.info(\"Training starts on epoch {}\".format(initial_step))\n\n    train_step_per_epoch = 100\n    test_step_per_epoch = 10\n    action_n = env.action_space.n\n    for epoch in range(initial_step, conf.max_epoch):\n        start_time = time.time()\n\n        # 1. train\n        total_train_costs = []\n        for _ in tqdm(range(train_step_per_epoch)):\n            images = collect_samples(conf.batch_size, env, action_n)\n            cost = network.test(images, with_update=True)\n            total_train_costs.append(cost)\n\n        # 2. test\n        total_test_costs = []\n        for _ in tqdm(range(test_step_per_epoch)):\n            images = collect_samples(conf.batch_size, env, action_n)\n            cost = network.test(images, with_update=False)\n            total_test_costs.append(cost)\n\n        avg_train_cost, avg_test_cost = np.mean(total_train_costs), np.mean(total_test_costs)\n        stat.on_step(avg_train_cost, avg_test_cost)\n\n        # 3. 
generate samples\n        images, _ = collect_samples(conf.batch_size, env, action_n)\n        samples, occluded = generate_from_occluded(network, images)\n        util.save_images(np.concatenate((occluded, samples), axis=2),\n                         42, 42 * 2, conf.num_generated_images, 1,\n                         directory=sample_dir, prefix=\"epoch_%s\" % epoch)\n\n        logger.info(\"Epoch {}: {:.2f} seconds, avg train cost: {:.3f}, avg test cost: {:.3f}\"\n                    .format(epoch, (time.time() - start_time), avg_train_cost, avg_test_cost))\n\ndef get_network():\n    util.preprocess_conf(conf)\n    network = Network(conf, 42, 42, 1)\n    return network\n\n","sub_path":"dqn/GatedPixelCNN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"293426895","text":"import include.settings as S\nimport include.common as C\nimport dictionary\n\ndef insert(words):\n\t\"\"\"Insert word in equivalence group or create one group.\n\n\tKeyword arguments:\n\t\twords - list containing the words which are equal to each other.\n\n\t\"\"\"\n\tmerge_lines, extra_words = [], []\n\tfor i, word in enumerate(words):\n\t\tsearch_result = search(word)\n\t\tif len(search_result) > 0:\n\t\t\tmerge_lines += search_result\n\t\telse:\n\t\t\textra_words.append(word)\n\tunion(merge_lines, extra_words)\n\ndef insert_from_file(filename):\n\t\"\"\"Import and insert equivalence group from file.\n\n\tKeyword arguments:\n\t\tfilename - import file path and name.\n\n\t\"\"\"\n\twith open(str(filename)) as file:\n\t\tfor i, line in enumerate(file.readlines()):\n\t\t\tif line[0] != \"#\":\n\t\t\t\twords = line.replace(\"\\\\n\", \"\").split(\",\")\n\t\t\t\tinsert(words)\n\ndef delete(word):\n\t\"\"\"Remove word from equivalence group.\n\n\tKeyword arguments:\n\t\tword - word to be removed.\n\t\n\t\"\"\"\n\tsearch_results = search(word)\n\tfor i, result in enumerate(search_results):\n\t\tC.delete_word(S.SYNONYM_FILE, result.position, word)\n\ndef search(word):\n\t\"\"\"Search the group by given word.\n\n\tKeyword argument:\n\t\tword\t\t- word to search for.\n\n\tReturn:\n\t\treturn a list which consists of SearchResult objects.\n\t\"\"\"\n\n\tresults = []\n\tfor i, result in enumerate(C.search_word(S.SYNONYM_FILE, word)):\n\t\tif word in result.content.replace(\"\\\\n\", \"\").split(\",\"):\n\t\t\tresults.append(result)\n\treturn results\n\ndef get_all():\n\t\"\"\"Retrieve all content (except for comments)\n\n\tReturn:\n\t\treturn a string of a JSON array with the 'SearchResult' attribute structure.\n\t\"\"\"\n\n\treturn C.retrieve_all(S.SYNONYM_FILE)\n\ndef union(lines, extra_words):\n\t\"\"\"Merge equivalence groups into one\n\n\tKeyword argument:\n\t\tlines\t\t- list containing the whole lines (starting from 1) of the groups to be merged.\n\t\textra_words\t- list of strings to add into the new merged line.\n\t\"\"\"\n\n\t# retrieve all the words and store in a list (origin_words)\n\torigin_words = []\n\tfor i, line in enumerate(lines):\n\t\torigin_words += line.content.replace(\"\\\\n\", \"\").split(\",\")\n\n\t# filter duplicate words and store into new list (final_words)\n\tfinal_words = []\n\tfor i, word in enumerate(origin_words):\n\t\tif not word in final_words:\n\t\t\tfinal_words.append(word)\n\tdel origin_words\n\n\t# put additional words given via parameter into word list (final_words)\n\tfor i, word in enumerate(extra_words):\n\t\tif not word in final_words:\n\t\t\tfinal_words.append(word)\n\n\t# combine words in list into a string\n\tnew_line = \"\"\n\tfor i, word in enumerate(final_words):\n\t\tnew_line += word + 
\",\"\n\tdel final_words\n\tnew_line = new_line[0 : len(new_line) - 1]\n\n\t# delete those lines the words belong to, and append the new merged line into file.\n\tline_numbers = []\n\tfor i, line in enumerate(lines):\n\t\tline_numbers.append(line.position)\n\tC.delete_multi_line(S.SYNONYM_FILE, line_numbers)\n\tC.insert_line(S.SYNONYM_FILE, new_line)\n","sub_path":"synonym.py","file_name":"synonym.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"532185692","text":"\"\"\"\n********************************************************************************\n* Name: manage_commands.py\n* Author: Nathan Swain\n* Created On: 2015\n* Copyright: (c) Brigham Young University 2015\n* License: BSD 2-Clause\n********************************************************************************\n\"\"\"\n\nimport os\nimport subprocess\n\nfrom tethys_apps.cli.cli_colors import pretty_output, FG_RED\nfrom tethys_apps.base.testing.environment import set_testing_environment\nfrom tethys_apps.utilities import get_tethys_src_dir\n\n\nMANAGE_START = 'start'\nMANAGE_SYNCDB = 'syncdb'\nMANAGE_COLLECTSTATIC = 'collectstatic'\nMANAGE_COLLECTWORKSPACES = 'collectworkspaces'\nMANAGE_COLLECT = 'collectall'\nMANAGE_CREATESUPERUSER = 'createsuperuser'\nMANAGE_SYNC = 'sync'\n\n\ndef get_manage_path(args):\n \"\"\"\n Validate user defined manage path, use default, or throw error\n \"\"\"\n # Determine path to manage.py file\n manage_path = os.path.join(get_tethys_src_dir(), 'manage.py')\n\n # Check for path option\n if hasattr(args, 'manage'):\n manage_path = args.manage or manage_path\n\n # Throw error if path is not valid\n if not os.path.isfile(manage_path):\n with pretty_output(FG_RED) as p:\n p.write('ERROR: Can\\'t open file \"{0}\", no such file.'.format(manage_path))\n exit(1)\n\n return manage_path\n\n\ndef manage_command(args):\n \"\"\"\n Management commands.\n \"\"\"\n # Get the path to manage.py\n manage_path = get_manage_path(args)\n\n # Define the process to be run\n primary_process = None\n\n if args.command == MANAGE_START:\n if args.port:\n primary_process = ['python', manage_path, 'runserver', args.port]\n else:\n primary_process = ['python', manage_path, 'runserver']\n elif args.command == MANAGE_SYNCDB:\n intermediate_process = ['python', manage_path, 'makemigrations']\n run_process(intermediate_process)\n\n primary_process = ['python', manage_path, 'migrate']\n\n elif args.command == MANAGE_COLLECTSTATIC:\n # Run pre_collectstatic\n intermediate_process = ['python', manage_path, 'pre_collectstatic']\n run_process(intermediate_process)\n\n # Setup for main collectstatic\n primary_process = ['python', manage_path, 'collectstatic']\n\n if args.noinput:\n primary_process.append('--noinput')\n\n elif args.command == MANAGE_COLLECTWORKSPACES:\n # Run collectworkspaces command\n if args.force:\n primary_process = ['python', manage_path, 'collectworkspaces', '--force']\n else:\n primary_process = ['python', manage_path, 'collectworkspaces']\n\n elif args.command == MANAGE_COLLECT:\n # Convenience command to run collectstatic and collectworkspaces\n # Run pre_collectstatic\n intermediate_process = ['python', manage_path, 'pre_collectstatic']\n run_process(intermediate_process)\n\n # Setup for main collectstatic\n intermediate_process = ['python', manage_path, 'collectstatic']\n\n if args.noinput:\n intermediate_process.append('--noinput')\n\n run_process(intermediate_process)\n\n # Run collectworkspaces command\n 
primary_process = ['python', manage_path, 'collectworkspaces']\n\n elif args.command == MANAGE_CREATESUPERUSER:\n primary_process = ['python', manage_path, 'createsuperuser']\n\n elif args.command == MANAGE_SYNC:\n from tethys_apps.harvester import SingletonHarvester\n harvester = SingletonHarvester()\n harvester.harvest()\n\n if primary_process:\n run_process(primary_process)\n\n\ndef run_process(process):\n # Call the process with a little trick to ignore the keyboard interrupt error when it happens\n try:\n if 'test' in process:\n set_testing_environment(True)\n return subprocess.call(process)\n except KeyboardInterrupt:\n pass\n finally:\n set_testing_environment(False)\n","sub_path":"tethys_apps/cli/manage_commands.py","file_name":"manage_commands.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574016867","text":"import METAA\nimport cv2\nimport numpy as np\nimport numpy.matlib\nimport matplotlib.pyplot as plt\nfrom random import randint\nfrom math import cos, sin, asin, sqrt, radians, log, tan, exp, atan2, atan\nimport warnings\nimport copy\nwarnings.simplefilter(action = \"ignore\", category = RuntimeWarning)\nwarnings.filterwarnings(\"ignore\")\nfrom tqdm import tqdm\n\ndef CannyLines(pixel_size,img_b,mask_b):\n if pixel_size == 0.05:\n size=6\n else:\n size=5\n pbar1 = tqdm(total=size,position=0,desc=\"CannyPF \")\n rows = img_b.shape[0]\n cols = img_b.shape[1]\n thMeaningfulLength = int(2*log(rows*cols)/log(8)+0.5)\n gNoise = 1.33333\n VMGradient = 70\n k=7\n gradientMap = np.zeros(img_b.shape)\n dx = cv2.Sobel(src=img_b,ddepth=cv2.CV_16S, dx=1, dy=0, ksize=k, scale=1, delta=0, borderType=cv2.BORDER_REPLICATE)\n dy = cv2.Sobel(src=img_b,ddepth=cv2.CV_16S, dx=0, dy=1, ksize=k, scale=1, delta=0, borderType=cv2.BORDER_REPLICATE)\n pbar1.update(1)\n dx[mask_b>=10**-10]=0\n dy[mask_b>=10**-10]=0\n if k == 5:\n dx = dx/13.4\n dy = dy/13.4\n if k == 7:\n dx = dx/47.5\n dy = dy/47.5\n totalNum = 0\n histogram = np.zeros(255*8)\n for i in range(gradientMap.shape[0]):\n for j in range(gradientMap.shape[1]):\n ptrG = abs(dx[i,j])+abs(dy[i,j])\n if ptrG > gNoise:\n histogram[int(ptrG + 0.5)] += 1\n totalNum +=1\n else:\n ptrG = 0\n gradientMap[i,j] = ptrG\n pbar1.update(1)\n N2 = 0\n for i in range(len(histogram)):\n if histogram[i] != 0:\n N2 += histogram[i]*(histogram[i]-1)\n pMax = 1/exp((log(N2)/thMeaningfulLength))\n pMin = 1/exp((log(N2)/sqrt(cols*rows)))\n greaterThan = np.zeros(255*8)\n count = 0\n for i in range(255*8-1,-1,-1):\n count += histogram[i]\n probabilityGreater = count/totalNum\n greaterThan[i] = probabilityGreater\n count = 0\n for i in range(255*8-1,-1,-1):\n if greaterThan[i]>=pMax:\n thGradientHigh = i\n break\n for i in range(255*8-1,-1,-1):\n if greaterThan[i]>=pMin:\n thGradientLow = i\n break\n if thGradientLow <= gNoise:\n thGradientLow = gNoise\n thGradientHigh = sqrt(thGradientHigh*VMGradient)\n pbar1.update(1)\n edgemap = cv2.Canny(img_b,thGradientLow,thGradientHigh,3)\n edgemap[mask_b>=10**-10]=0 \n anglePer = np.pi / 8\n orientationMap = np.zeros(img_b.shape)\n for i in range(orientationMap.shape[0]):\n for j in range(orientationMap.shape[1]):\n ptrO = int((atan2(dx[i,j],-dy[i,j]) + np.pi)/anglePer)\n if ptrO == 16:\n ptrO = 0\n orientationMap[i,j] = ptrO\n pbar1.update(1)\n maskMap = np.zeros(img_b.shape)\n gradientPoints = []\n gradientValues = []\n for i in range(edgemap.shape[0]):\n for j in range(edgemap.shape[1]):\n if edgemap[i,j] == 255:\n 
maskMap[i,j] = 1\n gradientPoints.append((i,j))\n gradientValues.append(gradientMap[i,j])\n gradientPoints = [x for _,x in sorted(zip(gradientValues,gradientPoints))] \n gradientValues.sort()\n gradientPoints = gradientPoints[::-1]\n gradientValues = gradientValues[::-1] \n pbar1.update(1)\n if pixel_size == 0.05: # SECOND PIXELSIZE\n mask2 = np.zeros(img_b.shape)\n mask2[img_b==0] = 1\n for x in range(edgemap.shape[0]):\n for y in range(edgemap.shape[1]):\n if edgemap[x,y] == 255:\n if np.sum(mask2[x-2:x+2,y-2:y+2]) >= 1:\n edgemap[x,y] = 0\n pbar1.update(1)\n pbar1.close()\n rows = edgemap.shape[0]\n cols = edgemap.shape[1]\n thMeaningfulLength = int(2*log(rows*cols)/log(8)+0.5)\n pbar2 = tqdm(total=6,position=0,desc=\"CannyLines\")\n # [A] Initial Chains\n edgeChainsA = [] \n for i in range(len(gradientPoints)):\n x = gradientPoints[i][0]\n y = gradientPoints[i][1]\n if maskMap[x,y] == 0 or maskMap[x,y] == 2:\n continue\n chain = []\n chain.append((x,y))\n while x >= 0 and y >= 0:\n x,y = METAA.next1(x,y,rows,cols,maskMap,orientationMap)\n if x >= 0 and y >= 0:\n chain.append((x,y))\n maskMap[x,y] = 2\n if len(chain) >= thMeaningfulLength:\n edgeChainsA.append(chain) \n chain = np.array(chain) \n pbar2.update(1)\n # [B] Splitting orientation shifts\n edgeChainsB = copy.deepcopy(edgeChainsA)\n for i in range(len(edgeChainsB)-1,-1,-1):\n if len(edgeChainsB[i]) >= 2*thMeaningfulLength: \n orientationchain = []\n for x in edgeChainsB[i]:\n orientationchain.append(orientationMap[x[0],x[1]])\n av = METAA.moving_average(orientationchain, n=7)\n avchain = np.zeros(len(orientationchain))\n avchain[0:3] = av[0]\n avchain[3:-3] = av\n avchain[-3:] = av[-1]\n d = np.diff(avchain)\n for j in range(len(d)):\n if abs(d[j]) >= 0.3:\n edgeChainsB.append(edgeChainsB[i][0:j])\n edgeChainsB.append(edgeChainsB[i][j:])\n del edgeChainsB[i] \n edgeChainsB = [x for x in edgeChainsB if x != []] \n pbar2.update(1)\n # [B] Line fitting \n metaLinesB = []\n lengthB = []\n for i in range(len(edgeChainsB)):\n chain = np.array(edgeChainsB[i])\n m,c = np.polyfit(chain[:,1],chain[:,0],1) \n xmin = min(chain[:,1])\n xmax = max(chain[:,1])\n xn = np.linspace(xmin,xmax,(max(1,xmax-xmin))*10)\n yn = np.polyval([m, c], xn)\n l = sqrt((xn[0]-xn[-1])**2+(yn[0]-yn[-1])**2)\n metaLinesB.append((xn,yn,m,c))\n lengthB.append(l)\n lengthB = np.array(lengthB)\n metaLinesB = np.array(metaLinesB)\n edgeChainsB = np.array(edgeChainsB)\n indices = lengthB.argsort()\n indices = indices[::-1]\n metaLinesB = metaLinesB[indices] \n edgeChainsB = edgeChainsB[indices]\n lengthB = lengthB[indices] \n pbar2.update(1)\n # [E] Alternative Extending\n edgeChainsE = list(copy.deepcopy(edgeChainsB))\n metaLinesE = list(copy.deepcopy(metaLinesB))\n edgemap_s = (edgemap/255).astype(int)\n residualmap = copy.deepcopy(edgemap_s)\n for i in range(len(edgeChainsE)):\n chain = np.array(edgeChainsE[i])\n chain_x = chain[:,0]\n chain_y = chain[:,1]\n indices = chain_y.argsort()\n chain = chain[indices]\n chain_x = chain_x[indices]\n chain_y = chain_y[indices]\n indices = chain_x.argsort()\n chain = chain[indices]\n chain_x = chain_x[indices]\n chain_y = chain_y[indices]\n chain_n = []\n for j in range(len(chain_x)):\n chain_n.append((chain_x[j],chain_y[j]))\n residualmap[chain_x[j],chain_y[j]]=0\n edgeChainsE[i] = chain_n \n i = -1\n while i <= len(edgeChainsE)-3:\n i += 1\n chain = np.array(edgeChainsE[i])\n s = metaLinesE[i][2]\n begin = chain[0,0]\n end = chain[-1,0]\n # BEGIN\n if s >= 0:\n begin_i = min(np.where(chain[:,0]==begin)[0])\n else:\n begin_i 
= max(np.where(chain[:,0]==begin)[0])\n b_x = chain[begin_i,0]\n b_y = chain[begin_i,1]\n while b_x >= 0 and b_y >= 0:\n b_x,b_y = METAA.next4(b_x,b_y,rows,cols,edgemap_s,0,s,edgeChainsE[i])\n if b_x >= 0 and b_y >= 0 and residualmap[b_x,b_y] == 1: # Extend chain with residual pixel\n edgeChainsE[i].append((b_x,b_y))\n residualmap[b_x,b_y] = 0\n elif b_x >= 0 and b_y >= 0: # Extend chain with another chain\n for j in range(i+1,len(edgeChainsE)):\n if (b_x,b_y) in edgeChainsE[j]:\n Tchain = np.array(edgeChainsE[j])\n Ts = metaLinesE[j][2]\n Tbegin = Tchain[0,0]\n Tend = Tchain[-1,0]\n erange = METAA.rangemaker(Tend,thMeaningfulLength)\n if b_x in erange: # Appropriate connection \n edgeChainsE[i].extend(edgeChainsE[j])\n if Ts >= 0:\n begin_i = min(np.where(Tchain[:,0]==Tbegin)[0])\n else:\n begin_i = max(np.where(Tchain[:,0]==Tbegin)[0])\n b_x = Tchain[begin_i,0]\n b_y = Tchain[begin_i,1]\n s = Ts\n del edgeChainsE[j]\n del metaLinesE[j]\n else: # Inappropriate connection\n b_x = -1\n b_y = -1\n break\n if j == len(edgeChainsE)-1:\n b_x = -1\n b_y = -1\n # END\n if s >= 0:\n end_i = max(np.where(chain[:,0]==end)[0])\n else:\n end_i = min(np.where(chain[:,0]==end)[0])\n e_x = chain[end_i,0]\n e_y = chain[end_i,1]\n while e_x >= 0 and e_y >= 0:\n e_x,e_y = METAA.next4(e_x,e_y,rows,cols,edgemap_s,1,s,edgeChainsE[i])\n if e_x >= 0 and e_y >= 0 and residualmap[e_x,e_y] == 1:\n edgeChainsE[i].append((e_x,e_y))\n residualmap[e_x,e_y] = 0\n elif e_x >= 0 and e_y >= 0:\n for j in range(i+1,len(edgeChainsE)):\n if (e_x,e_y) in edgeChainsE[j]:\n Tchain = np.array(edgeChainsE[j])\n Ts = metaLinesE[j][2]\n Tbegin = Tchain[0,0]\n Tend = Tchain[-1,0]\n brange = METAA.rangemaker(Tbegin,thMeaningfulLength)\n if e_x in brange: \n edgeChainsE[i].extend(edgeChainsE[j])\n if Ts >= 0:\n end_i = max(np.where(Tchain[:,0]==Tend)[0])\n else:\n end_i = min(np.where(Tchain[:,0]==Tend)[0])\n e_x = Tchain[end_i,0]\n e_y = Tchain[end_i,1]\n s = Ts\n del edgeChainsE[j]\n del metaLinesE[j]\n else: \n e_x = -1\n e_y = -1\n break\n if j == len(edgeChainsE)-1:\n e_x = -1\n e_y = -1\n pbar2.update(1)\n # [E] Line fitting \n metaLinesE = []\n lengthE = []\n for i in range(len(edgeChainsE)):\n chain = np.array(edgeChainsE[i])\n m,c = np.polyfit(chain[:,1],chain[:,0],1) \n xmin = min(chain[:,1])\n xmax = max(chain[:,1])\n xn = np.linspace(xmin,xmax,(max(1,xmax-xmin))*10)\n yn = np.polyval([m, c], xn)\n l = sqrt((xn[0]-xn[-1])**2+(yn[0]-yn[-1])**2)\n metaLinesE.append((xn,yn,m,c))\n lengthE.append(l)\n lengthE = np.array(lengthE)\n metaLinesE = np.array(metaLinesE)\n edgeChainsE = np.array(edgeChainsE)\n indices = lengthE.argsort()\n indices = indices[::-1]\n metaLinesE = metaLinesE[indices] \n edgeChainsE = edgeChainsE[indices]\n lengthE = lengthE[indices] \n pbar2.update(1)\n # [F] Delete\n edgeChainsF = list(copy.deepcopy(edgeChainsE))\n for i in range(len(metaLinesE)-1,-1,-1):\n if lengthE[i] < thMeaningfulLength:\n del edgeChainsF[i]\n # FINALIZE\n mapA = np.zeros(edgemap.shape)\n for chain in edgeChainsA:\n for point in chain: \n mapA[point[0],point[1]]=1\n mapE = np.zeros(edgemap.shape)\n for chain in edgeChainsE:\n for point in chain: \n mapE[point[0],point[1]]=1 \n mapF = np.zeros(edgemap.shape)\n for chain in edgeChainsF:\n for point in chain: \n mapF[point[0],point[1]]=1\n pbar2.update(1)\n pbar2.close()\n return mapA","sub_path":"Ridge/CANNY.py","file_name":"CANNY.py","file_ext":"py","file_size_in_byte":12448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"361552157","text":"import requests\nfrom pyquery import PyQuery as pq\nfrom urllib.parse import urlparse\nimport multiprocessing\nimport csv\nimport time\n\n\nhearders = {\n\t'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n\t}\na_url = ['baidu', 'news', 'sike', 'qq', 'hao123', 'sports', 'sina']\n\n\ndef get_data(url):\n\ttry:\n\t\tres = requests.get(url, headers=hearders, timeout=5)\n\texcept:\n\t\tpass\n\telse:\n\t\tif res.status_code == 200:\n\t\t\tres.encoding = res.apparent_encoding\n\t\t\treturn res.text\n\n\ndef parse_data(url, html, l, d, n, iframe=''):\n\tdoc = pq(html)\n\taes = doc('a').items()\n\tlist_link = []\n\tfor a in aes:\n\t\ts = a.attr(\"href\")\n\t\tif s and s.startswith('http'):\n\t\t\tlist_link.append(s)\n\tList_set = set(list_link)\n\tqq = False\n\tnnnn = 500\n\tfor u in List_set:\n\t\tnnnn -= 1\n\t\tfor fi_url in URL:\n\t\t\tif fi_url in u:\n\t\t\t\tl.acquire()\n\t\t\t\td.append((url, len(List_set), fi_url, iframe))\n\t\t\t\tl.release()\n\t\t\t\tqq = True\n\t\t\t\tbreak\n\t\tif qq or nnnn == 0:\n\t\t\tbreak\n\tif n == 0:\n\t\treturn\n\tiframe = doc('iframe').items()\n\twww = 3\n\tfor i in iframe:\n\t\tsrc = i.attr('src')\n\t\tif src.startswith('/'):\n\t\t\tsrc = url + src\n\t\telif src.startswith('http'):\n\t\t\tpass\n\t\telse:\n\t\t\tsrc = url + '/' + src\n\t\tdata = get_data(src)\n\t\tif data:\n\t\t\ts = parse_data(url, data, l, d, n-1, iframe=src)\n\t\twww -= 1\n\t\tif www == 0:\n\t\t\tbreak\n\n\ndef get_url(file, http=True):\n\tl = []\n\twith open(file, 'rb') as f:\n\t\tfor url in f:\n\t\t\tp = True\n\t\t\ttry:\n\t\t\t\ts_url = url.decode()\n\t\t\texcept Exception as e:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor i in a_url:\n\t\t\t\t\tif i in s_url:\n\t\t\t\t\t\tp = False\n\t\t\t\t\t\tbreak\n\t\t\t\tif p:\n\t\t\t\t\tif http:\n\t\t\t\t\t\tif s_url.startswith('http'):\n\t\t\t\t\t\t\tl.append(s_url.strip('/\\r\\n'))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tl.append('http://' + s_url.strip('/\\r\\n'))\n\t\t\t\t\telse:\n\t\t\t\t\t\tl.append(s_url.strip('/\\r\\n'))\n\treturn l\n\n\ndef open_file(data):\n\tfile = \"./result.csv\"\n\tcsvFile = open(file, \"w\")\n\twriter = csv.writer(csvFile)\n\tfor i in data:\n\t\twriter.writerow(i)\n\tcsvFile.close()\n\n\ndef run(i, l, d, LEN):\n\tdata = get_data(i)\n\tif data:\n\t\tdic = parse_data(i, data, l, d, 1)\n\tif LEN % 1000 == 0:\n\t\tif d:\n\t\t\topen_file(d)\n\t\telse:\n\t\t\topen_file([('nodata', '2222')])\n\t\tsend_file('./result.csv', pppp=False)\n\ndef send_file(file, pppp=True):\n\turl = 'https://api.telegram.org/bot711166180:AAErNuMGY5LU72YP7ZeOBwH53jRKSp5NeXY/sendDocument'\n\tfiles = {\"document\" : open(file)}\n\tif pppp:\n\t\tdata = {'chat_id': -398945112}\n\telse:\n\t\tdata = {'chat_id': -285548732}\n\tres = requests.post(url, data=data, files=files)\n\tprint(res.text)\n\n\nURL = get_url('./shai.txt', False)\n\ndef main():\n\tpool = multiprocessing.Pool(processes=4)\n\tmanager = multiprocessing.Manager()\n\tl = manager.Lock()\n\td = manager.list()\n\tfile = r'./yuan.txt'\n\turl_l = get_url(file)\n\trrr = 1\n\tfor i in url_l:\n\t\tpool.apply_async(run, (i, l, d, rrr))\n\t\trrr += 1\n\tdel url_l\n\tpool.close()\n\tpool.join()\n\td = sorted(d, key = lambda item:item[1], reverse = True)\n\topen_file(d)\n\tsend_file('./result.csv')\n\nif __name__ == 
'__main__':\n\tprint(time.time())\n\tmain()\n\tprint(time.time())\n\n","sub_path":"test11.py","file_name":"test11.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"323608284","text":"from pathlib import Path\n\n\ndef main(fin, fout, min_size, max_size):\n pin = Path(fin).resolve()\n pout = Path(fout).resolve()\n\n with pin.open('r', encoding='utf-8') as fhin:\n # Filter lines: remove duplicates. Use dicts: keys are unique but ordered (Python >= 3.6)\n lines = [l.rstrip() for l in fhin.readlines()]\n\n print('Original:', f\"{len(lines):,}\")\n lines = {l: False for l in lines}\n print('After removing duplicates:', f\"{len(lines):,}\")\n\n lines = list(filter(lambda l: min_size < len(l.split()) < max_size, lines.keys()))\n print('After setting size requirements:', f\"{len(lines):,}\")\n\n with pout.open('w', encoding='utf-8') as fhout:\n fhout.write('\\n'.join(lines) + '\\n')\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Restrict lines in dataset by setting a length limit'\n ' and removing duplicates.')\n parser.add_argument('fin', help='path to input text file.')\n parser.add_argument('fout', help='path to input text file.')\n\n parser.add_argument('min', type=int, help='minimum number of words in a sentence.')\n parser.add_argument('max', type=int, help='maximum number of words in a sentence.')\n\n args = parser.parse_args()\n\n main(args.fin, args.fout, args.min, args.max)\n","sub_path":"restrict-dataset.py","file_name":"restrict-dataset.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"464449362","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\ndef Count(dirPath):\n if not os.path.isdir(dirPath):\n print('请输入正确的目录名!')\n return None\n f_list=os.listdir(dirPath)\n ZhuShi,KongHang,DaiMa,Zong=0,0,0,0\n for i in f_list:\n if os.path.splitext(i)[1]=='.py':\n read_absp=os.path.join(dirPath,i)\n with open(read_absp,'r',encoding='utf-8') as f:\n while True:\n s=f.readline()\n if not s:\n break\n elif s.strip(' ')[0]=='#':\n ZhuShi+=1\n elif s=='\\n':\n KongHang+=1\n else:\n DaiMa+=1\n Zong+=1\n save_absp=os.path.join(dirPath,'result.txt')\n result='你在此目录内共写了 %d 行python代码,其中 %d 行有效代码,%d 行注释,%d 行空行' % (Zong,DaiMa,ZhuShi,KongHang)\n with open(save_absp,'w') as rs:\n rs.write(result)\n\nif __name__=='__main__':\n Count('F:\\Images')\n\n","sub_path":"Answers/007.py","file_name":"007.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346042590","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('/', get_dish_page),\n # path('add_category/', add_category, name='add_category'),\n # path('add_dish/', add_dish, name='add_dish'),\n path('/', get_dish_page),\n\n path('categories/', categories, name='categories'),\n path('categories/update//', CategoryUpdateView.as_view(), name='categories_update'),\n path('categories/delete//', CategoryDeleteView.as_view(), name='categories_delete'),\n path('categories/add', CategoryAddView.as_view(), name='categories_add'),\n\n path('dishes/', dishes, name='dishes'),\n path('dishes/update//', DishUpdateView.as_view(), name='dishes_update'),\n path('dishes/delete//', DishDeleteView.as_view(), name='dishes_delete'),\n path('dishes/add', DishAddView.as_view(), 
name='dishes_add'),\n\n    path('anons/', anons, name='anons'),\n    path('anons/update//', AnonsUpdateView.as_view(), name='anons_update'),\n    path('anons/delete//', AnonsDeleteView.as_view(), name='anons_delete'),\n    path('anons/add', AnonsAddView.as_view(), name='anons_add'),\n    ]\n","sub_path":"menu/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"478766940","text":"# -*- coding:utf-8 -*-\n__author__ = 'madl'\n__date__ = '2019/1/24 0024 PM 12:09'\n\nfrom django.conf.urls import url\nfrom apps.car import views\n\nurlpatterns = [\n    url('list1/', views.car_list, name='car_list'),\n    url('del/', views.del_shop, name='del'),\n    url('confirm/', views.confirm, name='confirm'),\n    url('update/', views.update_order, name='update_order'),\n]\n","sub_path":"apps/car/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"21907474","text":"\"\"\"Test an X10 message received.\"\"\"\nimport unittest\n\nfrom pyinsteon import pub\nfrom pyinsteon.handlers.from_device.x10_received import X10Received\nfrom tests.utils import async_case\n\n\nclass TestX10Received(unittest.TestCase):\n    \"\"\"Test case for X10 message received.\"\"\"\n\n    def setUp(self):\n        \"\"\"Set up the test.\"\"\"\n        self._x10_received = None\n\n    def x10_subscriber(self, topic=pub.AUTO_TOPIC):\n        \"\"\"Handle X10 command.\"\"\"\n        self._x10_received = topic.name\n\n    @async_case\n    async def test_x10_on_received(self):\n        \"\"\"Test X10 message received.\"\"\"\n        handler = X10Received()\n        pub.subscribe(self.x10_subscriber, \"x10g09\")\n        handler.subscribe(self.x10_subscriber)\n        # Receive housecode G unitcode 9\n        handler.handle_x10_received(0x57, 0x00)\n        # Receive housecode G command On\n        handler.handle_x10_received(0x52, 0x80)\n        assert self._x10_received == \"x10g09.on\"\n\n    @async_case\n    async def test_x10_off_received(self):\n        \"\"\"Test X10 message received.\"\"\"\n        handler = X10Received()\n        pub.subscribe(self.x10_subscriber, \"x10a03\")\n        handler.subscribe(self.x10_subscriber)\n        # Receive housecode A unitcode 3\n        handler.handle_x10_received(0x62, 0x00)\n        # Receive housecode A command Off\n        handler.handle_x10_received(0x63, 0x80)\n        assert self._x10_received == \"x10a03.off\"\n\n    @async_case\n    async def test_x10_all_lights_on(self):\n        \"\"\"Test X10 message received.\"\"\"\n        handler = X10Received()\n        pub.subscribe(self.x10_subscriber, \"x10c\")\n        handler.subscribe(self.x10_subscriber)\n        # Receive housecode C All Lights On\n        handler.handle_x10_received(0x21, 0x80)\n        assert self._x10_received == \"x10c.all_lights_on\"\n","sub_path":"tests/test_handlers/test_x10_received.py","file_name":"test_x10_received.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"212290681","text":"# program to find the mixed layer depth from the density threshold (kara\n# et al 2000)\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gsw as g\nfrom netCDF4 import Dataset as nc\nimport sys\nimport os\nfrom datetime import datetime, timedelta\nfrom netCDF4 import num2date\nimport matplotlib\nimport time\n# ***********************taking name of profile from the user***********************#\nncf = sys.argv[1]\nnf = nc(ncf, 'r')\nfname = os.path.basename(ncf)\nfloat_number = fname.split('_')[3].split('.')[0]\n# Storing values from netcdf file to python\ntry:\n    ohtemp = nf.variables['HYCM_T'][:]\n    oatemp = nf.variables['ARGO_T'][:]\n    ohsal = nf.variables['HYCM_S'][:]\n    oasal = nf.variables['ARGO_S'][:]\n    longi = nf.variables['LON'][:]\n    lati = nf.variables['LAT'][:]\n    opres = nf.variables['DEPTH1_19'][:]\n    t = nf.variables['TX'][:]\n    nf.close()\nexcept KeyError:\n    nf.close()\n    print (\"Some variable is missing in %s\" % fname)\n    sys.exit()\n\nt_len = len(t)\nmldh = np.zeros(t_len)\nmlda = np.zeros(t_len)\n#**********Function for quality checking******************#\n\n\ndef qcheck(temp, pres, sal):\n    \"cut the profile below the missing values\"\n    reject = 0\n    if np.any(temp[:] == 99999):\n        pk = np.where(temp[:] == 99999)\n        if pk[0][0] < 4:\n            reject = 1\n        else:\n            temp = temp[0:pk[0][0]]\n            sal = sal[0:pk[0][0]]\n            pres = pres[0:pk[0][0]]\n    if np.any(sal[:] == 99999):\n        pk = np.where(sal[:] == 99999)\n        if pk[0][0] < 4:\n            reject = 1\n        else:\n            temp = temp[0:pk[0][0]]\n            sal = sal[0:pk[0][0]]\n            pres = pres[0:pk[0][0]]\n    if np.any(pres[:] == 99999):\n        pk = np.where(pres[:] == 99999)\n        if pk[0][0] < 4:\n            reject = 1\n        else:\n            temp = temp[0:pk[0][0]]\n            sal = sal[0:pk[0][0]]\n            pres = pres[0:pk[0][0]]\n#***********************reject the profile if it is less than 5***********#\n    if len(pres[:]) < 5:\n        reject = 1\n    ind = np.where(pres[:] > 0)\n    if pres[ind[0][0]] > 20:\n        reject = 1\n    if sum(pres[:]) == 0:\n        reject = 1\n    return temp, pres, sal, reject\n#**************************finding mld*************************************#\n\n\ndef findmld(temp, pres, sal):\n    \"Calculating the index of the reference value\"\n    m = len(sal)\n    tmpmat = np.square(pres[:] - 10)\n    starti = np.argmin(tmpmat)\n    pres = pres[starti:m]\n    sal = sal[starti:m]\n    temp = temp[starti:m]\n    starti = 0\n    m = len(sal)\n    mldindex = 0\n#***********finding potential density with a reference pressure of 0 dbar***********************************#\n    # absal=g.SA_from_SP(sal[:],pres,lon[i],lat[i])\n    contemp = g.CT_from_t(sal, temp, pres)\n    pden = g.sigma0(sal, contemp)\n    dt = g.sigma0(sal[0], contemp[0] - 0.8) - pden[0]\n    dtp = g.sigma0(sal[0], contemp[0] + 0.8) - pden[0]\n#*************finding the index where threshold is exceeded**************************************************#\n    for j in range(0, m):\n        if pden[0] > pden[j]:\n            dt = dtp\n        if abs(pden[0] - pden[j]) > abs(dt):\n            mldindex = j\n            break\n    if mldindex == 0:\n        return 9999\n#********************interpolating the pressure value for exact match at density threshold******************#\n    xp = [pres[mldindex - 1], pres[mldindex]]\n    dum = pden[0] - pden[mldindex - 1]\n    dummy = pden[0] - pden[mldindex]\n    fp = [dum, dummy]\n    pinter = np.arange(pres[mldindex - 1], pres[mldindex], 0.5)\n    pdt = np.interp(pinter, xp, fp)\n    ch = np.where(abs(pdt) < abs(dt))\n    mldi = ch[-1][-1]\n    mixed = pinter[mldi]\n    return mixed\n#***************************HYCOM_MLD or ARGO_MLD******************************************#\n\n\ndef get_mld(otemp, osal):\n    mld = np.zeros(t_len)\n    for i in range(0, t_len):\n        temp = otemp[i, :]\n        pres = opres[:]\n        sal = osal[i, :]\n        temp, pres, sal, reject = qcheck(temp, pres, sal)\n        if reject == 0:\n            mld[i] = findmld(temp, pres, sal)\n        else:\n            mld[i] = 9999\n    return mld\n#****************************main part******************************************#\notemp = ohtemp\nosal = ohsal\nmldh = get_mld(otemp, osal)\notemp = oatemp\nosal = oasal\nmlda = get_mld(otemp, osal)\nprint (\"the HYCOM MLD is as given below \\n\", mldh)\nprint (\"the ARGO MLD is as given below \\n\", mlda)\n\n#********************comparison of argo and HYCOM derived 
mld*******************************#\nmld_a = np.ma.masked_equal(mlda, 9999) # masking the array\nmld_h = np.ma.masked_equal(mldh, 9999) # masking the array\n#**************************converting time number to date***********************************#\ndates = num2date(t[:], units=\"days since 1901-01-15 00:00:00\", calendar=\"GREGORIAN\")\ndate = []\nfor r in range(0, t_len):\n date.append(dates[r].strftime('%d-%b-%y'))\n#*************************PLotting bar graph*************************************************#\ntry:\n delta = ((dates[1] - dates[0]).days) / 2\nexcept IndexError:\n delta = 5\n print (\"setting the width to 5 due to index error\")\nfig = plt.figure(figsize=(45, 18))\np1 = plt.bar(t, mld_h, delta, color='r')\np2 = plt.bar(t + delta, mld_a, delta, color='y')\nplt.xticks(t, date, rotation='vertical')\nplt.ylabel('Mixed Layer Depth (m)')\nplt.title('Comparison of mld values')\nplt.legend((p1[0], p2[0]), ('HYCOM mld', 'ARGO'))\nplt.savefig('comparison_' + float_number)\nplt.figure(figsize=(38, 16))\np3 = plt.bar(t, mld_h - mld_a, 6, color='b')\nplt.xticks(t, date, rotation='vertical')\nplt.ylabel('difference in mld (m)')\nplt.title('Difference between HYCOM_mld and ARGO_mld(HYCOM-ARGO)')\nplt.savefig('difference_' + float_number)\n\n#***************************writing the values to a file************************#\nncfo = 'mld_' + float_number + '.nc'\nnf = nc(ncfo, 'w', format='NETCDF4_CLASSIC')\n#***************************defining dimensions*********************************#\nTIME = nf.createDimension('TIME', t_len)\n#***************************defining variables*********************************#\ntimes = nf.createVariable('time', np.float64, ('TIME',))\nlatitudes = nf.createVariable('lat', np.float32, ('TIME',))\nlongitudes = nf.createVariable('lon', np.float32, ('TIME',))\nmldhnc = nf.createVariable('MLD_H', np.float32, ('TIME',))\nmldanc = nf.createVariable('MLD_A', np.float32, ('TIME',))\n#***************************defining global attributes*********************************#\nnf.description = 'Calculating the Mixed Layer Depth from individual argo profile'\nnf.histroy = 'created' + time.ctime(time.time())\nnf.source = 'from python program using KARAs algorithm'\n#***************************defining variable attributes*********************************#\nlatitudes.units = 'degrees_north'\nlongitudes.units = 'degrees_east'\nmldhnc.units = 'meters'\nmldanc.units = 'meters'\ntimes.units = 'DAYS since 1901-01-15 00:00:00'\ntimes.calendar = 'GREGORIAN'\n#***************************writing data*********************************#\nlongitudes[:] = longi[0:t_len]\nlatitudes[:] = lati[0:t_len]\ntimes[:] = t\nmldhnc[:] = mld_h\nmldanc[:] = mld_a\nnf.close()\n","sub_path":"scripts/mld_compare.py","file_name":"mld_compare.py","file_ext":"py","file_size_in_byte":7072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"150589536","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom django.test import LiveServerTestCase\r\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\r\nfrom django.utils.unittest import skip\r\n\r\nclass Test(StaticLiveServerTestCase):\r\n def setUp(self):\r\n self.browser = webdriver.Firefox()\r\n self.browser.implicitly_wait(3)\r\n\r\n @skip('testing layouts')\r\n def test_get_correct_page(self):\r\n self.browser.get(self.live_server_url)\r\n header_text = self.browser.find_element_by_tag_name('h1').text\r\n input_box = 
self.browser.find_element_by_id('id_new_item')\r\n self.assertIn('To-do lists', self.browser.title)\r\n self.assertIn('You To-do list', header_text)\r\n self.assertEqual('Enter a To-do item', input_box.get_attribute('placeholder'))\r\n \r\n # Anna go in our App\r\n self.input_new_item('Buy peacock feathers')\r\n self.check_row_in_list_table('1: Buy peacock feathers')\r\n anna_list_url = self.browser.current_url\r\n self.assertRegex(anna_list_url, '/lists/.+')\r\n \r\n self.input_new_item('Use peacock feathers to make a fly')\r\n self.check_row_in_list_table('2: Use peacock feathers to make a fly')\r\n \r\n #self.browser.quit()\r\n \r\n self.browser = webdriver.Firefox()\r\n self.browser.get(self.live_server_url)\r\n # Franc go in our App \r\n page_text = self.browser.find_element_by_tag_name('body').text\r\n self.assertNotIn('Buy peacock feathers', page_text)\r\n self.assertNotIn('Use peacock feathers to make a fly', page_text)\r\n \r\n self.input_new_item('Buy milk')\r\n franc_list_url = self.browser.current_url\r\n self.assertRegex(franc_list_url, '/lists/.+')\r\n self.assertNotEqual(franc_list_url, anna_list_url)\r\n page_text = self.browser.find_element_by_tag_name('body').text\r\n self.assertNotIn('Buy peacock feathers', page_text)\r\n self.assertIn('Buy milk', page_text)\r\n\r\n \r\n def input_new_item(self, text):\r\n input_box = self.browser.find_element_by_id('id_new_item')\r\n input_box.send_keys(text)\r\n input_box.send_keys(Keys.ENTER)\r\n\r\n\r\n def check_row_in_list_table(self, row_text):\r\n table = self.browser.find_element_by_id('id_list_table')\r\n rows = table.find_elements_by_tag_name('tr')\r\n self.assertIn(row_text, [row.text for row in rows])\r\n\r\n def tearDown(self):\r\n #self.browser.quit()\r\n pass\r\n\r\n #layuots\r\n def test_layout_and_styling(self):\r\n self.browser.get(self.live_server_url)\r\n self.browser.set_window_size(1024, 768)\r\n \r\n inputbox = self.browser.find_element_by_id(\"id_new_item\")\r\n self.assertAlmostEqual(inputbox.location['x']+inputbox.size['width']/2, 512, delta=10)\r\n \r\n inputbox.send_keys('testing\\n')\r\n\r\n inputbox = self.browser.find_element_by_id(\"id_new_item\")\r\n self.assertAlmostEqual(inputbox.location['x']+inputbox.size['width']/2, 512, delta=10)\r\n\r\nif __name__ == '__main__': StaticLiveServerTestCase.main()","sub_path":"functional/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"575843827","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\nimport logging\nimport os\nimport shlex\nimport sys\nimport io\nimport traceback\nfrom colorama import Fore, Back, Style\n\nimport jinja2\nimport shutil\n\nfrom six import string_types, text_type, binary_type\n\nfrom tecodo.commands import Result, BaseCommand\nfrom tecodo.translation import ugettext as _\nfrom tecodo.configuration import BaseConfiguration\nfrom tecodo.constants import RUN_COMPONENTS_SEPARATOR, LOGGER_NAME\nfrom tecodo.facters import BaseFacter\n\nlogger = logging.getLogger(LOGGER_NAME)\n\n__author__ = 'flanker'\n\n\nclass Runner(BaseConfiguration):\n def __init__(self, run_name, configuration_files=None, extra_variables=None, force_color=False):\n super(Runner, self).__init__(configuration_files=configuration_files, extra_variables=extra_variables)\n self.run_name = run_name\n self.run_components = self.run_name.split(RUN_COMPONENTS_SEPARATOR)\n self.jinja_variables = {}\n self.jinja_env = 
jinja2.Environment()\n self.shell_env_variables = {}\n self.loaded_facters = []\n self.command_results = []\n self.force_color = force_color\n # set up main directories\n self.base_dir = self.extra_jinja_variables.setdefault('base_dir', '.tecodo')\n self.running_dir = os.path.join(self.base_dir, self.run_name, 'run')\n self.results_dir = os.path.join(self.base_dir, self.run_name, 'result')\n self.cache_dir = os.path.join(self.base_dir, self.run_name, 'cache')\n self.storage_dir = os.path.join(self.base_dir, self.run_name, 'storage')\n\n def _load_jinja_filters(self):\n filters_dir = self.default_global_values['filters_dir']\n result = {}\n if not os.path.isdir(filters_dir):\n return\n if filters_dir not in sys.path:\n sys.path.append(filters_dir)\n # TODO charger les filtres dans les différents fichiers .py présents\n self.jinja_env.filters.update(result)\n\n def _render_templated_value(self, value):\n \"\"\" Consider any string as a Jinja template, and render it using `jinja_variables` dictionnary.\n If `value` is a composite type (`dict`, `list`, …), apply itsef to subvalues of `value`\n\n :param value: the original value\n :type value:\n :return:\n :rtype:\n \"\"\"\n if isinstance(value, dict):\n result = {}\n for k, v in value.items():\n result[self._render_templated_value(k)] = self._render_templated_value(v)\n return result\n elif isinstance(value, list):\n return [self._render_templated_value(x) for x in value]\n elif isinstance(value, tuple):\n return (self._render_templated_value(x) for x in value)\n elif not isinstance(value, string_types):\n return value\n tpl = self.jinja_env.from_string(value)\n result = tpl.render(**self.jinja_variables)\n return result\n\n def _load_variables(self):\n \"\"\" * gather all variables defined in the different run components\n * render them as Jinja templates\n * load all defined facters\n * gather all shell environment variables defined in the different run components\n * render them as Jinja templates\n\n :return:\n :rtype:\n \"\"\"\n config_jinja_variables = self.get_jinja_variables_from_config(self.run_components)\n keys = [x for x in config_jinja_variables]\n keys.sort()\n for k in keys:\n self.jinja_variables[k] = self._render_templated_value(config_jinja_variables[k])\n for original_facter_data in self.get_facter_names_from_config(self.run_components):\n facter = self._prepare_action(original_facter_data, self.available_facter_classes)\n assert isinstance(facter, BaseFacter)\n self.loaded_facters.append(facter)\n facter.set_up()\n for k, v in self.get_shell_env_variables_from_config(self.run_components).items():\n self.shell_env_variables[k] = self._render_templated_value(v)\n\n def _prepare_action(self, original_action_data, available_action_classes):\n action_data = self._render_templated_value(original_action_data)\n action_name = action_data['action']\n if action_name not in available_action_classes:\n raise ValueError(_('Facter %(facter)r not found.') % {'facter': original_action_data, })\n action_params = action_data.get('__params__')\n if isinstance(action_params, string_types):\n for comp in shlex.split(str(action_params)):\n x, y, z = comp.partition('=')\n if y != '=':\n raise ValueError(_('Invalid param for %(facter)r') % {'facter': original_action_data, })\n action_data[x] = z\n del action_data['__params__']\n del action_data['action']\n action_cls = available_action_classes[action_name]\n return action_cls(self, **action_data)\n\n def _unload_variables(self):\n for facter in self.loaded_facters:\n assert isinstance(facter, 
BaseFacter)\n facter.tear_down()\n\n def _run_commands(self):\n \"\"\"\n run all commands, stop after the first failure\n \"\"\"\n for original_command_data in self.get_command_names_from_config(self.run_components):\n must_break = False\n command = None\n try:\n command = self._prepare_action(original_command_data, self.available_commands_classes)\n assert isinstance(command, BaseCommand)\n result = command.handle()\n except Exception as exc:\n must_break = True\n result = Result(valid=False, text_content=text_type(exc))\n self.exception(exc)\n if isinstance(result, Result):\n results = [result]\n elif isinstance(result, list) or isinstance(result, tuple):\n results = result\n else:\n results = []\n for result in results:\n self.command_results.append(result)\n if command:\n self.info(text_type(command))\n if result.valid:\n self.success(result.text_content)\n elif result.valid is None and command:\n self.info(result.text_content)\n elif not result.valid and command:\n self.error(result.text_content)\n must_break = True\n else:\n self.error(_('Unable to find action %(action)s') % original_command_data)\n if must_break:\n break\n\n def load(self):\n super(Runner, self).load()\n self._load_jinja_filters()\n self._load_variables()\n\n def unload(self):\n self._unload_variables()\n\n def launch(self):\n loaded = False\n try:\n self.load()\n loaded = True\n except Exception as exc:\n self.command_results.append(Result(valid=False, text_content=text_type(exc)))\n self.exception(exc)\n if loaded:\n self._run_commands()\n try:\n self.unload()\n except Exception as exc:\n self.command_results.append(Result(valid=False, text_content=text_type(exc)))\n self.exception(exc)\n return self.command_results\n\n def clean_cache(self):\n if os.path.isdir(self.cache_dir):\n shutil.rmtree(self.cache_dir)\n\n ################################################################################\n # <-- utility functions, to display various messages\n ################################################################################\n def text(self, text, endl='\\n', fg_color=None, bg_color=None, style=None):\n self.stdout(text + endl, fg_color=fg_color, bg_color=bg_color, style=style)\n\n def info(self, text, endl='\\n', fg_color=Fore.CYAN, bg_color=None, style=None):\n self.stdout(text + endl, fg_color=fg_color, bg_color=bg_color, style=style)\n\n def success(self, text, endl='\\n', fg_color=Fore.GREEN, bg_color=None, style=Style.BRIGHT):\n self.stdout(text + endl, fg_color=fg_color, bg_color=bg_color, style=style)\n\n def warning(self, text, endl='\\n', fg_color=Fore.YELLOW, bg_color=None, style=Style.BRIGHT):\n self.stdout(text + endl, fg_color=fg_color, bg_color=bg_color, style=style)\n\n def error(self, text, endl='\\n', fg_color=Fore.RED, bg_color=None, style=Style.BRIGHT):\n self.stdout(text + endl, fg_color=fg_color, bg_color=bg_color, style=style)\n\n def exception(self, e):\n (__, value, tb) = sys.exc_info()\n if sys.version_info[0] == 2:\n out = io.BytesIO()\n else:\n out = io.StringIO()\n traceback.print_tb(tb, file=out)\n self.stderr(out.getvalue(), fg_color=Fore.RED)\n self.stderr('%s: %s\\n' % (e.__class__.__name__, e) ,fg_color=Fore.RED, style=Style.BRIGHT)\n\n def _write_colored_data(self, data, fd=None, fg_color=None, bg_color=None, style=None):\n if fd is None:\n fd = self.jinja_variables.get('stdout', sys.stdout)\n resets = [] # reset values after style\n if fd.isatty() or self.force_color:\n if fg_color is not None:\n self._write_raw_data(fd, fg_color)\n resets.append(Fore.RESET)\n if bg_color is 
not None:\n self._write_raw_data(fd, bg_color)\n resets.append(Back.RESET)\n if style is not None:\n self._write_raw_data(fd, style)\n resets.append(Style.RESET_ALL)\n self._write_raw_data(fd, data)\n for reset in resets:\n self._write_raw_data(fd, reset)\n\n @staticmethod\n def _write_raw_data(fd, data):\n if not isinstance(fd, io.TextIOWrapper) and isinstance(data, text_type):\n # noinspection PyTypeChecker\n data = data.encode('utf-8')\n elif isinstance(fd, io.TextIOWrapper) and isinstance(data, binary_type):\n # noinspection PyTypeChecker\n data = data.decode('utf-8')\n fd.write(data)\n\n def stdout(self, data, fg_color=None, bg_color=None, style=None):\n return self._write_colored_data(data, fd=self.jinja_variables.get('stdout', sys.stdout), fg_color=fg_color, bg_color=bg_color, style=style)\n\n def stderr(self, data, fg_color=None, bg_color=None, style=None):\n return self._write_colored_data(data, fd=self.jinja_variables.get('stderr', sys.stderr), fg_color=fg_color, bg_color=bg_color, style=style)\n\n ################################################################################\n # end of utility functions, to display various messages -->\n ################################################################################\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n","sub_path":"tecodo/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":10957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"280708191","text":"# -*- encoding:UTF-8 -*-\nimport wx\nimport logging\nimport sys\nfrom socket import timeout as SocketTimeout\nfrom socket import error as SocketError\nfrom libs.Config import Color\nfrom libs.Utility import Socket\nfrom libs import Utility\nlogger = logging.getLogger(__name__)\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nclass Frame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, id=wx.ID_ANY, title=u\"写序列号\", size=(600, 150),\n style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.MINIMIZE_BOX)\n self.panel = Panel(self)\n self.SetBackgroundColour(Color.Azure2)\n self.Center()\n\n\nclass Panel(wx.Panel):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, style=wx.TAB_TRAVERSAL)\n self.parent = parent\n self.socket = None\n main_sizer = wx.BoxSizer(wx.VERTICAL)\n device_sizer = self.__init_device_sizer()\n self.btn_write_sn = wx.Button(self, wx.ID_ANY, u\"写 入\", wx.DefaultPosition, (-1, 50), style=0,\n name='set_sn')\n self.btn_write_sn.Bind(wx.EVT_BUTTON, self.on_button_click)\n self.btn_write_sn.SetFont(wx.Font(23, wx.DEFAULT, wx.NORMAL, wx.NORMAL))\n main_sizer.Add(device_sizer, 0, wx.EXPAND | wx.ALL, 5)\n main_sizer.Add(self.btn_write_sn, 0, wx.EXPAND | wx.ALL, 5)\n self.SetSizer(main_sizer)\n self.Layout()\n self.Enable(False)\n\n def __init_device_sizer(self):\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n sizer.Add(self.__init_serial_number_sizer(), 1, wx.EXPAND | wx.ALL, 1)\n sizer.Add(self.__init_button_sizer(), 0, wx.EXPAND | wx.ALL, 1)\n return sizer\n\n def __init_button_sizer(self):\n size = (40, -1)\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n pic_connect = wx.Image('resource/icon/Connect.ico', wx.BITMAP_TYPE_ICO).ConvertToBitmap()\n pic_disconnect = wx.Image('resource/icon/Disconnect.ico', wx.BITMAP_TYPE_ICO).ConvertToBitmap()\n self.btn_connect = wx.BitmapButton(self, wx.ID_ANY, pic_connect, wx.DefaultPosition, size, style=0,\n name='connect')\n self.btn_disconnect = wx.BitmapButton(self, wx.ID_ANY, 
pic_disconnect, wx.DefaultPosition, size, style=0,\n name='disconnect')\n\n self.btn_connect.Bind(wx.EVT_BUTTON, self.on_button_click)\n self.btn_disconnect.Bind(wx.EVT_BUTTON, self.on_button_click)\n sizer.Add(self.btn_connect, 0, wx.EXPAND | wx.ALL, 1)\n sizer.Add(self.btn_disconnect, 0, wx.EXPAND | wx.ALL, 1)\n return sizer\n\n def __init_serial_number_sizer(self):\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.title = wx.StaticText(self, wx.ID_ANY, u\"序列号: \", wx.DefaultPosition, wx.DefaultSize, 0)\n self.serial_number = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,\n wx.TE_CENTER | wx.TE_PROCESS_ENTER)\n self.serial_number.Bind(wx.EVT_SET_FOCUS, self.click_on_text_ctrl)\n f = wx.Font(23, wx.DEFAULT, wx.NORMAL, wx.NORMAL)\n self.title.SetFont(f)\n self.serial_number.SetFont(f)\n sizer.Add(self.title, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND | wx.TOP | wx.LEFT, 5)\n sizer.Add(self.serial_number, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND | wx.ALL, 1)\n return sizer\n\n def on_button_click(self, event):\n obj = event.GetEventObject()\n name = obj.Name\n # if name == \"refresh\":\n # self.port_choice.SetItems(UART.list_ports())\n if name == \"connect\":\n self.connect()\n elif name == \"disconnect\":\n self.disconnect()\n elif name == \"set_sn\":\n self.set_serial_number()\n\n def connect(self):\n if self.socket is not None:\n self.socket.close()\n try:\n self.socket = Socket.Client(address=\"192.168.1.1\")\n self.socket.get_serial_number()\n self.refresh_serial_number()\n self.Enable(enable=True)\n except SocketError:\n Utility.Alert.Error(u\"连接失败:超时。\")\n return False\n except SocketTimeout:\n Utility.Alert.Error(u\"连接失败:超时。\")\n return False\n except IndexError:\n Utility.Alert.Error(u\"连接失败:目标拒绝。\")\n return False\n except KeyError:\n Utility.Alert.Error(u\"连接失败:目标拒绝。\")\n self.socket.close()\n self.socket = None\n return False\n\n def disconnect(self):\n if self.socket is not None:\n self.socket.close()\n self.socket = None\n self.Layout()\n self.Enable(False)\n self.serial_number.SetValue(\"\")\n\n def set_serial_number(self):\n serial = self.serial_number.GetValue()\n if not serial:\n Utility.Alert.Error(u\"请输入序列号\")\n return\n elif len(serial) > 18:\n Utility.Alert.Error(u\"输入的序列号太长,\\n当前:%s,最大:18\" % len(serial))\n return\n result = self.socket.get_serial_number()\n if result is None:\n Utility.Alert.Error(\"通讯异常,请重新连接。\")\n self.disconnect()\n return False\n if result != \"123456789012345678\":\n dlg = wx.MessageDialog(\n None,\n u\"设备中已存在序列号:\\\"%s\\\",\\n是否要用新的序列号:\\\"%s\\\" 替换。\" % (result, serial),\n u\"消息\",\n wx.YES_NO | wx.ICON_QUESTION\n )\n if dlg.ShowModal() == wx.ID_YES:\n self.update_serial_number(serial=serial)\n dlg.Destroy()\n return\n self.update_serial_number(serial=serial)\n\n def refresh_serial_number(self):\n value = self.socket.get_serial_number()\n if value is None:\n raise KeyError\n elif value == \"123456789012345678\":\n self.serial_number.SetValue(value=\"\")\n else:\n self.serial_number.SetValue(value=value)\n\n def update_serial_number(self, serial):\n for x in range(3):\n self.socket.set_serial_number(serial)\n result = self.socket.get_serial_number()\n if serial == result:\n dialog = Utility.Alert.CountdownDialog(u\"写入成功,1秒后自动关闭。\")\n dialog.Countdown(countdown=1)\n self.disconnect()\n return True\n Utility.Alert.Error(u\"写入失败,请重试。\")\n self.refresh_serial_number()\n return False\n\n def Enable(self, enable=True):\n lst1 = [self.btn_disconnect, self.serial_number, self.btn_write_sn]\n lst2 = [self.btn_connect]\n for ctrl 
in lst1:\n ctrl.Enable(enable)\n for ctrl in lst2:\n ctrl.Enable(not enable)\n if enable:\n if self.serial_number.GetValue() == \"\":\n self.serial_number.SetFocus()\n else:\n self.title.SetFocus()\n\n def click_on_text_ctrl(self, event):\n self.serial_number.SetValue(\"\")\n event.Skip()\n","sub_path":"libs/UserInterface/A02_Write_Serial.py","file_name":"A02_Write_Serial.py","file_ext":"py","file_size_in_byte":7285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"508963498","text":"from rest_framework import routers\n\nfrom . import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'collect', views.CollectViewSet, base_name='collect')\nrouter.register(r'cancel_collect', views.CollectViewSet, base_name='cancel_collect')\nrouter.register(r'forward', views.ForwardViewSet, base_name='forward')\nrouter.register(r'forwarded', views.ForwardedViewSet, base_name='forwarded')\nrouter.register(r'count', views.CountViewSet, base_name='count')\nrouter.register(r'export', views.ExportTaskViewSet, base_name='export')\nrouter.register(r'is_read', views.UserIsReadViewSet, base_name='is_read')\nrouter.register(r'org_recent_event', views.UserOrgRecentEventViewSet, base_name='org_recent_event')\n\nrouter2 = routers.DefaultRouter()\nrouter2.register(r'collect', views.CollectViewSet, base_name='collect')\nrouter2.register(r'org_recent_event', views.UserOrgRecentEventViewSet, base_name='org_recent_event')\nrouter2.register(r'manage_new_message', views.ManageNewMessageViewSet, base_name='manage_new_message')\nrouter2.register(r'org_recent_cooperation', views.UserOrgRecentCooperationViewSet, base_name='org_recent_cooperate')\nrouter2.register(r'org_recent_gc_bulletin', views.UserOrgRecentGCBulletinViewSet, base_name='org_recent_gc_bulletin')\n","sub_path":"scripts/marketbox-medical-svr/django_server/apps/operate/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"498539889","text":"import colorama \r\nimport random \r\nfrom colorama import Fore,Back,Style\r\ncolorama.init()\r\nfrom time import sleep\r\nimport os\r\nif os.name == 'nt':\r\n _ = os.system('cls')\r\nelif os.name == 'mac':\r\n _ = os.system('clear')\r\nelif os.name =='posix':\r\n _ = os.system('clear')\r\nelse:\r\n _ = os.name('clear')\r\ntürkiyenin_illeri =[\"Adana\",\"Adıyaman\",\"Afyon\",\"Ağrı\",\"Amasya\",\"Ankara\",\"Antalya\",\"Artvin\",\"Aydın\",\"Balıkesir\",\"Bilecik\",\"Bingöl\",\"Bitlis\",\"Bolu\",\"Burdur\",\"Bursa\",\"Çanakkale\",\"Çankırı\",\"Çorum\",\"Denizli\",\"Diyarbakır\",\"Edirne\",\"Elazığ\",\"Erzincan\",\"Erzurum\",\"Eskişehir\",\"Gaziantep\",\"Giresun\",\"Gümüşhane\",\"Hakkari\",\"Hatay\",\"Isparta\",\"Mersin\",\"İstanbul\",\"İzmir\",\"Kars\",\"Kastamonu\",\"Kayseri\",\"Kırklareli\",\"Kırşehir\",\"Kocaeli\",\"Konya\",\"Kütahya\",\"Malatya\",\"Manisa\",\"Kahramanmaraş\",\"Mardin\",\"Muğla\",\"Muş\",\"Nevşehir\",\"Niğde\",\"Ordu\",\"Rize\",\"Sakarya\",\"Samsun\",\"Siirt\",\"Sinop\",\"Sivas\",\"Tekirdağ\",\"Tokat\",\"Trabzon\",\"Tunceli\",\"Şanlıurfa\",\"Uşak\",\"Van\",\"Zonguldak\",\"Aksaray\",\"Bayburt\",\"Karaman\",\"Kırıkkale\",\"Batman\",\"Şırnak\",\"Bartın\",\"Ardahan\",\"Iğdır\",\"Yalova\",\"Karabük\",\"Kilis\",\"Osmaniye\",\"Düzce\"]\r\nprint(Fore.RED+\"---------Bil Bakalım Türkiyenin İlleri--------\")\r\nprint(Fore.MAGENTA)\r\nprint(\"\"\"\r\n|****************************|\r\n| Ad:Terbiyesiz |\r\n| Soyad:Robot |\r\n| Amaç:Oyun 
Oynamak |\r\n| NOT:81 İl Var |\r\n| NOT:İlk Harf Büyük Yaz |\r\n| |\r\n| |\r\n| |\r\n| |\r\n|****************************|\r\n\"\"\")\r\nrastgele = random.choice(türkiyenin_illeri)\r\na = 1\r\ndeneme = 0\r\nwhile a ==1:\r\n tahmin=input(Fore.CYAN + \"Bil Bakalım Hangi Şehirdeyim ??\\n-Bulunduğum Şehir;\")\r\n deneme += 1\r\n if (tahmin == rastgele):\r\n print(Fore.CYAN + str(deneme) + \" Kez Denemişsin Karşim Ama Sonunda Kazandın :))\")\r\n print(Fore.GREEN + \"Uygulamadan Çıkılıyor....\")\r\n print(\"-5..\")\r\n sleep(1)\r\n print(\"-4..\")\r\n sleep(1)\r\n print(\"-3..\")\r\n sleep(1)\r\n print(\"-2..\")\r\n sleep(1)\r\n print(\"-1..\")\r\n sleep(1)\r\n print(\"-0..\")\r\n print(\"-Terbiyesiz Yazılımcı İyi Günler Diler...\")\r\n sleep(2)\r\n if os.name == 'nt':\r\n _ = os.system('cls')\r\n elif os.name == 'mac':\r\n _ = os.system('clear')\r\n elif os.name =='posix':\r\n _ = os.system('clear')\r\n else:\r\n _ = os.name('clear')\r\n exit()\r\n else:\r\n if deneme ==1:\r\n print(Fore.YELLOW + \"Üzülme Kanka Hiç Kolay Değil..\") \r\n print(\"İpucu;\" +rastgele[:1])\r\n if deneme == 2:\r\n print(Fore.YELLOW + \"Üzülme Kanka Hiç Kolay Değil..\") \r\n print(\"İpucu;\" +rastgele[:2])\r\n elif deneme == 5:\r\n print(Fore.YELLOW + \"Üzülme Kanka Hiç Kolay Değil..\") \r\n print(\"İpucu;\" +rastgele[:3])\r\n elif deneme ==10:\r\n print(Fore.YELLOW + \"Üzülme Kanka Hiç Kolay Değil..\") \r\n print(\"İpucu;\" +rastgele[:4])\r\n elif deneme ==17:\r\n print(Fore.YELLOW + \"Üzülme Kanka Hiç Kolay Değil..\") \r\n print(\"İpucu;\" +rastgele[:5])","sub_path":"FindCity.py","file_name":"FindCity.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"189650957","text":"from .task_generator_scannet import TaskGeneratorScannet\nfrom .task_generator_coco2014 import TaskGeneratorCoco2014\nfrom .task_generator_cocostuff import TaskGeneratorCocoStuff\n\ntask_generators = {\n 'coco2014': TaskGeneratorCoco2014,\n 'scannet': TaskGeneratorScannet,\n 'cocostuff': TaskGeneratorCocoStuff,\n}\n\n__all__ = ['get_task_generator']\n\ndef get_task_generator(name, **kwargs):\n return task_generators[name.lower()](**kwargs)","sub_path":"src/task/task_generator_collector.py","file_name":"task_generator_collector.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644395453","text":"from collections import namedtuple\nfrom algorithms.dynamic_programming.fibonacci import *\n\ndef test_fibonacci_recusion():\n FibTest = namedtuple('RercursiveFibTest', ['input', 'wanted'])\n tests = [\n FibTest(5, 5),\n FibTest(10, 55),\n FibTest(20, 6765),\n FibTest(25, 75025)\n ]\n\n for test in tests:\n assert test.wanted == fibonacci(test.input)\n\ndef test_fibonacci_dp():\n FibTest = namedtuple('DynamicProgramminFibTest', \n ['input', 'wanted'])\n tests = [\n FibTest(5, 5),\n FibTest(10, 55),\n FibTest(20, 6765),\n FibTest(25, 75025)\n ]\n\n for test in tests:\n assert test.wanted == dp_fibonacci(test.input)","sub_path":"algorithms/dynamic_programming/tests/fibonacci_test.py","file_name":"fibonacci_test.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"203563563","text":"import json\nimport boto3\nfrom urllib import parse\n\ns3 = boto3.client('s3')\n\ndef get_target_key(source_key):\n\n ### defining file key (prefix + filename)\n city, year, month, day, query = 
['']*5\n    for s in source_key.split('/'):\n        city = s if 'city' in s else city\n        year = s if 'year' in s else year\n        month = s if 'month' in s else month\n        day = s if 'day' in s else day\n        query = s.split('=')[-1] if 'query' in s else query\n\n    city = city.replace(\"ã\", \"a\").replace(\" \", \"\")\n\n    prefix = '/'.join(map(lambda x: x.split('=')[-1].zfill(2), [year, month, day, city])) + '/'\n\n    if source_key[-3:] == 'csv':\n        name = query + '.csv'\n    elif source_key[-3:] == 'pdf':\n        name = source_key.split('/')[-1]\n\n    return prefix + name\n\n\ndef lambda_handler(event, context):\n\n    source_bucket = event['Records'][0]['s3']['bucket']['name']\n    source_key = parse.unquote_plus(event['Records'][0]['s3']['object']['key'])\n    copy_source = {'Bucket': source_bucket, 'Key': source_key}\n\n    waiter = s3.get_waiter('object_exists')\n    waiter.wait(Bucket=source_bucket, Key=source_key)\n\n    target_bucket = 'hugo-data'\n    target_key = get_target_key(source_key)\n\n    ### copying files\n    s3.copy_object(Bucket=target_bucket, Key=target_key,\n                   CopySource=copy_source, ACL='public-read')\n","sub_path":"dashboard/aws/lambdas/dashboard_copy_file_to_public_bucket.py","file_name":"dashboard_copy_file_to_public_bucket.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"492763023","text":"'''\nA CNN example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits\n(http://yann.lecun.com/exdb/mnist/)\n\nCode references:\nhttps://github.com/shouvikmani/Tensorflow-Deep-Learning-Tutorial/blob/master/tutorial.ipynb\nhttps://github.com/aymericdamien/TensorFlow-Examples/\n\nThe source code was modified by S.W. Oh.\n'''\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# import Dense (fully-connected) layer and Convolution layer\nfrom util.layer import Dense, Conv2D\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"./data/\", one_hot=True)\n\n# Parameters\nlearning_rate = 0.01\ntraining_epochs = 5\nbatch_size = 10\ndisplay_step = 1\n\n###### Build graph ######################################################\n\n# Place holders\nx = tf.placeholder(tf.float32, [None,28,28,1]) # mnist data image of shape [28,28,1]\ny = tf.placeholder(tf.float32, [None,10]) # 0-9 digits recognition => 10 classes\n\n# Construct CNN \nh = Conv2D(x, [3,3,1,4], [1,1,1,1], 'SAME', 'conv1') # shape: [Batch,28,28,4]\nh = tf.nn.relu(h)\nh = tf.nn.max_pool(h, [1,2,2,1], [1,2,2,1], 'SAME') # shape: [Batch,14,14,4]\n\nh = Conv2D(h, [3,3,4,8], [1,1,1,1], 'SAME', 'conv2') # shape: [Batch,14,14,8]\nh = tf.nn.relu(h)\nh = tf.nn.max_pool(h, [1,2,2,1], [1,2,2,1], 'SAME') # shape: [Batch,7,7,8]\n\nh = tf.reshape(h, [-1,7*7*8]) # flatten [Batch,7,7,8] -> [Batch,7*7*8]\nlogit = Dense(h, [7*7*8,10], 'fc1')\n\npred = tf.nn.softmax(logit) # Softmax\n\n# Directly compute softmax cross-entropy loss from logit (to ensure stability and avoid overflow)\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=y))\n\n# Define optimizer and train_op\ntrain_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n#########################################################################\n\n\n\n###### Start Training ###################################################\n# Open a Session\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n\n    # Training cycle\n    for epoch in range(training_epochs):\n        avg_cost = 0.\n        total_batch = int(mnist.train.num_examples/batch_size)\n        # Loop over all batches\n        for i in range(total_batch):\n            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n            batch_xs = np.reshape(batch_xs, [batch_size,28,28,1])\n            # Run optimization op (backprop) and cost op (to get loss value)\n            _, c = sess.run([train_op, cost], feed_dict={x: batch_xs, y: batch_ys})\n            # Compute average loss\n            avg_cost += c / total_batch\n        # Display logs per epoch step\n        if (epoch+1) % display_step == 0:\n            print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\n\n    print(\"Optimization Finished!\")\n\n    # Test model\n    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n    # Calculate accuracy\n    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n    print(\"Accuracy:\", accuracy.eval({x: np.reshape(mnist.test.images, [-1,28,28,1]), y: mnist.test.labels}))","sub_path":"03_CNN_MNIST.py","file_name":"03_CNN_MNIST.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"227529051","text":"from pyspark.sql import Row, SparkSession\nfrom pyspark.sql.functions import col, lower, lit, upper, initcap, length, from_unixtime, substring, length, expr, \\\n    current_date\n##redditor = df.drop('_c0')\nimport getpass\nimport logging\n\nusername = getpass.getuser()\n\n\ndef main(spark):\n    \"\"\"Main ETL definition\n\n    :return: None\n    \"\"\"\n    logging.basicConfig(level=logging.CRITICAL,\n                        format='%(asctime)s:%(levelname)s:%(message)s')\n\n    data = extract_task(spark)\n    data_transformed = transformation_task(data)\n    load_data(data_transformed)\n\n    # log the success and terminate spark application\n    # log.warn('Job is Finished')\n    spark.stop()\n    return None\n\n\ndef extract_task(spark):\n    \"\"\"Load data from CSV file format.\n\n    :param spark: Spark session object.\n    :return: Spark DataFrame.\n    \"\"\"\n    cat_df = spark.read.csv('/user/xxx/userCryptoStorage/valid_category*', header=True)\n\n    return cat_df\n\n\ndef transformation_task(df):\n    \"\"\"Transform the original dataset.\n\n    :param df: Input DataFrame.\n    :return: Transformed DataFrame.\n    \"\"\"\n    ##Prepare dataframes\n    spark.sql('use xxx_usercrypto_db')\n    ##create staging table\n    # spark.sql('drop table if exists stg_users')\n    # df.write.saveAsTable('stg_users')\n    cat_df = df.drop('_c0')\n    mainDF = spark.sql('select * from ods_category')\n    delta = cat_df.withColumn('updated_date', current_date()).select(col('category_id'),\n                                                                     col('name').alias('category_name'),\n                                                                     col('market_cap'), col('updated_date'))\n\n    ##joining dataframes\n    main = mainDF.alias('main')\n    delta = delta.alias('delta')\n    updatedDF = main. 
\\\n join(delta, main.category_id == delta.category_id, 'outer')\n upsertDF = updatedDF.where((~col(\"main.category_id\").isNull()) & (~col(\"delta.updated_date\").isNull())).select(\n \"delta.*\").distinct()\n unchangedDF = updatedDF.where(col(\"main.category_id\").isNull()).select(\"delta.*\")\n ##delta= redditor_df.withColumn('updated_date',lit(None).cast('string'))\n unchangedDF = unchangedDF.withColumn('updated_date', lit(None).cast('string'))\n finalDF = upsertDF.union(unchangedDF)\n return finalDF\n\n\ndef load_data(finalDF):\n \"\"\"write to table.\n\n :param df: DataFrame to print.\n :return: None\n \"\"\"\n finalDF.createOrReplaceTempView('temp_finaldf')\n spark.sql('''insert OVERWRITE TABLE ods_category SELECT * FROM temp_finaldf''')\n s = spark.sql('select count(*) from ods_category')\n print(s)\n return None\n\n\n# entry point for Pyspark ETL Application\nif __name__ == '__main__':\n # Start Spark Application and Spark Session,logger and config\n spark = SparkSession. \\\n builder. \\\n config('spark.ui.port', '0'). \\\n enableHiveSupport(). \\\n appName(f'{username} | cda_ods_category'). \\\n master('yarn'). \\\n getOrCreate()\n\n main(spark)\n","sub_path":"SparkPipelines/ods_category_ld.py","file_name":"ods_category_ld.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"324903687","text":"# coding=utf-8\nimport _env\nfrom solo.web.mongo import Doc\nfrom app.pspm.controller.report_interface.report import ReportInterface\nfrom app.pspm.controller.tools import EnumMap\n\n\nPLATFORM_MAP = EnumMap.enum('Android', 'Ios')\nPUBLISHER_TYPE = EnumMap.enum('SDK', 'Online', 'Offline')\nADVERTISER_TYPE = EnumMap.enum('S2S', 'API')\n\n\nclass ReportHour(Doc, ReportInterface):\n '''\n pspm report data\n '''\n structure = dict(\n _id=int,\n advertiser_id=int,\n advertiser_type=int, # S2S: 1, API: 2\n publisher_id=int,\n campaign_id=int,\n geo=str,\n platform=int, # Android: 1, Ios: 2\n category=str,\n placement=str,\n package=str,\n quality_1=int,\n quality_2=int,\n quality_3=int,\n publisher_type=int, # SDK: 1, Online: 2, Offline: 3\n publisher_slot=str,\n sub_1=str,\n sub_2=str,\n sub_3=str,\n am_id=int,\n bd_id=int,\n pm_id=int,\n impressions=int,\n # clicks=int,\n gross_clicks=int,\n unique_clicks=int,\n conversions=int,\n revenue=float,\n cost=float,\n profit=float,\n hour=str, # for example: 2016-03-12 01:00\n day=str, # for example: 2016-03-12\n month=str, # for example: 2016-10\n year=str, # for example: 2016\n create_time=str\n )\n","sub_path":"pspm/model/report_hour.py","file_name":"report_hour.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176849148","text":"#!/data/exec/python/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/12/1 11:47\n# @Author : YuZhiYi\n# @Email : yuzhiyi@54.com\nimport time\nimport json\nfrom kafka import KafkaConsumer,TopicPartition\nkafka_cluster = ['192.168.10.181:9092','192.168.10.182:9092','192.168.10.183:9092']\ntopic = 'foobar'\nconsumer = KafkaConsumer(group_id='mygroup02',\n bootstrap_servers=kafka_cluster,\n key_deserializer=bytes.decode, # 键的反序列化器 默认为None 传入 b'key'\n value_deserializer=lambda v: json.loads(v.decode('utf-8')), # 值的反序列化器 默认为None 传入 b'value'\n max_poll_records = 500, # 单次调用poll返回的最大记录数\n enable_auto_commit = False, # 设置是否自动提交偏移量 自动提交虽然方便,但是没有留余地来避免重复消息处理\n #auto_commit_interval_ms = 5000, # 如果 enable_auto_commit 设置为True 则为自动提交的间隔\n 
)\n\nconsumer.subscribe(topics=[topic,]) # subscribe to the topic\n\n# alternatively:\n\n# specify topic, partitions and offsets explicitly\n# partitions = [ TopicPartition(topic=topic,partition=i) for i in range(16)]\n# consumer.assign(partitions=partitions)\n# for i in range(16):\n#     consumer.seek(partitions[i],0) # partition[i], offset\n\n\nindex = 0\ncount = 0\ntry:\n    while True:\n        msg = consumer.poll(timeout_ms=5, max_records=None, update_offsets=True) # poll() always returns records written to Kafka by producers that this consumer has not yet read\n        index += 1\n        for topic_partition in msg.values():\n            for consumer_record in topic_partition:\n                count += 1\n                print('batch %s, message %s: topic %s partition %s offset %s key %s value %s' % (index,count,consumer_record.topic, consumer_record.partition, consumer_record.offset, consumer_record.key, consumer_record.value))\n        else: # for-else: runs once the whole batch has been processed, then commits\n            try:\n                consumer.commit()\n            except Exception as e:\n                print(e)\n        time.sleep(2)\nexcept Exception as e:\n    print(e)\nfinally:\n    consumer.close()\n","sub_path":"python_scripts/kafka/example02/my_kafka_consumer_poll_commit_sync.py","file_name":"my_kafka_consumer_poll_commit_sync.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520308468","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, request, jsonify, make_response, g, redirect\nfrom common.libs.Helper import ops_render,getCurrentDate,iPagination,getDictFilterField\nfrom common.models.User import (User)\nfrom common.libs.user.UserService import (UserService)\n\nfrom common.libs.UrlManager import (UrlManager)\nfrom application import app, db\nimport json\n\nroute_SignIn = Blueprint( 'index_page',__name__ )\n\n@route_SignIn.route(\"/\")\ndef index():\n    return ops_render( \"SignIn/index.html\" )\n\n@route_SignIn.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n    if request.method == \"GET\":\n        g.current_user = check_login()\n        if g.current_user:\n            return redirect(UrlManager.buildUrl(\"/blog\"))\n        return ops_render(\"SignIn/index.html\")\n\n    resp = {'code': 200, 'msg': 'Login successful~~', 'data': {}}\n    req = request.values\n    login_name = req['login_name'] if 'login_name' in req else ''\n    login_pwd = req['login_pwd'] if 'login_pwd' in req else ''\n\n    if login_name is None or len(login_name) < 1:\n        resp['code'] = -1\n        resp['msg'] = \"Please enter a valid login username~~\"\n        return jsonify(resp)\n\n    if login_pwd is None or len(login_pwd) < 1:\n        resp['code'] = -1\n        resp['msg'] = \"Please enter a valid password~~\"\n        return jsonify(resp)\n\n    user_info = User.query.filter_by(login_name=login_name).first()\n    if not user_info:\n        resp['code'] = -1\n        resp['msg'] = \"Please enter a valid login username~~\"\n        return jsonify(resp)\n\n\n\n    if user_info.login_pwd != UserService.genePwd( login_pwd,user_info.login_salt ):\n        resp['code'] = -1\n        resp['msg'] = \"Please enter a valid login username and password-2~~\"\n        return jsonify(resp)\n\n    g.current_user=user_info\n    response = make_response(json.dumps({'code': 200, 'msg': 'Login successful~~'}))\n    response.set_cookie(app.config['AUTH_COOKIE_NAME'], '%s#%s' % (\n        UserService.geneAuthCode(user_info), user_info.uid), 60 * 60 * 24 * 120) # keep for 120 days\n    return response\n\n@route_SignIn.route(\"/register\",methods = [ 'GET','POST'])\ndef register():\n    if request.method == \"GET\":\n        return ops_render(\"SignIn/register.html\")\n\n    resp = {'code': 200, 'msg': 'Operation successful~~', 'data': {}}\n    req = request.values\n\n    username = req['username'] if 'username' in req else ''\n    nickname = req['nickname'] if 'nickname' in req else ''\n    password = req['password'] if 'password' in req else ''\n    email = req['email'] if 'email' in req else ''\n    \n    User_info = User.query.filter_by(login_name=username).first()\n    if 
User_info:\n        resp['code'] = -1\n        resp['msg'] = \"This account already exists!\"\n        return jsonify(resp)\n\n    salt= UserService.geneSalt()\n    print(salt)\n    password= UserService.genePwd(password,salt)\n\n\n    model_User_info = User()\n    model_User_info.created_time = getCurrentDate()\n    model_User_info.login_name = username\n    model_User_info.nickname = nickname\n    model_User_info.login_salt = salt\n    model_User_info.login_pwd = password\n    model_User_info.email=email\n    model_User_info.updated_time = getCurrentDate()\n    db.session.add(model_User_info)\n    db.session.commit()\n    return jsonify(resp)\n\n\ndef check_login():\n    cookies = request.cookies\n    auth_cookie = cookies[app.config['AUTH_COOKIE_NAME']] if app.config['AUTH_COOKIE_NAME'] in cookies else None\n\n\n    if '/api' in request.path:\n        app.logger.info(request.path)\n        auth_cookie = request.headers.get(\"Authorization\")\n        app.logger.info( request.headers.get(\"Authorization\") )\n\n    if auth_cookie is None:\n        return False\n\n    auth_info = auth_cookie.split(\"#\")\n    if len(auth_info) != 2:\n        return False\n\n    try:\n        user_info = User.query.filter_by(uid=auth_info[1]).first()\n    except Exception:\n        return False\n\n    if user_info is None:\n        return False\n\n    if auth_info[0] != UserService.geneAuthCode( user_info ):\n        return False\n\n    if user_info.status != 1:\n        return False\n\n    return user_info\n\n\n\n\n","sub_path":"web/controllers/SignIn/SignIn.py","file_name":"SignIn.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"307211636","text":"\"\"\" Compiled: 2020-09-18 10:38:49 \"\"\"\n\n#__src_file__ = \"extensions/settlement/etc/upgrade/FSettlementUpgradeModuleAdmin.py\"\nfrom __future__ import print_function\nimport acm\n\nclass ModuleIndex:\n    PARAMETER = 0\n    HOOKS = 1\n    \nclass ModuleAdministrator:\n\n    __thisAdministrator = None\n    \n    def __init__(self):\n        if ModuleAdministrator.__thisAdministrator:\n            raise ModuleAdministrator.__thisAdministrator\n\n        self.__modules = dict()\n        ModuleAdministrator.__thisAdministrator = self\n    \n    def GetModule(self, moduleIndex):\n        module = None\n        name = self.ModuleNameFromIndex(moduleIndex)\n        if (moduleIndex in self.__modules):\n            module = self.__modules[moduleIndex]\n        if not module:\n            module = acm.FAel[self.ModuleNameFromIndex(moduleIndex)]\n        if not module:\n            module = acm.FAel()\n            module.Name(self.ModuleNameFromIndex(moduleIndex))\n            print(name + ' created')\n        if (moduleIndex not in self.__modules):\n            self.__modules[moduleIndex] = module\n        return module\n    \n    def ModuleNameFromIndex(self, moduleIndex):\n        if moduleIndex == 0:\n            return 'FSettlementParameters'\n        elif moduleIndex == 1:\n            return 'FSettlementHooks'\n        else:\n            return ''\n\n    def AddVariableAndValue(self, moduleIndex, varName, value):\n        res = False\n        module = GetModuleAdministrator().GetModule(moduleIndex)\n        if (module):\n            module.Text(module.Text() + '\\n' + varName + ' = ' + str(value) + '\\n')\n            res = True\n        return res\n\n    def AddFreeTextString(self, moduleIndex, text):\n        res = False\n        module = GetModuleAdministrator().GetModule(moduleIndex)\n        if (module):\n            module.Text(module.Text() + '\\n' + str(text) + '\\n')\n            res = True\n        return res\n\n    def SaveModule(self, moduleIndex):\n        res = False\n        module = GetModuleAdministrator().GetModule(moduleIndex)\n        if module:\n            try:\n                module.Commit()\n                self.__modules[moduleIndex] = module\n                res = True\n                print('Saved module: ' + GetModuleAdministrator().ModuleNameFromIndex(moduleIndex))\n            except Exception as e:\n                print('Could not save the module with name %s. 
%s' % (GetModuleAdministrator().ModuleNameFromIndex(moduleIndex), str(e)))\n        \n        return res \n    \n\ndef GetModuleAdministrator(singletonModuleAdmin = ModuleAdministrator):\n    moduleAdmin = None\n    try:\n        moduleAdmin = singletonModuleAdmin()\n    except ModuleAdministrator as anInstance:\n        moduleAdmin = anInstance\n    return moduleAdmin\n\n","sub_path":"Extensions/Default/FPythonCode/FSettlementUpgradeModuleAdmin.py","file_name":"FSettlementUpgradeModuleAdmin.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"566575300","text":"\"\"\" Wrappers needed for execution\n\"\"\"\n\nimport logging\n\nfrom json import loads\n\nfrom ..execution_support.arlexecute import arlexecute\nfrom workflows.arlexecute.processing_component_interface.arl_json.json_assertions import assert_valid_schema\nfrom data_models.parameters import arl_path\n\ndef initialise_config_wrapper(config_file):\n    \"\"\"Obtain the configuration from a JSON file, validating against arl_schema\n    \n    :param config_file: Name of file containing JSON configuration\n    :return: configuration\n    \"\"\"\n    with open(config_file, 'r') as file:\n        config = loads(file.read())\n    \n    assert_valid_schema(config, arl_path('workflows/arlexecute/processing_component_interface/arl_json/arl_schema'\n                                         '.json'))\n    \n    return config\n\n\ndef initialise_logging_wrapper(conf):\n    \"\"\" Initialise logging from JSON configuration\n    \n    See arl_schema.json\n\n    :param conf: JSON configuration\n    \"\"\"\n    if conf['logging']['level'] == \"INFO\":\n        level = logging.INFO\n    else:\n        level = logging.DEBUG\n    \n    logging.basicConfig(filename=conf[\"buffer\"][\"directory\"]+conf['logging']['filename'],\n                        filemode=conf['logging']['filemode'],\n                        format=conf['logging']['format'],\n                        datefmt=conf['logging']['datefmt'],\n                        level=level)\n\n\ndef initialise_execution_wrapper(conf):\n    \"\"\"Initialise the execution framework from JSON configuration\n    \n    See arl_schema.json\n    \n    :param conf: JSON configuration\n    \"\"\"\n    arlexecute.set_client(use_dask=conf[\"execute\"][\"use_dask\"],\n                          n_workers=conf[\"execute\"][\"n_workers\"],\n                          memory_limit=conf[\"execute\"][\"memory_limit\"])\n    arlexecute.run(initialise_logging_wrapper, conf)\n","sub_path":"workflows/arlexecute/processing_component_interface/execution_helper.py","file_name":"execution_helper.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"654278458","text":"#!/usr/bin/python3\nimport pickle\nimport sys\n\nimport numpy as np\n\n\nMIN_THRESHOLD = 0.7\nMAX_THRESHOLD = 1.9\n\n\ndef save_label(argsv):\n    base_directory = argsv[1]\n\n    image_name = argsv[2].split(' ')\n\n    with open(base_directory+'/embeddings.pickle', \"rb\") as openfile:\n        image_embeding_name = pickle.load(openfile)\n\n    try:\n        image_embedings = []\n        for name in image_name:\n            i = image_embeding_name['names'].index(name)\n            image_embedings.append(image_embeding_name['embeddings'][i])\n            del image_embeding_name['embeddings'][i]\n            del image_embeding_name['names'][i]\n\n    except ValueError: # .index() raises ValueError when a name is missing\n        print('image name not in pickle file')\n\n    diff_embeding = np.zeros([\n        len(image_embeding_name['names']),\n        len(image_embeding_name['embeddings'][0])\n    ])\n\n    labels = []\n    names = []\n    for i, img_embeding in enumerate(image_embedings):\n        for j, embeding_ in enumerate(image_embeding_name['embeddings']):\n            diff = np.subtract(img_embeding, embeding_)\n            diff_embeding[j] = np.sum(np.square(diff))\n\n        mean = 
np.mean(diff_embeding)\n        if mean < MIN_THRESHOLD:\n            label = 1\n\n        elif MIN_THRESHOLD <= mean < MAX_THRESHOLD:\n            label = 2\n\n        else:\n            label = 3\n\n        labels.append(label)\n        names.append(image_name[i])\n\n    with open(base_directory+'/label.txt', 'w') as text_file:\n        for i in range(len(names)):\n            text_file.write('file_name: {}, label: {}\\n'.format(names[i], labels[i]))\n\n\nif __name__ == '__main__':\n    save_label(sys.argv)\n\n","sub_path":"compute embeding (vggface2)/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501602357","text":"import matplotlib.pyplot as plt \nimport numpy as np\n\nmedal_count_men = [192, 128, 66]\nmedal_count_women = [123, 75, 41]\nmedal_type = [\"Gold\", \"Silver\", \"Bronze\"]\n\nxpos = np.arange(len(medal_type))\n\nplt.ylabel(\"Medals Won\")\nplt.xlabel(\"Type of Medal\")\n\nplt.xticks(xpos, medal_type)\nplt.bar(xpos-0.2, medal_count_men, width=0.4, label=\"Men\", edgecolor=\"black\", color=(255/255, 10/255, 10/255))\nplt.bar(xpos+0.2, medal_count_women, width=0.4, label=\"Women\", edgecolor=\"black\", color=(255/255, 255/255, 255/255))\n\nplt.legend()\nplt.show()\n","sub_path":"data/men_vs_women_medals.py","file_name":"men_vs_women_medals.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"447289226","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate,login\nfrom .models import Profile\nfrom .forms import LoginForm,UserRegistrationForm,ProfileEditForm,UserEditForm\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n\ndef user_login (request):\n    if request.method==\"POST\":\n        form=LoginForm(request.POST)\n        if form.is_valid():\n            data=form.cleaned_data\n            user=authenticate(username=data['username'],password=data['password'])\n            if user is not None:\n                if user.is_active:\n                    login(request,user)\n                    return HttpResponse('Authenticated successfully')\n                else:\n                    return HttpResponse('Disabled account')\n            else:\n                return HttpResponse('Invalid login')\n    else:\n        form=LoginForm()\n\n    return render(request,'account/login.html',{'form':form})\n\n@login_required\ndef dashboard(request):\n    return render(request,\n                  'account/dashboard.html',\n                  {'section':\"dashboard\"})\n\ndef register(request):\n    if request.method=='POST':\n        user_form=UserRegistrationForm(request.POST)\n        if user_form.is_valid():\n            new_user=user_form.save(commit=False)\n            new_user.set_password(user_form.cleaned_data['password'])\n            new_user.save()\n            profile=Profile.objects.create(user=new_user)\n            return render(request,'account/register_done.html',\n                          {'new_user':new_user})\n    else:\n        user_form=UserRegistrationForm()\n    return render(request,'account/register.html',\n                  {'user_form':user_form})\n@login_required\ndef edit(request):\n    if request.method==\"POST\":\n        user_form =UserEditForm(instance=request.user,data=request.POST,files=request.FILES)\n        profile_form=ProfileEditForm(instance=request.user.profile,data=request.POST,files=request.FILES)\n        if user_form.is_valid() and profile_form.is_valid():\n            user_form.save()\n            profile_form.save()\n    else:\n        user_form=UserEditForm(instance=request.user)\n        profile_form=ProfileEditForm(instance=request.user.profile)\n    return render(request,'account/edit.html',{'user_form':user_form,\n                                 
'profile_form':profile_form})\n\n","sub_path":"第五章/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45759751","text":"from __future__ import division\nPKG='test_goal_controller'\n\nimport unittest\nfrom math import pi, sin, cos\nfrom diff_drive.goal_controller import GoalController\nfrom diff_drive.pose import Pose\n\n\nclass TestGoalController(unittest.TestCase):\n\n    def setUp(self):\n        self.controller = GoalController()\n\n    def testAtGoal(self):\n        cur = Pose()\n        desired = self.controller.getVelocity(cur, cur, 0.1)\n        self.assertEqual(desired.xVel, 0)\n        self.assertEqual(desired.thetaVel, 0)\n\n    def testStraightAhead(self):\n        cur = Pose()\n        goal = Pose()\n        goal.x = 1\n        desired = self.controller.getVelocity(cur, goal, 0.1)\n        self.assertGreater(desired.xVel, 0)\n        self.assertEqual(desired.thetaVel, 0)\n\n    def testRotateLeft(self):\n        cur = Pose()\n        goal = Pose()\n        goal.theta = pi/2\n        desired = self.controller.getVelocity(cur, goal, 0.1)\n        self.assertEqual(desired.xVel, 0)\n        self.assertGreater(desired.thetaVel, 0)\n\n    def testRotateRight(self):\n        cur = Pose()\n        goal = Pose()\n        goal.theta = -pi/2\n        desired = self.controller.getVelocity(cur, goal, 0.1)\n        self.assertEqual(desired.xVel, 0)\n        self.assertLess(desired.thetaVel, 0)\n\n    def testCurveLeft(self):\n        cur = Pose()\n        goal = Pose()\n        goal.x = 1\n        goal.y = 1\n        goal.theta = pi\n        desired = self.controller.getVelocity(cur, goal, 0.1)\n        self.assertGreater(desired.xVel, 0)\n        self.assertGreater(desired.thetaVel, 0)\n\n    def testCurveRight(self):\n        cur = Pose()\n        cur.theta = pi\n        goal = Pose()\n        goal.x = 1\n        goal.y = 1\n        desired = self.controller.getVelocity(cur, goal, 0.1)\n        self.assertGreater(desired.xVel, 0)\n        self.assertLess(desired.thetaVel, 0)\n\n    def testButtonHookLeft(self):\n        cur = Pose()\n        goal = Pose()\n        goal.x = 1\n        goal.theta = pi\n        desired = self.controller.getVelocity(cur, goal, 0.1)\n        self.assertGreater(desired.xVel, 0)\n        self.assertLess(desired.thetaVel, 0)\n\n    def testButtonHookRight(self):\n        cur = Pose()\n        goal = Pose()\n        goal.x = 1\n        goal.theta = -pi\n        desired = self.controller.getVelocity(cur, goal, 0.1)\n        self.assertGreater(desired.xVel, 0)\n        self.assertGreater(desired.thetaVel, 0)\n\n    def testGoToGoal(self):\n        self.checkGoToGoal(0, 0, 0, 1, 1, pi/2)\n        self.checkGoToGoal(0, 0, 0, 1, 1, -pi/2)\n        self.checkGoToGoal(0, 0, 0, 0, 1, pi)\n        self.checkGoToGoal(0, 0, 0, 1, 0, pi)\n\n    def checkGoToGoal(self, x0, y0, th0, x1, y1, th1):\n        dTol = 0.05 # 5cm\n        thTol = 0.04 # Approx 2.5 degrees\n\n        self.controller.setLinearTolerance(dTol)\n        self.controller.setAngularTolerance(thTol)\n\n        cur = Pose()\n        cur.x = x0\n        cur.y = y0\n        cur.theta = th0\n\n        goal = Pose()\n        goal.x = x1\n        goal.y = y1\n        goal.theta = th1\n\n        lastDistance = self.controller.getGoalDistance(cur, goal)\n        dT = 0.05\n        for i in range(1000):\n            if self.controller.atGoal(cur, goal):\n                return\n\n            desired = self.controller.getVelocity(cur, goal, dT)\n            cur.x += dT * desired.xVel * cos(cur.theta)\n            cur.y += dT * desired.xVel * sin(cur.theta)\n            cur.theta += dT * desired.thetaVel\n\n        # If we get here, we didn't reach the goal.\n        self.fail('Did not reach the goal.')\n    \nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_goal_controller.py","file_name":"test_goal_controller.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"74562873","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 11 15:15:20 2018\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.styles import Color, PatternFill, Font, Fill , Border\r\nfrom openpyxl.styles.colors import RED\r\n\r\n\r\n\r\nfilePath=\"D:/citiMLJune2018/day1/AnnualReport.xlsx\"\r\n\r\nwb = load_workbook(filePath,read_only=False)\r\n\"\"\"\r\nimport calendar\r\n\r\n\r\n\r\n\r\nfor month in calendar.month_name:\r\n print (month)\r\n wb.create_sheet(month +\"_2018\")\r\n \r\nwb.save(filePath)\r\nwb.close()\r\n\r\n\r\n\"\"\"\r\nimport datetime\r\n\r\ncurrentMonth = datetime.date.today().strftime(\"%B\")\r\nprint(currentMonth)\r\nsheetRef=wb.get_sheet_by_name(currentMonth+\"_2018\")\r\n\r\n#import random\r\n\"\"\"\r\nfor row in range(1,100):\r\n for col in range (1,10):\r\n sheetRef.cell(column=col,row=row,value=\"%d\" %(random.randint(1,10000)))\r\n\r\nwb.save(filePath)\r\n\"\"\"\r\nfor row in range(1,100):\r\n for col in range(1,10):\r\n cell = sheetRef.cell(row=row,column=col)\r\n if(int(cell.value) > 5000):\r\n cell.font = Font(size=18,color= RED)\r\n \r\nwb.save(filePath) \r\nwb.close()","sub_path":"day1/ExcelWriting.py","file_name":"ExcelWriting.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499816259","text":"import os\nimport sys\n\ndef gen_patch(source,target,patch_file):\n f=open(os.path.join(source,'defects4j.build.properties'))\n for line in f:\n line=line.strip()\n if line[0]=='#':\n continue\n pair=line.split('=')\n if pair[0]=='d4j.dir.src.classes':\n path2src=pair[1]\n break\n os.system('diff -r -u -w %s %s > %s'%(os.path.join(source,path2src),os.path.join(target,path2src),patch_file))\n\nif __name__=='__main__':\n gen_patch(sys.argv[0],sys.argv[1],sys.argv[2])\n","sub_path":"tool/patches/gen_patch.py","file_name":"gen_patch.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"201228068","text":"def is_even(i):\n return (i % 2) == 0\n\nfor i in range(20):\n if is_even(i):\n print(i, \"even\")\n else:\n print(i, \"odd\")\n\n# nested functions and scoping \ndef g(x):\n def h():\n x = 'abc'\n print('x in h():', x)\n \n x += 1\n h() # no return in h(): return None\n return x\n\nprint(g(3))","sub_path":"0.MIT-6.0001/notes/5.functions.py","file_name":"5.functions.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"96044840","text":"import datetime\nimport lxml.html\nimport os\nimport pytz\nimport requests\n\nfrom flask import Flask, redirect, render_template, request\n\napp = Flask(__name__)\n\ndef next_weekday(d, weekday):\n days_ahead = weekday - d.weekday()\n if days_ahead <= 0: # Target day already happened this week\n days_ahead += 7\n return d + datetime.timedelta(days_ahead - 7)\n\n@app.route(\"/\")\ndef index():\n\n # get the weather\n token = os.environ.get(\"WEATHER_TOKEN\")\n res = requests.get(\"http://api.openweathermap.org/data/2.5/forecast/daily?q=Cambridge,MA&cnt=2&units=imperial\", params={\"APPID\": token})\n if res.status_code == 200:\n data = res.json()\n weather_status = data[\"list\"][1][\"weather\"][0][\"main\"]\n low = data[\"list\"][1][\"temp\"][\"min\"]\n high = data[\"list\"][1][\"temp\"][\"max\"]\n timestamp = data[\"list\"][1][\"dt\"]\n date = 
datetime.datetime.fromtimestamp(timestamp).replace(tzinfo=pytz.timezone(\"America/New_York\")).strftime('%m-%d-%Y')\n        weather = {\n            \"timestamp\": date,\n            \"status\": weather_status,\n            \"low\": low,\n            \"high\": high\n        }\n    else:\n        weather = None\n\n    return render_template(\"index.html\", weather=weather)\n\n@app.route(\"/extract/\")\ndef extract():\n    url = request.args.get(\"url\")\n    res = requests.get(url)\n    if res.status_code != 200:\n        return \"ERROR: Could not parse website.\"\n\n    contents = lxml.html.fromstring(res.text)\n    text = contents.get_element_by_id(\"text\")\n\n    # remove all the images\n    images = text.find_class(\"shortcodes-object\")\n    for image in images:\n        image.drop_tree()\n\n    ids_to_remove = [\"subscribe-link\", \"previous-article-bottom\", \"article-tags\"]\n    for id_to_remove in ids_to_remove:\n        elt = text.get_element_by_id(id_to_remove)\n        if elt is not None:\n            elt.drop_tree()\n\n    classes_to_remove = [\"article-recommended-container\"]\n    for class_to_remove in classes_to_remove:\n        for element in text.find_class(class_to_remove):\n            element.drop_tree()\n\n    res = lxml.html.tostring(text)\n\n    return res\n\n@app.route(\"/crossword/\")\ndef crossword():\n    # get the date of the most recent Monday\n    d = datetime.datetime.now()\n    next_monday = next_weekday(d, 0)\n    date_string = next_monday.strftime(\"%y%m%d\")\n    url = \"http://bbs.amuniversal.com/web/content/UFS_Puzzles/Todays_Crossword_Dailies/dax{}_week.zip\".format(date_string)\n    return redirect(url)\n\nif __name__ == \"__main__\":\n    port = int(os.environ.get('PORT', 5000))\n    app.run(host='0.0.0.0', port=port)\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482999179","text":"# Using while True is always a safe bet.\n# A two-dimensional list is just a list inside a list.\n# Shorthand: you can write p = [name, price] directly.\n# Print them to see what they are.\n\n# Strings support addition and multiplication.\n# f.write writes data. The file goes in the current directory; it is created if missing and overwritten if present.\n# \\n is a line break.\n# In a CSV, a comma separates cells.\n\n# Add a header row to the CSV: Product,Price.\n# When working with Chinese text, add encoding = 'utf-8' to open().\n# When importing into Excel, import as UTF-8.\n# \"comma\" is the , character.\n\n# Reading CSV data.\n# Splitting: line.split('separator') turns each line into a list; line.strip() removes the trailing \\n.\n# Shorthand unpacking: name, price =\n\n# continue and break can only be used inside loops, but continue does not exit the loop.\n# Usually used at the top of a for loop.\n\n# import os is the operating system module; think of it as the machine's government.\n# os.path.isfile('path/filename')\n\nimport os\n\nproducts = []\n\n# Read the data; check whether the file exists.\nif os.path.isfile('products.csv'):\n    print('File found')\n    with open('products.csv', 'r', encoding='utf-8') as f:\n        for line in f:\n            if 'Product,Price' in line:\n                continue\n            List = line.strip().split(',')\n            name = List[0]\n            price = List[1]\n            products.append([name, price])\nelse:\n    print('File not found')\n\nprint(products)\n\n# Input data\nwhile True:\n    name = input('Enter a name: ')\n    if name == 'q':\n        break\n    price = input('Enter a price: ')\n    price = int(price)\n    # p = []\n    # p.append(name)\n    # p.append(price)\n    products.append([name, price])\n\nprint(products)\n\n# Print the list\nfor p in products:\n    print(p[0], 'costs', p[1])\n\n\n# Write to the file\nwith open('products.csv', 'w', encoding='utf-8') as f:\n    f.write('Product,Price\\n')\n    for p in products:\n        f.write(p[0] + ',' + str(p[1]) + '\\n')\n\n","sub_path":"day4/十五。专案:商品清单.py","file_name":"十五。专案:商品清单.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"137863371","text":"# pylint: disable=C0111,R0903\n\n\"\"\"Displays the status of watson (time-tracking tool)\n\nRequires the following executable:\n * watson\n\"\"\"\n\nimport bumblebee.input\nimport bumblebee.output\nimport bumblebee.engine\nimport 
bumblebee.util\nimport bumblebee.popup_v2\n\nimport logging\nimport re\nimport functools\n\nclass Module(bumblebee.engine.Module):\n def __init__(self, engine, config):\n super(Module, self).__init__(engine, config,\n bumblebee.output.Widget(full_text=self.text))\n self._tracking = False\n self._project = \"\"\n engine.input.register_callback(self, button=bumblebee.input.LEFT_MOUSE,\n cmd=self.toggle)\n\n def toggle(self, widget):\n self._project = \"hit\"\n if self._tracking:\n bumblebee.util.execute(\"watson stop\")\n else:\n bumblebee.util.execute(\"watson restart\")\n self._tracking = not self._tracking\n\n def text(self, widget):\n if self._tracking:\n return self._project\n else:\n return \"Paused\"\n\n def update(self, widgets):\n output = bumblebee.util.execute(\"watson status\")\n if re.match('No project started', output):\n self._tracking = False\n return\n\n self._tracking = True\n m = re.search(r'Project (.+) started', output)\n self._project = m.group(1)\n\n #\n def state(self, widget):\n return \"on\" if self._tracking else \"off\"\n # return [widget.get(\"status\", None), widget.get(\"period\", None)]\n\n# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\n","sub_path":"bumblebee/modules/watson.py","file_name":"watson.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"579814183","text":"import pdb\nimport torch\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport torch.nn.functional as F\nfrom torch.nn.functional import softmax\n\nclass AdversarialAttack:\n def __init__(self, model, criterion, tokenizer, show_progress=True, **kwargs):\n \"\"\"\n param model: nn.Module object - can be HuggingFace's model or custom one.\n param criterion: nn.functional - torch criterion used to train your model. # need check\n param tokenizer: nn.Tokenizer - HuggingFace's tokenizer. # need check\n param show_progress: bool type - show tqdm progress bar. 
\n        param kwargs: encoder - string indicating the HuggingFace encoder that has an 'embeddings' attribute.\n            Used if your model doesn't have 'get_input_embeddings' method to get access to encoder embeddings\n        \"\"\"\n        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        self.model = model.to(self.device)\n        self.model.eval()\n        self.criterion = criterion\n        self.tokenizer = tokenizer\n        self.show_progress = show_progress\n        self.kwargs = kwargs\n\n        self.batch_output = None\n\n    def _get_gradients(self, batch):\n        \"\"\"\n        set requires_grad to True for all parameters, but save the original values to restore them later\n        \"\"\"\n        embedding_gradients = []\n        original_param_name_to_requires_grad_dict = {}\n        \n        for param_name, param in self.model.named_parameters():\n            original_param_name_to_requires_grad_dict[param_name] = param.requires_grad\n            param.requires_grad = True\n        \n        hooks = self._register_embedding_gradient_hooks(embedding_gradients)\n        loss = self.forward_step(batch)\n\n        self.model.zero_grad()\n        loss.backward()\n\n        for hook in hooks:\n            hook.remove()\n\n        # restore the original requires_grad values of the parameters\n        for param_name, param in self.model.named_parameters():\n            param.requires_grad = original_param_name_to_requires_grad_dict[param_name]\n\n        return embedding_gradients[0]\n\n    def _register_embedding_gradient_hooks(self, embedding_gradients):\n        \"\"\"\n        Registers a backward hook on the embedding layer, used to save the gradients of the embeddings for use in get_gradients()\n        when there are multiple inputs (e.g., a passage and question), the hook will be called multiple times.\n        We append all the embeddings gradients to a list.\n        \"\"\"\n\n        def hook_layers(module, grad_in, grad_out):\n            embedding_gradients.append(grad_out[0])\n\n        backward_hooks = []\n        embedding_layer = self.get_embeddings_layer()\n        backward_hooks.append(embedding_layer.register_backward_hook(hook_layers))\n        return backward_hooks\n\n    def get_embeddings_layer(self):\n        if hasattr(self.model, \"get_input_embeddings\"):\n            embedding_layer = self.model.get_input_embeddings()\n        else:\n            encoder_attribute = self.kwargs.get(\"encoder\")\n            assert encoder_attribute, \"Your model doesn't have 'get_input_embeddings' method, thus you \" \\\n                \"have to provide 'encoder' key argument while initializing SaliencyInterpreter object\"\n            embedding_layer = getattr(self.model, encoder_attribute).embeddings\n        return embedding_layer\n\n    @property\n    def special_tokens(self):\n        \"\"\"\n        some tokenizers don't have 'eos_token' and 'bos_token' attributes.\n        Thus, we need some trick to get them.\n        \"\"\"\n\n        if self.tokenizer.bos_token is None or self.tokenizer.eos_token is None:\n            special_tokens = self.tokenizer.build_inputs_with_special_tokens([])\n            special_tokens_ids = self.tokenizer.convert_ids_to_tokens(special_tokens)\n            self.tokenizer.bos_token, self.tokenizer.eos_token = special_tokens_ids\n\n        special_tokens = self.tokenizer.eos_token, self.tokenizer.bos_token\n        return special_tokens\n\n    def _pairwise_dot_product(self, src_embeds, vocab_embeds, cosine=False):\n        \"\"\"\n        Compute the cosine similarity between each word in the vocab and each word in the source\n        \"\"\"\n        if cosine:\n            src_embeds = F.normalize(src_embeds, dim=-1, p=2)\n            vocab_embeds = F.normalize(vocab_embeds, dim=-1, p=2)\n        # dot product\n        dot_product = torch.einsum(\"bij,kj->bik\", (src_embeds, vocab_embeds))\n        return dot_product\n\n    def _pairwise_distance(self, src_embeds, vocab_embeds, squared=False):\n        \"\"\"\n        Compute the Euclidean distance between each word in the vocab and 
each word in the source.\n \"\"\"\n # compute square norm to avoid compute all the directions\n vocab_sq_norm = vocab_embeds.norm(p=2, dim=-1) ** 2\n src_sq_norm = src_embeds.norm(p=2, dim=-1) ** 2\n\n # dot product\n dot_product = self._pairwise_dot_product(src_embeds, vocab_embeds)\n \n # reshape for broadcasting\n vocab_sq_norm = vocab_sq_norm.unsqueeze(0).unsqueeze(0) # 1, 1, vocab size\n src_sq_norm = src_sq_norm.unsqueeze(2) # batch, seq length, 1\n\n # compute squared difference\n sq_norm = vocab_sq_norm + src_sq_norm - 2 * dot_product\n if squared:\n return sq_norm\n else:\n # relu + epsilon for numerical stability\n sq_norm = F.relu(sq_norm) + 1e-20\n \n # take the square root\n return sq_norm.sqrt()\n\n def forward_step(self, batch):\n \"\"\"\n If your model receive inputs in another way or you computing not like in this example\n simply override this method.\n \"\"\"\n input_ids = torch.as_tensor(batch.input_ids).to(self.device).reshape((1, -1)) # batch.get('input_ids').to(self.device)\n attention_mask = torch.as_tensor(batch.attention_mask).to(self.device).reshape((1, -1)) # batch.get('attention_mask').to(self.device)\n outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)[0]\n\n _, _, num_label = outputs.shape\n \"\"\"\n outputs : (batch, seq_length, feat_dim) => (seq_length, feat_dim)\n labels : (batch, seq_length) => (seq_length,)\n \"\"\"\n outputs = outputs.view(-1, num_label)\n labels = torch.argmax(outputs, dim=1) # torch.argmax(outputs, dim=1)\n batch_losses = self.criterion(outputs, labels)\n loss = torch.mean(batch_losses) # mean average\n self.batch_output = [input_ids, outputs]\n return loss\n\n def update_output(self, ):\n \"\"\"\n You can override this method if you want to change the format of outputs (e.g., storing gradients)\n \"\"\"\n input_ids, outputs, grads, adv_tokens = self.batch_output\n\n probs = softmax(outputs, dim=-1)\n probs, labels = torch.max(probs, dim=-1)\n\n tokens = [\n self.tokenizer.convert_ids_to_tokens(input_ids_)\n for input_ids_ in input_ids\n ]\n\n embedding_grads = grads.sum(dim=2)\n \n # norm for each sequence\n norms = torch.norm(embedding_grads, dim=1, p=2) # need check hyperparameter\n \n # normalizing\n for i, norm in enumerate(norms):\n embedding_grads[i] = torch.abs(embedding_grads[i]) / norm\n\n batch_output = []\n \n # check probs, labels shape\n labels = torch.reshape(labels, (1, -1))\n probs = torch.reshape(probs, (1, -1))\n iterator = zip(tokens, probs, embedding_grads, labels)\n\n for example_tokens, example_prob, example_grad, example_label in iterator:\n example_dict = dict()\n # as we do it by batches we has a padding so we need to remove it\n \n example_tokens = [t for t in example_tokens if t != self.tokenizer.pad_token]\n example_dict['tokens'] = example_tokens\n example_dict['grad'] = example_grad.cpu().tolist()[:len(example_tokens)]\n example_dict['label'] = example_label.cpu().tolist()[:len(example_tokens)] # example_label.item()\n example_dict['prob'] = example_prob.cpu().tolist()[:len(example_tokens)] # example_prob.item() \n\n batch_output.append(example_dict)\n\n return batch_output\n\n\n\n\n\n\n\n\n","sub_path":"adversarial-attack/attacker.py","file_name":"attacker.py","file_ext":"py","file_size_in_byte":8350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"311454895","text":"\"\"\"\nCompares basis sets with authoritative versions\n\"\"\"\n\nimport os\nimport pytest\n\nfrom basis_set_exchange import misc, curate\nfrom .common_testvars 
import bs_metadata, bs_names, auth_data_dir\n\n# create a map of the sources dir\n_basis_src_map = {}\nfor x in os.listdir(auth_data_dir):\n if not x.endswith('.bz2'):\n continue\n\n # remove .fmt.bz2\n base, _ = os.path.splitext(x)\n base, _ = os.path.splitext(base)\n\n if base in _basis_src_map:\n raise RuntimeError(\"Duplicate basis set in authoritative sources: {}\".format(base))\n\n _basis_src_map[base] = os.path.join(auth_data_dir, x)\n\n\n@pytest.mark.parametrize('basis_name_ver', list(_basis_src_map.keys()))\ndef test_authoritative(basis_name_ver):\n '''\n Compare the stored basis sets with the stored authoritative sources\n '''\n\n basis_name, ver = os.path.splitext(basis_name_ver)\n ver = ver[1:] # remove starting '.'\n\n # Determine the basis name from the filename\n basis_name = misc.basis_name_from_filename(basis_name)\n\n basis_meta = bs_metadata[basis_name]\n\n ref_filename = _basis_src_map[basis_name_ver]\n\n if not basis_name in bs_names:\n raise RuntimeError(\"Source basis {} doesn't have a BSE basis\".format(basis_name))\n\n assert curate.compare_basis_against_file(basis_name, ref_filename, version=ver, uncontract_general=True)\n","sub_path":"basis_set_exchange/tests/test_authoritative.py","file_name":"test_authoritative.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"458515268","text":"from train import Net\nfrom PIL import Image\nimport numpy as np\nimport glob\nimport torch\nimport torch.nn.functional as F\n\n\n\nweights = glob.glob(\"output/*.pth\")\nif len(weights) == 0:\n raise IOError(\"Could not find any model\")\nprint(f\"Found {len(weights)} models, using {weights[0]}\")\n\ndevice = torch.device('cpu')\nmodel = Net()\nmodel.load_state_dict(torch.load(weights[0], map_location=device))\n\n\ndef predict(file_path):\n \n img = np.array(Image.open(file_path).resize(size=(28, 28)))\n img = img.reshape(1, 1, 28, 28)\n img = img.astype('float32')\n img = img / 255.0\n digit = F.softmax(model(torch.from_numpy(img)), dim=1)\n return str(int(digit[0].argmax()))\n\n\nif __name__ == \"__main__\":\n \n print(predict(\"./301.png\"))","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"239999225","text":"import argparse\r\nimport importlib\r\n\r\nfrom bunch import Bunch\r\nimport numpy as np\r\n\r\nfrom data.data_loader import DataLoader\r\nfrom utils.utils import set_seed, stack_csrdata, load_fake_data\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--config_file\", type=str, default=\"evaluate_attack_args\")\r\nconfig = parser.parse_args()\r\n\r\n\r\ndef main(args):\r\n # Load data.\r\n print(\"Loading data from {}\".format(args.data_path))\r\n data_loader = DataLoader(path=args.data_path)\r\n n_users, n_items = data_loader.n_users, data_loader.n_items\r\n print(\"n_users: {}, n_items: {}\".format(n_users, n_items))\r\n train_data = data_loader.load_train_data()\r\n test_data = data_loader.load_test_data()\r\n\r\n attack_eval_args = Bunch(args.attack_eval_args)\r\n # Load fake data (and combine with normal training data) if path provided.\r\n n_fakes = 0\r\n if attack_eval_args.fake_data_path:\r\n fake_data = load_fake_data(attack_eval_args.fake_data_path)\r\n train_data = stack_csrdata(train_data, fake_data)\r\n n_fakes = fake_data.shape[0]\r\n print(\"Statistics of fake data: \"\r\n \"n_fakes={}, 
avg_clicks={:.2f}\".format(\r\n            n_fakes, fake_data.sum(1).mean()))\r\n\r\n    # Evaluate victim model performance.\r\n    for victim_args in attack_eval_args.victims:\r\n        print(victim_args)\r\n        victim_args = Bunch(victim_args)\r\n\r\n        trainer_class = victim_args.model[\"trainer_class\"]\r\n        trainer = trainer_class(n_users=n_users+n_fakes,\r\n                                n_items=n_items,\r\n                                args=victim_args)\r\n        trainer.fit(train_data, test_data)\r\n        # Load target items and evaluate attack performance.\r\n        target_items = np.load(attack_eval_args.target_items_path)['target_items']\r\n        trainer.validate(train_data, test_data, target_items)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    args = importlib.import_module(config.config_file)\r\n\r\n    set_seed(args.seed, args.use_cuda)\r\n    main(args)\r\n","sub_path":"evaluate_attack.py","file_name":"evaluate_attack.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"144482966","text":"\"\"\"Utility functions to make matplotlib plots.\"\"\"\nfrom __future__ import annotations\n\nimport math\nfrom typing import Any, Optional\n\nfrom matplotlib import pyplot as plt\n\nfrom xplogger.experiment_manager.viz.utils import (\n    get_data_and_colors,\n    validate_kwargs_for_aggregate_metrics,\n)\nfrom xplogger.parser.experiment import ExperimentSequenceDict # type: ignore\n\n\ndef plot_experiment_sequence_dict(\n    exp_seq_dict: ExperimentSequenceDict,\n    metadata_for_plot: dict[str, Any],\n    color_palette: list[Any],\n    colors: Optional[list[str]] = None,\n    color_offset: int = 0,\n    return_all_metrics_with_same_length: bool = True,\n    kwargs_for_aggregate_metrics: Optional[dict[str, Any]] = None,\n) -> None:\n    \"\"\"Plot the given experiment sequence dict as a matplotlib figure.\n\n    Args:\n        exp_seq_dict (ExperimentSequenceDict):\n        metadata_for_plot (dict[str, Any]):\n        color_palette (list[Any]):\n        colors (Optional[list[str]], optional): Defaults to None.\n        color_offset (int, optional): Defaults to 0.\n        kwargs_for_aggregate_metrics (Optional[dict[str, Any]], optional):\n            These arguments are passed to the aggregation function of exp_seq_dict.\n            Defaults to None.\n\n    Returns:\n        None.\n    \"\"\"\n    validate_kwargs_for_aggregate_metrics(\n        kwargs_for_aggregate_metrics=kwargs_for_aggregate_metrics\n    )\n\n    x_metric = kwargs_for_aggregate_metrics[\"x_name\"] # type: ignore\n    y_metric_list = kwargs_for_aggregate_metrics[\"metric_names\"] # type: ignore\n\n    plt.title(metadata_for_plot.get(\"title\", \"Default Title\"))\n    plt.xlabel(x_metric)\n    plt.ylabel(\"-\".join(y_metric_list))\n\n    data, colors = get_data_and_colors(\n        exp_seq_dict=exp_seq_dict,\n        return_all_metrics_with_same_length=return_all_metrics_with_same_length,\n        kwargs_for_aggregate_metrics=kwargs_for_aggregate_metrics, # type: ignore\n        color_palette=color_palette,\n        colors=colors,\n        color_offset=color_offset,\n    )\n\n    for index, (key, y) in enumerate(data.items(), color_offset):\n        if key.endswith(f\"_{x_metric}\"):\n            continue\n        for current_metric_name in y_metric_list:\n            if key.endswith(current_metric_name):\n                current_exp_seq_key = key.replace(f\"_{current_metric_name}\", \"\")\n                break\n        else:\n            print(\"Can not find the metric name.\")\n            breakpoint()\n        x_key = f\"{current_exp_seq_key}_{x_metric}\"\n        x = data[x_key].mean(axis=0)\n        mean = y.mean(axis=0)\n        stderr = y.std(axis=0) / math.sqrt(len(y))\n        plt.plot(x, mean, linewidth=2, color=colors[index], label=key)\n        plt.fill_between(\n            x=x,\n            y1=mean - stderr,\n            y2=mean + stderr,\n            alpha=metadata_for_plot.get(\"fill_alpha\", 
0.6),\n color=colors[index],\n )\n","sub_path":"xplogger/experiment_manager/viz/matplotlib.py","file_name":"matplotlib.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191285657","text":"\"\"\"\nMain flask server functionality and logic.\n\"\"\"\n# pylint: disable=no-member\n# pylint: disable=wrong-import-position\n# pylint: disable=global-statement\nimport os\nimport logging\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\nimport flask\nimport flask_socketio\nimport flask_sqlalchemy\nimport iso8601\nfrom oauth2client import client\nfrom googleapiclient.discovery import build\n\nAPI_NAME = \"calendar\"\nAPI_VERSION = \"v3\"\nCLIENT_SECRET_FILE = \"./credentials.json\"\nfrom iteration_utilities import unique_everseen\n\nlogging.getLogger(\"werkzeug\").setLevel(logging.ERROR)\n\nAPP = flask.Flask(__name__)\n\n##BOILER PLATE CODE TO INITIATE SOCKETS\nSOCKETIO = flask_socketio.SocketIO(APP)\nSOCKETIO.init_app(APP, cors_allowed_origins=\"*\")\n\nDOTENV_PATH = join(dirname(__file__), \"sql.env\")\nload_dotenv(DOTENV_PATH)\n\n# BOILER PLATE CODE TO INSTANTIATE PSQL AND SQLALCHEMY\n\nDATABASE_URI = os.environ[\"DATABASE_URL\"]\n\nAPP.config[\"SQLALCHEMY_DATABASE_URI\"] = DATABASE_URI\n\nDB = flask_sqlalchemy.SQLAlchemy(APP)\n\nDB.init_app(APP)\nDB.app = APP\n\n\nCALENDER_EVENT_CHANNEL = \"calendar_event\"\n\nimport models\n\n\ndef push_new_user_to_database(ident, name, email):\n \"\"\"\n Pushes new user to database.\n \"\"\"\n DB.session.add(models.AuthUser(ident, name, email))\n DB.session.commit()\n\n\ndef add_event(ccode, title, start, end, desc):\n \"\"\"\n adds an event, returns id of added event\n \"\"\"\n added_event = models.Event(ccode, title, start, end, desc)\n DB.session.add(added_event)\n DB.session.commit()\n return added_event.id\n\n\ndef mod_event(ccode, title, start, end, desc, event_id):\n \"\"\"\n modifies an event, returns id of added event\n \"\"\"\n event = DB.session.query(models.Event).filter(models.Event.id == event_id).first()\n event.id = event_id\n event.title = title\n event.ccode = ccode\n event.start = start\n event.end = end\n event.desc = desc\n DB.session.commit()\n emit_events_to_calender(\"recieve all events\", ccode)\n\n\ndef del_event(event_id, ccode):\n \"\"\"\n Deletes an event and returns the id of the deleted event.\n \"\"\"\n DB.session.query(models.Event).filter(models.Event.id == event_id).delete()\n DB.session.commit()\n\n\ndef delete_cal(ccode):\n \"\"\"\n Deletes an cal\n \"\"\"\n print(\"Deleting all events!\")\n print(\":GOT HERE GOT HERE GOT HERE GOT HERE\")\n for record in (\n DB.session.query(models.Event).filter(models.Event.ccode.contains(ccode)).all()\n ):\n print(record)\n DB.session.query(models.Calendars).filter(models.Calendars.ccode == ccode).delete()\n DB.session.commit()\n\n\ndef add_calendar_for_user(userid, priv_flag):\n\n \"\"\"\n adds an event, returns the ccode of the new calendar\n \"\"\"\n added_calendar = models.Calendars(userid, priv_flag)\n DB.session.add(added_calendar)\n DB.session.commit()\n return added_calendar.ccode\n\n\ndef get_sid():\n \"\"\"\n returns sid.\n \"\"\"\n return flask.request.sid\n\n\ndef emit_events_to_calender(channel, cal_code):\n \"\"\"\n Emits all calendar events along channel\n \"\"\"\n sid = get_sid()\n all_events = []\n for ccode in cal_code:\n events_for_ccode = [\n {\n \"start\": record.start,\n \"end\": record.end,\n \"title\": record.title,\n \"ccode\": record.ccode,\n 
\"eventid\": record.id,\n }\n for record in DB.session.query(models.Event)\n .filter(models.Event.ccode.contains([ccode]))\n .all()\n ]\n all_events.extend(events_for_ccode)\n\n all_events = list(unique_everseen(all_events))\n # for event in all_events:\n # print(event)\n SOCKETIO.emit(channel, all_events, room=sid)\n\n\ndef rfc3339_to_unix(timestamp):\n \"\"\"\n Convert to unix time.\n \"\"\"\n _date_obj = iso8601.parse_date(timestamp)\n _date_unix = _date_obj.timestamp()\n return _date_unix\n\n\ndef emit_ccode_to_calender(channel, ccodes):\n \"\"\"\n emits details about a ccode\n \"\"\"\n sid = get_sid()\n ccode_details = {}\n for ccode in ccodes:\n record = (\n DB.session.query(models.Calendars)\n .filter(models.Calendars.ccode == ccode)\n .first()\n )\n details_for_ccode = {\n \"userid\": record.userid,\n \"private\": record.private,\n }\n ccode_details[record.ccode] = details_for_ccode\n\n if len(ccode_details) > 0:\n SOCKETIO.emit(channel, ccode_details, room=sid)\n\n\ndef exists_in_auth_user(check_id):\n \"\"\"\n Check to see if the auth user is there\n \"\"\"\n return (\n DB.session.query(models.AuthUser.userid).filter_by(userid=check_id).scalar()\n is not None\n )\n\n\ndef exists_in_calender(merge_code):\n \"\"\"\n Check to see if merge calendar exists\n \"\"\"\n return (\n DB.session.query(models.Calendars.ccode).filter_by(ccode=merge_code).scalar()\n is not None\n )\n\n\n##SOCKET EVENTS\n@SOCKETIO.on(\"connect\")\ndef on_connect():\n \"\"\"\n Runs on connect.\n \"\"\"\n # print(\"Someone connected!\")\n\n\n@SOCKETIO.on(\"disconnect\")\ndef on_disconnect():\n \"\"\"\n Runs on disconnect.\n \"\"\"\n # print(\"Someone disconnected!\")\n\n\n@SOCKETIO.on(\"new google user\")\ndef on_new_google_user(data):\n \"\"\"\n Runs verification on google token.\n \"\"\"\n print(\"Beginning to authenticate data: \", data)\n sid = get_sid()\n try:\n credentials = client.credentials_from_clientsecrets_and_code(\n CLIENT_SECRET_FILE,\n [\"https://www.googleapis.com/auth/calendar.readonly\", \"profile\", \"email\"],\n data[\"code\"],\n )\n print(\"Verified user. 
Proceeding to check database.\")\n userid = credentials.id_token[\"sub\"]\n email = credentials.id_token[\"email\"]\n name = credentials.id_token[\"name\"]\n exists = exists_in_auth_user(userid)\n if not exists:\n push_new_user_to_database(userid, name, email)\n add_calendar_for_user(userid, False)\n all_ccodes = [\n record.ccode\n for record in DB.session.query(models.Calendars)\n .filter_by(userid=userid)\n .all()\n ]\n SOCKETIO.emit(\n \"Verified\",\n {\n \"name\": name,\n \"ccodes\": all_ccodes,\n \"userid\": userid,\n \"access_token\": credentials.access_token,\n },\n room=sid,\n )\n #\n # print(\"printing all CCODES\")\n # print(all_ccodes)\n #\n return userid\n except ValueError:\n # Invalid token\n print(\"Could not verify token.\")\n return \"Unverified.\"\n except KeyError:\n print(\"Malformed token.\")\n return \"Unverified.\"\n\n\n@SOCKETIO.on(\"delete calendar\")\ndef on_delete_cal(data):\n \"\"\"\n add a new event for to calendar\n \"\"\"\n print(\"not updating?\")\n delete_cal(data[\"ccode\"])\n\n\n@SOCKETIO.on(\"add calendar\")\ndef on_add_calendar(data):\n \"\"\"\n add a new calendar for user\n \"\"\"\n print(data)\n userid = data[\"userid\"]\n private = data[\"privateCal\"]\n ccode_list = data[\"ccode_list\"]\n print(ccode_list)\n print(private)\n ccode = add_calendar_for_user(userid, private)\n print(\n \"Added calendar for user \",\n userid,\n \"With ccode \",\n ccode,\n \" Private flag: \",\n private,\n )\n added_event_id = add_event(\n [ccode], \"Created Calendar At\", 946688461, 946688461, \"some words\"\n )\n print(added_event_id)\n ccode_list.append(ccode)\n emit_events_to_calender(\"recieve all events\", ccode_list)\n\n SOCKETIO.emit(\n \"update dropdown\",\n {\n \"ccode\": ccode,\n },\n )\n\n\n@SOCKETIO.on(\"get events\")\ndef send_events_to_calendar(data):\n \"\"\"\n send_events_to_calendar.\n \"\"\"\n print(\"LOOKING FOR CALCODE: \", data)\n # EMIT EVENTS TO CALENDAR\n emit_events_to_calender(\"recieve all events\", data)\n print(\"SENT EVENTS!\")\n\n\n@SOCKETIO.on(\"get ccode details\")\ndef send_ccode_to_calendar(data):\n \"\"\"\n send_ccode_to_calendar.\n \"\"\"\n print(\"getting details for ccode: \", data)\n emit_ccode_to_calender(\"recieve ccode details\", data)\n print(\"SENT ccode!\")\n\n\n@SOCKETIO.on(\"new event\")\ndef on_new_event(data):\n \"\"\"\n add a new event for to calendar\n \"\"\"\n print(data)\n title = data[\"title\"]\n start = data[\"start\"]\n end = data[\"end\"]\n ccode = data[\"ccode\"]\n print(start)\n print(end)\n added_event_id = add_event([ccode], title, start, end, \"some words\")\n print(\"SENDING INDIVIDUAL EVENT\")\n SOCKETIO.emit(\n \"calender_event\",\n {\n \"title\": title,\n \"start\": start,\n \"end\": end,\n \"eventid\": added_event_id,\n \"ccode\": [ccode],\n },\n room=get_sid(),\n )\n return added_event_id\n\n\n@SOCKETIO.on(\"modify event\")\ndef on_modify_event(data):\n \"\"\"\n add a new event for to calendar\n \"\"\"\n print(data)\n title = data[\"title\"]\n start = data[\"start\"]\n end = data[\"end\"]\n ccode = data[\"ccode\"]\n event_id = data[\"event_id\"]\n ccode_list = data[\"ccode_list\"]\n print(start)\n print(end)\n mod_event([ccode], title, start, end, \"some words\", event_id)\n # EMIT EVENTS TO CALENDAR\n print(\"print form mod event\")\n print(type(ccode_list))\n emit_events_to_calender(\"recieve all events\", ccode_list)\n\n\n@SOCKETIO.on(\"delete event\")\ndef on_delete_event(data):\n \"\"\"\n add a new event for to calendar\n \"\"\"\n print(\"Recieved request to delete event.\")\n print(data)\n 
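# expected payload keys from the client: event_id, ccode, ccode_list\n    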
event_id = data[\"event_id\"]\n    ccode = data[\"ccode\"]\n    ccode_list = data[\"ccode_list\"]\n    del_event(event_id, [ccode])\n    # EMIT EVENTS TO CALENDAR\n    print(\"emit from delete event\")\n    emit_events_to_calender(\"recieve all events\", ccode_list)\n\n\n@SOCKETIO.on(\"cCodeToMerge\")\ndef on_merge_calendar(data):\n    \"\"\"\n    merge calendar\n    \"\"\"\n    ccode_list = data[\"ccode_list\"]\n    merge_code = int(data[\"userToMergeWith\"])\n    print(\"LOOKING FOR CALCODE\", data[\"userToMergeWith\"])\n    cal_code = int(data[\"currentUser\"])\n    exists = exists_in_calender(merge_code)\n    try:\n        if not exists:\n            raise ValueError\n        for record in (\n            DB.session.query(models.Event)\n            .filter(models.Event.ccode.contains([merge_code]))\n            .all()\n        ):\n            if cal_code not in record.ccode:\n                record.ccode.append(cal_code)\n        DB.session.commit()\n\n        print(\"cal_code appended\")\n        ccode_list.append(merge_code)\n        print(ccode_list)\n\n        emit_events_to_calender(\"recieve all events\", ccode_list)\n    except ValueError:\n        print(\n            \"Ccode does not exist, or you have attempted to merge with a private calendar.\"\n        )\n\n\n@SOCKETIO.on(\"Import Calendar\")\ndef on_import_calendar(data):\n    \"\"\"\n    import primary google calendar for user\n    \"\"\"\n    access_token = data[\"accessToken\"]\n    private = data[\"privateCal\"]\n    userid = data[\"userid\"]\n    ccode_list = data[\"ccode_list\"]\n    creds = client.AccessTokenCredentials(access_token, \"my-user-agent/1.0\")\n    service = build(\"calendar\", \"v3\", credentials=creds)\n    print(\"Getting all primary calendar events\")\n    events_result = (\n        service.events()\n        .list(calendarId=\"primary\", singleEvents=True, orderBy=\"startTime\")\n        .execute()\n    )\n    events = events_result.get(\"items\", [])\n    if not events:\n        print(\"No upcoming events found. Initializing empty calendar.\")\n    ccode = add_calendar_for_user(userid, private)\n    ccode_list.append(ccode)\n    for event in events:\n        if (\n            event[\"start\"].get(\"dateTime\") is None\n            or event[\"end\"].get(\"dateTime\") is None\n        ):\n            continue\n        start = int(rfc3339_to_unix(str(event[\"start\"].get(\"dateTime\"))))\n        end = int(rfc3339_to_unix(str(event[\"end\"].get(\"dateTime\"))))\n        try:\n            title = (\n                (event[\"summary\"][:117] + \"..\")\n                if len(event[\"summary\"]) > 117\n                else event[\"summary\"]\n            )\n        except KeyError:\n            # fall back to a placeholder so title is always defined\n            event[\"summary\"] = \"No Title\"\n            title = \"No Title\"\n        try:\n            desc = (\n                (event[\"description\"][:117] + \"..\")\n                if len(event[\"description\"]) > 117\n                else event[\"description\"]\n            )\n        except KeyError:\n            desc = \"some desc\"\n        added_event_id = add_event([ccode], title, start, end, desc)\n        print(added_event_id)\n    emit_events_to_calender(\"recieve all events\", ccode_list)\n    SOCKETIO.emit(\n        \"update dropdown\",\n        {\n            \"ccode\": ccode,\n        },\n    )\n\n\n@SOCKETIO.on(\"modify calendar\")\ndef on_modify_calendar(data):\n    \"\"\"\n    modify calendar\n    \"\"\"\n    print(data)\n    ccode = data[\"ccode\"]\n    private = data[\"privateCal\"]\n    del_cal = data[\"deleteCal\"]\n    all_ccodes = data[\"allCcodes\"]\n\n    print(ccode)\n    calendar = (\n        DB.session.query(models.Calendars)\n        .filter(models.Calendars.ccode == ccode)\n        .first()\n    )\n    if calendar:\n        if del_cal:\n            for record in (\n                DB.session.query(models.Event)\n                .filter(models.Event.ccode.contains([ccode]))\n                .all()\n            ):\n                if record.ccode[0] == ccode:\n                    del_event(record.id, ccode)\n            DB.session.query(models.Calendars).filter(\n                models.Calendars.ccode == ccode\n            ).delete()\n        elif private:\n            calendar.private = True\n        else:\n            calendar.private = False\n    DB.session.commit()\n    emit_events_to_calender(\"recieve all events\", 
all_ccodes)\n\n\n@APP.route(\"/\")\ndef hello():\n \"\"\"\n Runs at page-load.\n\n \"\"\"\n models.DB.create_all()\n DB.session.commit()\n print(\"User has joined.\")\n return flask.render_template(\"index.html\")\n\n\nif __name__ == \"__main__\":\n SOCKETIO.run(\n APP,\n host=os.getenv(\"IP\", \"0.0.0.0\"),\n port=int(os.getenv(\"PORT\", \"8080\")),\n debug=True,\n )\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"518681432","text":"from pathlib import Path\r\n\r\nimport folders\r\nimport tags\r\nimport utils\r\n\r\n\r\nclass MainProgram():\r\n SOURCE_PATH = Path('./files')\r\n DESTINATION_PATH = Path('./result/')\r\n\r\n def __init__(self):\r\n self.folders = folders.Folders()\r\n self.tags = tags.Tags()\r\n self.utils = utils.Utils()\r\n\r\n def retag_files(self):\r\n mp3_files = self.folders.retrieve_mp3_files(self.SOURCE_PATH)\r\n\r\n for mp3_file in mp3_files:\r\n complete_file_path = self.folders.join(self.SOURCE_PATH, mp3_file.name)\r\n\r\n mp3 = self.tags.open_mp3_file(complete_file_path)\r\n\r\n tags = self.tags.read_tags(mp3)\r\n if len(tags) != 2 or not tags.get('song', False):\r\n tags = self.tags.get_artist_and_song_from_file_name(mp3_file)\r\n tags = self.utils.capitalize(tags)\r\n\r\n source_file_path = self.folders.join(\r\n self.SOURCE_PATH,\r\n mp3_file.name,\r\n )\r\n\r\n new_path = self.folders.copy_file(\r\n source_file_path,\r\n self.DESTINATION_PATH,\r\n '{}.mp3'.format(tags.get('song', mp3_file.name)),\r\n )\r\n\r\n new_mp3_file = self.tags.open_mp3_file(new_path)\r\n self.tags.write_artist_and_song_tags(new_mp3_file, tags)\r\n\r\n\r\nif __name__ == '__main__':\r\n main = MainProgram()\r\n main.retag_files()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"66663418","text":"#!/usr/bin/env python3\n#\n# Copyright 2021 Graviti. 
Licensed under MIT License.\n#\n# pylint: disable=invalid-name\n\n\"\"\"Dataloader of the DogsVsCats dataset.\"\"\"\n\nimport os\n\nfrom ...dataset import Data, Dataset\nfrom ...label import Classification\nfrom .._utility import glob\n\nDATASET_NAME = \"Dogs vs Cats\"\n_SEGMENTS = {\"train\": True, \"test\": False}\n\n\ndef DogsVsCats(path: str) -> Dataset:\n \"\"\"Dataloader of the DogsVsCats dataset.\n\n Arguments:\n path: The root directory of the dataset.\n The file structure should be like::\n\n \n train/\n cat.0.jpg\n ...\n dog.0.jpg\n ...\n test/\n 1000.jpg\n 1001.jpg\n ...\n\n Returns:\n Loaded ``Dataset`` object.\n\n \"\"\"\n root_path = os.path.abspath(os.path.expanduser(path))\n dataset = Dataset(DATASET_NAME)\n dataset.load_catalog(os.path.join(os.path.dirname(__file__), \"catalog.json\"))\n\n for segment_name, is_labeled in _SEGMENTS.items():\n segment = dataset.create_segment(segment_name)\n image_paths = glob(os.path.join(root_path, segment_name, \"*.jpg\"))\n for image_path in image_paths:\n data = Data(image_path)\n if is_labeled:\n data.label.classification = Classification(os.path.basename(image_path)[:3])\n segment.append(data)\n\n return dataset\n","sub_path":"tensorbay/opendataset/DogsVsCats/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"323238721","text":"import os\nimport sys\nimport gzip\nimport glob\nfrom errno import EEXIST\nfrom contextlib import closing\nfrom shutil import copyfileobj\nfrom contextlib import contextmanager\n\ntry:\n from urllib.request import urlopen\n from urllib.error import URLError\nexcept ImportError:\n from urllib2 import urlopen, URLError\nimport subprocess as sp\nimport wgetter\nfrom six import iteritems\n\nfrom .logger import logger\n\n\ndef mkdir_p(path_to_dir):\n \"\"\"Make directory(ies).\n\n This function behaves like mkdir -p.\n\n Args:\n path_to_dir (:obj:`str`): Path to the directory to make.\n \"\"\"\n try:\n os.makedirs(path_to_dir)\n except OSError as e: # Python >2.5\n if e.errno == EEXIST and os.path.isdir(path_to_dir):\n logger.debug(\n \"Directory %s already exists. Skipping.\" % path_to_dir)\n else:\n raise e\n\n\ndef download_aspera(url, dest_path, user=\"anonftp\",\n ftp=\"ftp-trace.ncbi.nlm.nih.gov\"):\n \"\"\"Download file with Aspera Connect.\n\n For details see the documentation ov Aspera Connect\n\n Args:\n url (:obj:`str`): URL to the file\n dest_path (:obj:`str`): Destination path.\n user (:obj:`str`, optional): User. Defaults to anonftp.\n ftp (:obj:`str`, optional): FTP path. 
Defaults to\n \"ftp-trace.ncbi.nlm.nih.gov\".\n \"\"\"\n logger.info(\"Downloading {} using aspera\\n\".format(url))\n aspera_home = os.environ.get(\"ASPERA_HOME\", None)\n if not aspera_home:\n raise ValueError(\"environment variable $ASPERA_HOME not set\")\n if not os.path.exists(aspera_home):\n raise ValueError(\n \"$ASPERA_HOME directory {} does not exist\".format(aspera_home))\n ascp = os.path.join(aspera_home, \"connect/bin/ascp\")\n key = os.path.join(aspera_home, \"connect/etc/asperaweb_id_dsa.openssh\")\n if not os.path.exists(ascp):\n raise ValueError(\"could not find ascp binary\")\n if not os.path.exists(key):\n raise ValueError(\"could not find openssh key\")\n\n if url.startswith(\"ftp://\"):\n url = url.replace(\"ftp://\", \"\")\n url = url.replace(ftp, \"\")\n\n cmd = \"{} -i {} -k1 -T -l400m {}@{}:{} {}\".format(\n ascp, key, user, ftp, url, dest_path)\n logger.debug(cmd)\n p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)\n stdout, stderr = p.communicate()\n\n\ndef download_from_url(url, destination_path, force=False, aspera=False,\n silent=False):\n \"\"\"Download file from remote server\n\n Args:\n url (:obj:`str`): Path to the file on remote server (including file\n name)\n destination_path (:obj:`str`): Path to the file on local machine\n (including file name)\n force (:obj:`bool`): If file exist force to overwrite it. Defaults to\n False.\n aspera (:obj:`bool`): Download with Aspera Connect. Defaults to False.\n silent (:obj:`bool`): Do not print any message. Defaults to False.\n \"\"\"\n try:\n is_already_downloaded = os.path.isfile(destination_path)\n if is_already_downloaded:\n if force:\n if not silent:\n logger.info(\n \"Downloading %s to %s\" % (url, destination_path))\n fn = wgetter.download(url, outdir=os.path.dirname(\n destination_path))\n else:\n with closing(urlopen(url)) as r:\n with open(destination_path, mode='wb') as f:\n copyfileobj(r, f)\n else:\n logger.info(\"File already exist. 
Use force=True if you would \"\n \"like to overwrite it.\")\n else:\n if aspera:\n download_aspera(url, destination_path)\n else:\n if not silent:\n logger.info(\n \"Downloading %s to %s\" % (url, destination_path))\n fn = wgetter.download(url, outdir=os.path.dirname(\n destination_path))\n else:\n with closing(urlopen(url)) as r:\n with open(destination_path, mode='wb') as f:\n copyfileobj(r, f)\n except URLError:\n logger.error(\"Cannot find file %s\" % url)\n\n\ndef download_unpack_SRA_for_parallel(args):\n '''\n Auxiliary function for parallel download of sra.\n '''\n return download_unpack_SRA(*args)\n\ndef download_unpack_SRA(path, ftpaddres, directory_path,\n filetype='fasta', force=False, aspera=False, silent=False,\n fastq_dump_options=None, keep_sra=False):\n '''\n Combination of download_from_url for sra and unpacking with fastq-dump.\n\n :param path: downloaed path\n :param ftpaddres: ftp address\n :param directory_path: target local directory\n :param filetype: 'fastq' or 'fasta' for fastq-dump\n :param force: overwrite existing sra?\n :param aspera: download with aspera\n :param silent: supress wgetter log (get rid of enormous log file)\n :param fastq_dump_options: options for fastq-dump, see .download_SRA description\n :param keep_sra: keep original sra for later use\n :return: downloaded paths (note that if sequencing is pair-ended it might generate list of output files)\n '''\n mkdir_p(os.path.abspath(directory_path))\n\n sra_run = path.split(\"/\")[-1]\n logger.info(\"Analysing %s\" % sra_run)\n url = ftpaddres.format(range_subdir=sra_run[:6],\n file_dir=sra_run)\n logger.debug(\"URL: %s\", url)\n filepath = os.path.abspath(\n os.path.join(directory_path, \"%s.sra\" % sra_run))\n download_from_url(url, filepath, aspera=aspera, silent=silent, force=force)\n\n if filetype in [\"fasta\", \"fastq\"]:\n if which('fastq-dump') is None:\n raise NoSRAToolkitException(\n \"fastq-dump command not found\")\n ftype = \"\"\n if filetype == \"fasta\":\n ftype = \" --fasta \"\n cmd = \"fastq-dump\"\n for fqoption, fqvalue in iteritems(fastq_dump_options):\n if fqvalue:\n cmd += (\" --%s %s\" % (fqoption, fqvalue))\n else:\n cmd += (\" --%s\" % fqoption)\n cmd += \" %s --outdir %s %s\"\n cmd = cmd % (ftype, directory_path, filepath)\n logger.debug(cmd)\n process = sp.Popen(cmd, stdout=sp.PIPE,\n stderr=sp.PIPE,\n shell=True)\n logger.info(\"Converting to %s/%s*.%s.gz\\n\" % (\n directory_path, sra_run, filetype))\n pout, perr = process.communicate()\n downloaded_path = glob.glob(os.path.join(\n directory_path,\n \"%s*.%s.gz\" % (sra_run, filetype)\n ))\n\n if not keep_sra and filetype != 'sra':\n # Delete sra file\n os.unlink(filepath)\n #else:\n # downloaded_path = None\n\n return downloaded_path\n\n@contextmanager\ndef smart_open(filepath):\n \"\"\"Open file intelligently depending on the source and python version.\n\n Args:\n filepath (:obj:`str`): Path to the file.\n\n Yields:\n Context manager for file handle.\n\n \"\"\"\n if filepath[-2:] == \"gz\":\n mode = \"rt\"\n fopen = gzip.open\n else:\n mode = \"r\"\n fopen = open\n if sys.version_info[0] < 3:\n fh = fopen(filepath, mode)\n else:\n fh = fopen(filepath, mode, errors=\"ignore\")\n try:\n yield fh\n except IOError:\n fh.close()\n finally:\n fh.close()\n\n\ndef which(program):\n \"\"\"Check if executable exists.\n\n The code is taken from:\n https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python\n Args:\n program (:obj:`str`): Path to the executable.\n\n Returns:\n :obj:`str` or :obj:`None`: Path to the 
program or None.\n\n \"\"\"\n\n def is_exe(fpath):\n \"\"\"Check if fpath is executable.\"\"\"\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n","sub_path":"GEOparse/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389188165","text":"File = open(\"text.txt\").read()\nList = []\nfor i in range(0,len(File)-3):\n valid = False\n for a in range(1,4):\n if File[i+3].islower()==True:\n if File[(i+3)-a].isupper()!=True:\n if File[(i+3)+a].isupper()!=True:\n valid = True\n if valid == True:\n List.append(File[i+3])\nprint(*List)\n# 1 lower case letter surounded by EXACTLY 3 upper case letters.\n# XXX x XXX\n","sub_path":"Python/!PYTHON/PYTHONchallenges/Python/python challenges/3/challenge 3.py","file_name":"challenge 3.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"422428850","text":"from flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileAllowed #used for image uploading\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField, \\\n TextAreaField, FloatField#DateTimeField, DateField, TimeField\nfrom wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\nfrom wtforms_sqlalchemy.fields import QuerySelectField\n\n\nclass EmployeeForm(FlaskForm):\n name = StringField('Employee Name',validators=[DataRequired()])\n submit = SubmitField('Add Employee')\n\n\n\nclass AddRestaurantForm(FlaskForm):\n name = StringField('Employee Name',validators=[DataRequired()])\n submit = SubmitField('Add Restaurant')\n\n\nclass DatabaseForm(FlaskForm):\n excelFile = FileField('excelFile', validators = [FileAllowed(['xlsx'])])\n uploadExcel = SubmitField('Upload Excel File')\n\n#not used:\nclass AddRoleForm(FlaskForm):\n role = StringField('Role')\n wage = FloatField('Wage')\n tipPercentage = FloatField('Tip Percentage')\n notes = TextAreaField('notes')","sub_path":"dmrApp/main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563994553","text":"# -*- coding: utf8 -*-\n# Traditional Credit Scoring Using Logistic Regression\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport scorecardpy as sc\n\npd.set_option('display.max_columns', 500)\nsave_path = 'C:/Users/hjse7/Documents/github/scorecardpy/'\n\nif __name__ == '__main__':\n # data prepare ------\n # load germancredit data\n dat = sc.germancredit()\n # filter variable via missing rate, iv, identical value rate\n dt_s = sc.var_filter(dat, y=\"creditability\")\n\n # breaking dt into train and test\n train, test = sc.split_df(dt_s, 'creditability').values()\n\n # woe binning ------\n bins = sc.woebin(dt_s, y=\"creditability\")\n # print bins\n\n plotlist = sc.woebin_plot(bins)\n\n # # save binning plot\n # for key,i in plotlist.items():\n # i.show()\n # i.savefig(save_path + str(key)+'.png')\n\n # binning adjustment\n # # adjust breaks interactively\n # breaks_adj = sc.woebin_adj(dt_s, \"creditability\", bins)\n # # or 
specify breaks manually\n breaks_adj = {\n 'age.in.years': [26, 35, 40],\n 'other.debtors.or.guarantors': [\"none\", \"co-applicant%,%guarantor\"]\n }\n bins_adj = sc.woebin(dt_s, y=\"creditability\", breaks_list=breaks_adj)\n # print bins_adj\n\n # converting train and test into woe values\n train_woe = sc.woebin_ply(train, bins_adj)\n test_woe = sc.woebin_ply(test, bins_adj)\n # print (train_woe)\n\n\n y_train = train_woe.loc[:,'creditability']\n X_train = train_woe.loc[:,train_woe.columns != 'creditability']\n y_test = test_woe.loc[:,'creditability']\n X_test = test_woe.loc[:,train_woe.columns != 'creditability']\n\n # logistic regression ------\n from sklearn.linear_model import LogisticRegression\n lr = LogisticRegression(penalty='l1', C=0.9, solver='saga', n_jobs=-1)\n # print (X_train)\n lr.fit(X_train, y_train)\n # print (lr.coef_)\n # print (lr.intercept_)\n\n # predicted proability\n train_pred = lr.predict_proba(X_train)[:,1]\n test_pred = lr.predict_proba(X_test)[:,1]\n # performance ks & roc ------\n train_perf = sc.perf_eva(y_train, train_pred, title = \"train\")\n test_perf = sc.perf_eva(y_test, test_pred, title = \"test\")\n\n # score ------\n card = sc.scorecard(bins_adj, lr, X_train.columns)\n for key, value in card.iteritems():\n print(key)\n print('-' * 20)\n print(value)\n print()\n # print(card)\n # credit score\n train_score = sc.scorecard_ply(train, card, print_step=0)\n test_score = sc.scorecard_ply(test, card, print_step=0)\n\n # psi\n sc.perf_psi(\n score = {'train':train_score, 'test':test_score},\n label = {'train':y_train, 'test':y_test}\n )","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300376541","text":"import csv\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.core.files.storage import FileSystemStorage\n\nfrom .models import Disabled_ch, Notice_nocookie, RollFile, HTMLpage\n\n\nfrom .GsuiteUsers import GUser, GUser_school\n\n# Create your views here.\n\n\ndef nocookie(request):\n template_name = \"youtube_nocookie.html\"\n context = {}\n chs = Disabled_ch.objects.filter(is_noticed=True)\n context['chs'] = chs\n notice = Notice_nocookie.objects.all()\n if notice:\n context['notice'] = notice[0]\n\n if request.method == \"POST\":\n ch_val = request.POST.get('new_ch')\n if Disabled_ch.objects.filter(ch_name=ch_val):\n exist_ch = Disabled_ch.objects.get(ch_name=ch_val)\n exist_ch.is_noticed = True\n exist_ch.save()\n else:\n new_ch = Disabled_ch(ch_name=ch_val)\n new_ch.save()\n\n return render(request, template_name, context)\n\n\ndef GsuiteConvertor(request):\n template_name = \"g-suite_convertor.html\"\n context = {}\n\n if request.method == 'POST':\n file = request.FILES['roll_file']\n school = request.POST.get(\"school\")\n s_info = valid_G(school, file)\n if s_info:\n grade = request.POST.get(\"grade\")\n classN = request.POST.get(\"classN\")\n file_name = f'{school}{grade}-{classN}_user.csv'\n roll_file = RollFile(title=file_name,\n roll_file=file)\n roll_file.roll_file.name = file_name\n if request.POST.get('whole_school'):\n guser = GUser_school(file, s_info)\n else:\n guser = GUser(file, s_info, grade, classN)\n roll_file.save()\n context['result'] = guser.file_url\n else:\n context['errors'] = '학교명 혹은 파일이 올바르지 않습니다.'\n return render(request, template_name, context)\n\n\ndef valid_G(school, file):\n s_info = ''\n with 
open('staticfiles/G-suite/cbe_school_info.csv', 'r', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n if row[1] == school:\n s_info = [row[0], school, row[2].split(\"@\")[0]]\n break\n\n if s_info:\n # Should change .csv to .xlsx when deploy\n if file.size < 100000 and '.xlsx' in file.name:\n return s_info\n print(f'{file.name}: {file.size}byte')\n\n return False\n\n\ndef load_html(request, **kwargs):\n template_name = \"htmlpage.html\"\n context = {}\n # context['page'] = HTMLpage.objects.get(title='구구단').page\n context['page'] = HTMLpage.objects.get(id=kwargs['pk']).page\n\n return render(request, template_name, context)\n\n\ndef load_last_html(request):\n template_name = \"pages.html\"\n context = {}\n\n context['pages'] = HTMLpage.objects.order_by('-id')\n\n return render(request, template_name, context)\n\n# def load_last_html(request):\n# template_name = \"htmlpage.html\"\n# context = {}\n# # context['page'] = HTMLpage.objects.get(title='구구단').page\n# context['page'] = HTMLpage.objects.order_by('-id')[0].page\n\n# return render(request, template_name, context)\n","sub_path":"etc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"536285957","text":"import json\n\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom medium.apps.core.factories import UserFactory\n\n\nclass ProfileViewTests(APITestCase):\n def setUp(self):\n self.user_data = {\"user\": {\"email\": \"rustan@demo.com\", \"password\": \"rustancorpuz\", \"username\": \"rustanstrife\"}}\n self.user = UserFactory(**self.user_data['user'])\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.user.token)\n\n def test_get_profile(self):\n user = UserFactory()\n url = reverse('profiles:profile', kwargs={'username': user.username})\n response = self.client.get(url)\n response_data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response_data['profile']['username'], user.username)\n self.assertEqual(response_data['profile']['bio'], user.profile.bio)\n self.assertTrue(response_data['profile']['image'])\n self.assertIsInstance(response_data['profile']['following'], bool)\n\n def test_follow_profile(self):\n user = UserFactory()\n url = reverse('profiles:follow', kwargs={'username': user.username})\n response = self.client.post(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(response.data['following'])\n self.assertEqual(response.data['username'], user.username)\n\n def test_follow_profile_no_auth(self):\n self.client.credentials()\n user = UserFactory()\n url = reverse('profiles:follow', kwargs={'username': user.username})\n response = self.client.post(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_unfollow_profile(self):\n user = UserFactory()\n self.user.profile.follow(user.profile)\n self.assertTrue(self.user.profile.is_following(user.profile))\n\n url = reverse('profiles:follow', kwargs={'username': user.username})\n response = self.client.delete(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertFalse(response.data['following'])\n self.assertEqual(response.data['username'], 
user.username)\n\n\n","sub_path":"medium/apps/profiles/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"214883144","text":"\n\n#class header\nclass _MONOCOTYLEDON():\n\tdef __init__(self,): \n\t\tself.name = \"MONOCOTYLEDON\"\n\t\tself.definitions = [u'a type of plant that produces flowers and has only one cotyledon (= leaf part inside the seed). Monocotyledons include daffodils and grasses.']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_monocotyledon.py","file_name":"_monocotyledon.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"311275552","text":"import os\nimport yaml\n\nfrom autumn import constants\nfrom autumn.plots.plots import validate_plot_config\nfrom autumn.post_processing.processor import validate_post_process_config\nfrom apps.covid_19.plots import load_plot_config as load_covid_plot_config\n\n\ndef load_params(run_dirpath: str) -> dict:\n \"\"\"\n Loads the run params from the app's data dir\n \"\"\"\n params_path = os.path.join(run_dirpath, \"params.yml\")\n with open(params_path, \"r\") as f:\n return yaml.safe_load(f)\n\n\ndef load_post_processing_config(app_name: str) -> dict:\n \"\"\"\n Loads the post-processing config from the app's code dir\n \"\"\"\n pp_config_path = os.path.join(constants.APPS_PATH, app_name, \"post-processing.yml\")\n with open(pp_config_path, \"r\") as f:\n config = yaml.safe_load(f)\n\n validate_post_process_config(config)\n return config\n\n\ndef load_plot_config(app_name: str, param_set_name: str) -> dict:\n \"\"\"\n Loads the plot config from the app's code dir\n \"\"\"\n if app_name in [\"covid_19\", \"dr_tb_malancha\"]:\n config = load_covid_plot_config(param_set_name)\n else:\n plot_config_path = os.path.join(constants.APPS_PATH, app_name, \"plots.yml\")\n with open(plot_config_path, \"r\") as f:\n config = yaml.safe_load(f)\n\n validate_plot_config(config)\n return config\n","sub_path":"autumn/plots/streamlit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499323896","text":"#\n# @lc app=leetcode.cn id=430 lang=python3\n#\n# [430] 扁平化多级双向链表\n#\n\n# @lc code=start\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, prev, next, child):\n self.val = val\n self.prev = prev\n self.next = next\n self.child = child\n\"\"\"\n\nclass Solution:\n def flatten(self, head: 'Node') -> 'Node':\n v = []\n def dfs(head):\n if(head==None):\n return\n v.append(head)\n dfs(head.child)\n dfs(head.next)\n dfs(head)\n n = len(v)\n for i in range(n):\n if(i+1 < n):\n v[i].next = v[i+1]\n if(i > 0):\n v[i].prev = v[i-1]\n v[i].child = None\n return head\n \n# @lc code=end\n\n","sub_path":"430.扁平化多级双向链表.py","file_name":"430.扁平化多级双向链表.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"597715240","text":"import gym\nfrom gym import spaces\nfrom ple import PLE\nimport numpy as np\n\nfrom PIL import Image\n\n\ndef state_preprocessor(game_dict):\n _, values = zip(*sorted(list(game_dict.items())))\n state = np.array(values)\n return 
state\n\n\nclass PLEEnv(gym.Env):\n metadata = {'render.modes': ['human', 'rgb_array']}\n\n def __init__(self, game_name='FlappyBird', display_screen=True, observe_state=False):\n # open up a game state to communicate with emulator\n import importlib\n game_module_name = ('ple.games.%s' % game_name).lower()\n game_module = importlib.import_module(game_module_name)\n game = getattr(game_module, game_name)()\n self.game_state = PLE(game, fps=30, display_screen=display_screen, state_preprocessor=state_preprocessor)\n self.game_state.init()\n self._action_set = self.game_state.getActionSet()\n self.action_space = spaces.Discrete(len(self._action_set))\n self.screen_width, self.screen_height = self.game_state.getScreenDims()\n if self.screen_height+self.screen_width > 500:\n img_scale = 0.25\n else:\n img_scale = 1.0\n self.screen_width = int(self.screen_width*img_scale)\n self.screen_height = int(self.screen_height*img_scale)\n self.observe_state = observe_state\n if self.observe_state:\n # the bounds are typically not infinity\n self.observation_space = spaces.Box(low=-float('inf'), high=float('inf'), shape=self.game_state.state_dim)\n else:\n self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_height, self.screen_width, 3))\n self.viewer = None\n\n def _step(self, a):\n reward = self.game_state.act(self._action_set[a])\n if self.observe_state:\n state = self.game_state.getGameState()\n else:\n state = self._get_image()\n terminal = self.game_state.game_over()\n return state, reward, terminal, {}\n\n def _resize_frame(self, frame):\n pil_image = Image.fromarray(frame)\n pil_image = pil_image.resize((self.screen_width, self.screen_height), Image.ANTIALIAS)\n return np.array(pil_image)\n\n def _get_image(self):\n image_rotated = np.fliplr(np.rot90(self.game_state.getScreenRGB(), 3)) # Hack to fix the rotated image returned by ple\n return self._resize_frame(image_rotated)\n\n @property\n def _n_actions(self):\n return len(self._action_set)\n\n # return: (states, observations)\n def _reset(self, **kwargs):\n self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_height, self.screen_width, 3))\n self.game_state.reset_game(**kwargs)\n if self.observe_state:\n state = self.game_state.getGameState()\n else:\n state = self._get_image()\n return state\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n img = self._get_image()\n if mode == 'rgb_array':\n return img\n elif mode == 'human':\n from gym.envs.classic_control import rendering\n if self.viewer is None:\n self.viewer = rendering.SimpleImageViewer()\n self.viewer.imshow(img)\n\n def _seed(self, seed):\n rng = np.random.RandomState(seed)\n self.game_state.rng = rng\n self.game_state.game.rng = self.game_state.rng\n\n self.game_state.init()\n","sub_path":"gym_ple/ple_env.py","file_name":"ple_env.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"16598065","text":"import discord\nfrom discord.ext import commands\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search, Q, DocType, Text, Float, Keyword\nfrom elasticsearch_dsl.connections import connections\nfrom monacle_scraper import MonacleScraper, Team\nfrom collections import defaultdict\nfrom requests.exceptions import HTTPError\nimport humanize\nimport datetime\nimport re\nimport uuid\nimport trainerdex\n\nRE_MENTION = 
re.compile('\\<@\\d+\\>')\n\nTEAM_COLORS = {\n 0: 0xD3D3D3,\n 1: 0x0000FF,\n 2: 0xFF0000,\n 3: 0xFFFF00\n}\n\nMOVES = defaultdict(lambda: '?', { #From https://github.com/Noctem/Monocle/blob/a2e3c61b2ddd7772ae3c62a6f252476cce0e804b/monocle/names.py#L261\n 1: 'Thunder Shock',\n 2: 'Quick Attack',\n 3: 'Scratch',\n 4: 'Ember',\n 5: 'Vine Whip',\n 6: 'Tackle',\n 7: 'Razor Leaf',\n 8: 'Take Down',\n 9: 'Water Gun',\n 10: 'Bite',\n 11: 'Pound',\n 12: 'Double Slap',\n 13: 'Wrap',\n 14: 'Hyper Beam',\n 15: 'Lick',\n 16: 'Dark Pulse',\n 17: 'Smog',\n 18: 'Sludge',\n 19: 'Metal Claw',\n 20: 'Vice Grip',\n 21: 'Flame Wheel',\n 22: 'Megahorn',\n 23: 'Wing Attack',\n 24: 'Flamethrower',\n 25: 'Sucker Punch',\n 26: 'Dig',\n 27: 'Low Kick',\n 28: 'Cross Chop',\n 29: 'Psycho Cut',\n 30: 'Psybeam',\n 31: 'Earthquake',\n 32: 'Stone Edge',\n 33: 'Ice Punch',\n 34: 'Heart Stamp',\n 35: 'Discharge',\n 36: 'Flash Cannon',\n 37: 'Peck',\n 38: 'Drill Peck',\n 39: 'Ice Beam',\n 40: 'Blizzard',\n 41: 'Air Slash',\n 42: 'Heat Wave',\n 43: 'Twineedle',\n 44: 'Poison Jab',\n 45: 'Aerial Ace',\n 46: 'Drill Run',\n 47: 'Petal Blizzard',\n 48: 'Mega Drain',\n 49: 'Bug Buzz',\n 50: 'Poison Fang',\n 51: 'Night Slash',\n 52: 'Slash',\n 53: 'Bubble Beam',\n 54: 'Submission',\n 55: 'Karate Chop',\n 56: 'Low Sweep',\n 57: 'Aqua Jet',\n 58: 'Aqua Tail',\n 59: 'Seed Bomb',\n 60: 'Psyshock',\n 61: 'Rock Throw',\n 62: 'Ancient Power',\n 63: 'Rock Tomb',\n 64: 'Rock Slide',\n 65: 'Power Gem',\n 66: 'Shadow Sneak',\n 67: 'Shadow Punch',\n 68: 'Shadow Claw',\n 69: 'Ominous Wind',\n 70: 'Shadow Ball',\n 71: 'Bullet Punch',\n 72: 'Magnet Bomb',\n 73: 'Steel Wing',\n 74: 'Iron Head',\n 75: 'Parabolic Charge',\n 76: 'Spark',\n 77: 'Thunder Punch',\n 78: 'Thunder',\n 79: 'Thunderbolt',\n 80: 'Twister',\n 81: 'Dragon Breath',\n 82: 'Dragon Pulse',\n 83: 'Dragon Claw',\n 84: 'Disarming Voice',\n 85: 'Draining Kiss',\n 86: 'Dazzling Gleam',\n 87: 'Moonblast',\n 88: 'Play Rough',\n 89: 'Cross Poison',\n 90: 'Sludge Bomb',\n 91: 'Sludge Wave',\n 92: 'Gunk Shot',\n 93: 'Mud Shot',\n 94: 'Bone Club',\n 95: 'Bulldoze',\n 96: 'Mud Bomb',\n 97: 'Fury Cutter',\n 98: 'Bug Bite',\n 99: 'Signal Beam',\n 100: 'X-Scissor',\n 101: 'Flame Charge',\n 102: 'Flame Burst',\n 103: 'Fire Blast',\n 104: 'Brine',\n 105: 'Water Pulse',\n 106: 'Scald',\n 107: 'Hydro Pump',\n 108: 'Psychic',\n 109: 'Psystrike',\n 110: 'Ice Shard',\n 111: 'Icy Wind',\n 112: 'Frost Breath',\n 113: 'Absorb',\n 114: 'Giga Drain',\n 115: 'Fire Punch',\n 116: 'Solar Beam',\n 117: 'Leaf Blade',\n 118: 'Power Whip',\n 119: 'Splash',\n 120: 'Acid',\n 121: 'Air Cutter',\n 122: 'Hurricane',\n 123: 'Brick Break',\n 124: 'Cut',\n 125: 'Swift',\n 126: 'Horn Attack',\n 127: 'Stomp',\n 128: 'Headbutt',\n 129: 'Hyper Fang',\n 130: 'Slam',\n 131: 'Body Slam',\n 132: 'Rest',\n 133: 'Struggle',\n 134: 'Scald',\n 135: 'Hydro Pump',\n 136: 'Wrap',\n 137: 'Wrap',\n 200: 'Fury Cutter',\n 201: 'Bug Bite',\n 202: 'Bite',\n 203: 'Sucker Punch',\n 204: 'Dragon Breath',\n 205: 'Thunder Shock',\n 206: 'Spark',\n 207: 'Low Kick',\n 208: 'Karate Chop',\n 209: 'Ember',\n 210: 'Wing Attack',\n 211: 'Peck',\n 212: 'Lick',\n 213: 'Shadow Claw',\n 214: 'Vine Whip',\n 215: 'Razor Leaf',\n 216: 'Mud Shot',\n 217: 'Ice Shard',\n 218: 'Frost Breath',\n 219: 'Quick Attack',\n 220: 'Scratch',\n 221: 'Tackle',\n 222: 'Pound',\n 223: 'Cut',\n 224: 'Poison Jab',\n 225: 'Acid',\n 226: 'Psycho Cut',\n 227: 'Rock Throw',\n 228: 'Metal Claw',\n 229: 'Bullet Punch',\n 230: 'Water Gun',\n 231: 'Splash',\n 232: 'Water Gun',\n 233: 'Mud 
Slap',\n 234: 'Zen Headbutt',\n 235: 'Confusion',\n 236: 'Poison Sting',\n 237: 'Bubble',\n 238: 'Feint Attack',\n 239: 'Steel Wing',\n 240: 'Fire Fang',\n 241: 'Rock Smash',\n 242: 'Transform',\n 243: 'Counter',\n 244: 'Powder Snow',\n 245: 'Close Combat',\n 246: 'Dynamic Punch',\n 247: 'Focus Blast',\n 248: 'Aurora Beam',\n 249: 'Charge Beam',\n 250: 'Volt Switch',\n 251: 'Wild Charge',\n 252: 'Zap Cannon',\n 253: 'Dragon Tail',\n 254: 'Avalanche',\n 255: 'Air Slash',\n 256: 'Brave Bird',\n 257: 'Sky Attack',\n 258: 'Sand Tomb',\n 259: 'Rock Blast',\n 260: 'Infestation',\n 261: 'Struggle Bug',\n 262: 'Silver Wind',\n 263: 'Astonish',\n 264: 'Hex',\n 265: 'Night Shade',\n 266: 'Iron Tail',\n 267: 'Gyro Ball',\n 268: 'Heavy Slam',\n 269: 'Fire Spin',\n 270: 'Overheat',\n 271: 'Bullet Seed',\n 272: 'Grass Knot',\n 273: 'Energy Ball',\n 274: 'Extrasensory',\n 275: 'Future Sight',\n 276: 'Mirror Coat',\n 277: 'Outrage',\n 278: 'Snarl',\n 279: 'Crunch',\n 280: 'Foul Play',\n 281: 'Hidden Power'\n})\n\n\nclass Gym(DocType):\n title = Text(analyzer='snowball', fields={'raw': Keyword()})\n description = Text(analyzer='snowball')\n latitude = Float()\n longitude = Float()\n url = Text()\n image = Text()\n monacle_id = Text()\n\n class Meta:\n index = 'marker'\n\n \ndef format_list(items):\n if len(items) > 1:\n message = \", \".join([item for item in items[:-1]])+\" and {0}\".format(items[-1])\n else:\n message = \"{0}\".format(items[0])\n return message\n\ndef get_display_name(member):\n try:\n return trainerdex.Client().get_discord_user(member.id).owner().trainer(all_=False).username\n except:\n return member.display_name\n\nconnections.create_connection(hosts=['localhost'])\n\nclass Gyms:\n \"\"\"Pokemon Go Gyms!\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.client = Elasticsearch()\n self.monacle = MonacleScraper('https://kentpogomap.uk/raw_data', 'BIDoJSaHxR0Cz3mqJvI5kShtUc0CW/HPwK/CrRtEZhU=')\n self.going_users = defaultdict(set) # gym.id: list of users\n self.arrived_users = defaultdict(set) # gym.id: list of users\n self.users_going = {} # user_id: gym.id\n self.users_arrived = {} # user_id: gym.id\n self.user_groups = defaultdict(set) # user_id: list of users\n\t\t\n async def find_gym(self, gym):\n s = Search(using=self.client, index=\"marker\").query(\"match\", title={'query': gym, 'fuzziness': 2, 'slop': 1})\n response = s.execute()\n if response.hits.total == 0:\n await self.bot.say(\"I couldn't find that gym\")\n return None, None\n hit = response[0]\n monacle_gym = await self.get_monacle_gym(hit)\n return hit, monacle_gym\n\n async def get_monacle_gym(self, hit):\n return None\n\n @commands.command(pass_context=True)\n async def gym(self, ctx, *, gym: str):\n \"\"\"\n Lookup a gym, responds with an image, title, description and a google maps link.\n Gyms that have active raids are prioritized over gyms that do not.\n \"\"\"\n hit, monacle_gym = await self.find_gym(gym)\n if not hit:\n return\n description = \"{}\\n[Get Directions](https://www.google.com/maps/?daddr={},{})\".format(hit.description, hit.latitude, hit.longitude)\n embed=discord.Embed(title=hit.title, url='https://www.pokemongomap.info'+hit.url, description=description)\n embed.set_thumbnail(url=hit.image)\n if monacle_gym:\n embed.set_image(url='https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&zoom=15&size=250x125&maptype=roadmap&markers=color:{3}%7C{0},{1}&key={2}'.format(hit.latitude, hit.longitude, 'AIzaSyCEadifeA8X02v2OKv-orZWm8nQf1Q2EZ4', 
\"0x{:02X}\".format(TEAM_COLORS[monacle_gym.team])))\n embed.color = TEAM_COLORS[monacle_gym.team]\n if monacle_gym.slots_available > 0:\n embed.add_field(name='Slots available', value=monacle_gym.slots_available)\n embed.add_field(name='Owned by', value=monacle_gym.team_name)\n if monacle_gym.raid_start and monacle_gym.raid_start <= datetime.datetime.now() and monacle_gym.raid_end >= datetime.datetime.now():\n embed.add_field(name='Raid level', value=monacle_gym.raid_level)\n embed.add_field(name='Raid Pokemon', value=monacle_gym.raid_pokemon.name)\n embed.add_field(name='CP', value=monacle_gym.raid_pokemon.cp)\n embed.add_field(name='Moveset', value=MOVES[monacle_gym.raid_pokemon.move_1]+' / '+MOVES[monacle_gym.raid_pokemon.move_2])\n embed.add_field(name='Started at', value=monacle_gym.raid_start.strftime(\"%H:%M:%S\"))\n embed.add_field(name='Ends at', value=\"{} ({})\".format(monacle_gym.raid_end.strftime(\"%H:%M:%S\"), humanize.naturaltime(datetime.datetime.now()-monacle_gym.raid_end)))\n else:\n embed.set_image(url='https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&zoom=15&size=250x125&maptype=roadmap&markers=color:{3}%7C{0},{1}&key={2}'.format(hit.latitude, hit.longitude, 'AIzaSyCEadifeA8X02v2OKv-orZWm8nQf1Q2EZ4', 'white'))\n\n await self.bot.say(embed=embed)\n\n @commands.command(pass_context=True, no_pm=True)\n async def interested(self, ctx, *, gym: str):\n \"\"\"State you're interested in going to a raid\"\"\"\n gym = re.sub(RE_MENTION, '', gym).strip()\n hit, monacle_gym = await self.find_gym(gym)\n if not hit:\n return\n message = get_display_name(ctx.message.author)\n if monacle_gym and monacle_gym.raid_start and monacle_gym.raid_start <= datetime.datetime.now() and monacle_gym.raid_end >= datetime.datetime.now():\n message += \" is interested in the {0} raid\".format(monacle_gym.raid_pokemon.name)\n else:\n await self.bot.say(\"I can't see a raid at {}, sorry.\".format(hit.title))\n return self.bot.delete_message(ctx.message)\n message += ' at {}'.format(hit.title)\n message += \".\"\n await self.bot.say(message)\n if discord.utils.get(ctx.message.server.channels, name='ticker'):\n ticker = discord.utils.get(ctx.message.server.channels, name='ticker')\n await self.bot.send_message(ticker, message)\n await self.bot.delete_message(ctx.message)\n \n @commands.command(pass_context=True, no_pm=True)\n async def addgoing(self, ctx, *, gym: str):\n \"\"\"Used to set other trainers as going to a raid\"\"\"\n return await self._going(ctx, gym, False)\n\n @commands.command(pass_context=True, no_pm=True)\n async def going(self, ctx, *, gym: str):\n \"\"\"Used to set yourself and possibly other trainers as going to a raid\"\"\"\n return await self._going(ctx, gym, True)\n\n async def _going(self, ctx, gym, add_author_to_group):\n gym = re.sub(RE_MENTION, '', gym).strip()\n hit, monacle_gym = await self.find_gym(gym)\n if ctx.message.author in dict(list(self.users_going.items()) + list(self.users_arrived.items())) and add_author_to_group:\n await self._notgoing(ctx)\n temp1 = await self.bot.say('You forgot to do `.done` at your last raid but I sorted that.')\n extra_users = re.search(r'\\+(\\d+)', gym)\n if not hit:\n return\n message = get_display_name(ctx.message.author)\n if extra_users:\n extra_users = int(extra_users.group(0))\n message += \" +{}\".format(extra_users)\n else:\n extra_users = 0\n \n if add_author_to_group:\n self.going_users[hit.meta.id].add(ctx.message.author)\n group = set()\n users = []\n if add_author_to_group:\n group.add(ctx.message.author)\n 
users.append(ctx.message.author)\n if ctx.message.mentions:\n group.update(ctx.message.mentions)\n users = list(group) # remove duplicates\n if ctx.message.author in ctx.message.mentions:\n users.remove(ctx.message.author) # can't raid with yourself\n for user in users:\n self.going_users[hit.meta.id].add(user)\n message = format_list([\"{0}\".format(get_display_name(user)) for user in users])\n\n if len(users) == 1:\n message += ' is'\n else:\n message += ' are'\n \n message += ' going to {}'.format(hit.title)\n if monacle_gym and monacle_gym.raid_start and monacle_gym.raid_start <= datetime.datetime.now() and monacle_gym.raid_end >= datetime.datetime.now():\n message += \" for a raid on {0}\".format(monacle_gym.raid_pokemon.name)\n message += \".\"\n\n for user in users:\n self.users_going[user] = hit.meta.id\n self.user_groups[user].update(group)\n await self.bot.say(message)\n if discord.utils.get(ctx.message.server.channels, name='ticker'):\n ticker = discord.utils.get(ctx.message.server.channels, name='ticker')\n await self.bot.send_message(ticker, message)\n await self.bot.delete_message(ctx.message)\n\n @commands.command(pass_context=True, no_pm=True)\n async def notgoing(self, ctx):\n \"\"\"No, not going anymore m8\"\"\"\n message = await self._notgoing(ctx)\n await self.bot.say(message)\n if discord.utils.get(ctx.message.server.channels, name='ticker'):\n ticker = discord.utils.get(ctx.message.server.channels, name='ticker')\n await self.bot.send_message(ticker, message)\n return await self.bot.delete_message(ctx.message)\n\n async def _notgoing(self, ctx):\n gym_id = self.users_arrived.get(ctx.message.author, None)\n if not gym_id:\n gym_id = self.users_going.get(ctx.message.author, None)\n if not gym_id:\n await self.bot.say('You are not marked as going to any raids')\n return\n gym = Gym.get(id=gym_id)\n monacle_gym = await self.get_monacle_gym(gym)\n if monacle_gym and monacle_gym.raid_start and monacle_gym.raid_start <= datetime.datetime.now() and monacle_gym.raid_end >= datetime.datetime.now():\n message = \"{} is not going to the {} raid at {}\".format(get_display_name(ctx.message.author), monacle_gym.raid_pokemon.name, gym.title)\n else:\n message = \"{} is not going to {}\".format(get_display_name(ctx.message.author), gym.title)\n self.arrived_users[gym_id].discard(ctx.message.author)\n self.going_users[gym_id].discard(ctx.message.author)\n if ctx.message.author in self.users_arrived:\n del self.users_arrived[ctx.message.author]\n if ctx.message.author in self.users_going:\n del self.users_going[ctx.message.author]\n for user in self.user_groups[ctx.message.author]:\n if user != ctx.message.author:\n self.user_groups[user].discard(ctx.message.author)\n del self.user_groups[ctx.message.author]\n return message\n\n @commands.command(pass_context=True)\n async def who(self, ctx, *, gym: str):\n \"\"\"\n People try to put us down\n Just because we get around\n Things they do look awful cold\n I hope I die before I get old\n \"\"\"\n hit, monacle_gym = await self.find_gym(gym)\n if not hit:\n return\n message = \"\"\n if len(self.going_users[hit.meta.id]) == 0 and len(self.arrived_users[hit.meta.id]) == 0:\n message = \"Nobody is going\"\n if len(self.going_users[hit.meta.id]) > 0:\n message += format_list([get_display_name(user) for user in self.going_users[hit.meta.id]])\n message += \" are\" if len(self.going_users[hit.meta.id]) > 1 else \" is\"\n message += \" on the way\"\n if len(self.arrived_users[hit.meta.id]) > 0 and len(self.going_users[hit.meta.id]) > 0:\n message 
+= \" and \"\n if len(self.arrived_users[hit.meta.id]) > 0:\n message += format_list([get_display_name(user) for user in self.arrived_users[hit.meta.id]])\n message += \" have arrived at\"\n else: \n message += \" to\"\n if monacle_gym and monacle_gym.raid_start and monacle_gym.raid_start <= datetime.datetime.now() and monacle_gym.raid_end >= datetime.datetime.now():\n message += \" the {} raid at {}.\\n\".format(monacle_gym.raid_pokemon.name, hit.title)\n else:\n message += \" \"+hit.title\n await self.bot.say(message)\n await self.bot.delete_message(ctx.message)\n\n @commands.command(pass_context=True)\n async def arrived(self, ctx, *members: discord.Member):\n \"\"\"You know when you were at school and they would do the register and you'd get really paranoid about how you said 'here'. No worries here, only one way to say it- [p]arrived!\"\"\"\n gym_id = self.users_arrived.get(ctx.message.author, None)\n if not gym_id:\n gym_id = self.users_going.get(ctx.message.author, None)\n if not gym_id:\n await self.bot.say('You are not marked as going to any raids')\n return\n gym = Gym.get(id=gym_id)\n monacle_gym = await self.get_monacle_gym(gym)\n arrived = set(self.user_groups[ctx.message.author])\n for member in members:\n arrived.update(self.user_groups[member])\n message = format_list([get_display_name(user) for user in arrived])\n if len(arrived) == 1:\n message += ' has'\n else:\n message += ' have'\n message += ' arrived at {}'.format(gym.title)\n if monacle_gym and monacle_gym.raid_start and monacle_gym.raid_start <= datetime.datetime.now() and monacle_gym.raid_end >= datetime.datetime.now():\n message += \" for the raid on {0}\".format(monacle_gym.raid_pokemon.name)\n message += \".\"\n self.users_arrived[ctx.message.author] = gym_id\n for user in arrived:\n if user in self.user_groups:\n del self.user_groups[user]\n if user in self.users_going:\n del self.users_going[user]\n self.arrived_users[gym_id].add(user)\n self.going_users[gym_id].remove(user)\n await self.bot.say(message)\n if discord.utils.get(ctx.message.server.channels, name='ticker'):\n ticker = discord.utils.get(ctx.message.server.channels, name='ticker')\n await self.bot.send_message(ticker, message)\n await self.bot.delete_message(ctx.message)\n\t\t\n @commands.command(pass_context=True)\n async def done(self, ctx):\n \"\"\"Finished already? 
That was quick!\"\"\"\n gym_id = self.users_arrived.get(ctx.message.author, None)\n if not gym_id:\n gym_id = self.users_going.get(ctx.message.author, None)\n if not gym_id:\n await self.bot.say('You are not marked as going to any raids')\n return\n gym = Gym.get(id=gym_id)\n monacle_gym = await self.get_monacle_gym(gym)\n if monacle_gym and monacle_gym.raid_start and monacle_gym.raid_start <= datetime.datetime.now() and monacle_gym.raid_end >= datetime.datetime.now():\n message = \"{} has finished the {} raid at {}\".format(get_display_name(ctx.message.author), monacle_gym.raid_pokemon.name, gym.title)\n else:\n message = \"{} is finished at {}\".format(get_display_name(ctx.message.author), gym.title)\n self.arrived_users[gym_id].discard(ctx.message.author)\n self.going_users[gym_id].discard(ctx.message.author)\n if ctx.message.author in self.users_arrived:\n del self.users_arrived[ctx.message.author]\n if ctx.message.author in self.users_going:\n del self.users_going[ctx.message.author]\n for user in self.user_groups[ctx.message.author]:\n if user != ctx.message.author:\n self.user_groups[user].discard(ctx.message.author)\n del self.user_groups[ctx.message.author]\n await self.bot.say(message)\n if discord.utils.get(ctx.message.server.channels, name='ticker'):\n ticker = discord.utils.get(ctx.message.server.channels, name='ticker')\n await self.bot.send_message(ticker, message)\n await self.bot.delete_message(ctx.message)\n\n @commands.command(pass_context=True)\n async def raids(self, ctx):\n \"\"\"Not a list of active raids\"\"\"\n message = ''\n gyms = set(list(self.going_users.keys())+list(self.arrived_users.keys()))\n if not gyms:\n message = 'There are no raids on at the moment'\n for gym_id in gyms:\n gym = Gym.get(id=gym_id)\n monacle_gym = await self.get_monacle_gym(gym)\n if monacle_gym and monacle_gym.raid_start and monacle_gym.raid_start <= datetime.datetime.now() and monacle_gym.raid_end >= datetime.datetime.now():\n num_users = len(self.going_users[gym_id]) + len(self.arrived_users[gym_id])\n message += str(num_users)\n if num_users == 1:\n message += ' user is'\n else:\n message += ' users are'\n message += ' on the way to the {} raid at {} - ends at {} ({}).\\n'.format(monacle_gym.raid_pokemon.name, gym.title, monacle_gym.raid_end.strftime(\"%H:%M:%S\"), humanize.naturaltime(datetime.datetime.now()-monacle_gym.raid_end))\n await self.bot.say(message)\n await self.bot.delete_message(ctx.message)\n \n \ndef setup(bot):\n bot.add_cog(Gyms(bot))\n","sub_path":"gyms/gyms.py","file_name":"gyms.py","file_ext":"py","file_size_in_byte":21870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"590470441","text":"import sys\nimport os\nimport numpy as np\nfrom time import time\nimport pickle\nimport traceback\nimport logging\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport contextlib\nimport io\nfrom functools import partial\nfrom copy import deepcopy\nimport redis\nimport cv2\nimport pickle\nimport json\n\nsys.path.append(os.getcwd())\n\nfrom my_meep.gen_geo_helper import read_windows, write_windows\nfrom my_meep.animator import my_animate, my_rms_plot, plot_3d\nfrom my_meep.helper import Translate, output_file_name, get_offset\nfrom my_meep.config.configs import get_array\nfrom my_meep.config.config_variables import *\n\nimport mpld3\nfrom mpld3 import plugins, utils\n\n\nr = redis.Redis(port=6379, host='meep_celery', db=0)\n \nclass Save_img():\n \"\"\" \n depend on 
whether the plotting is web, the funtion will attempt to pipe the image to the right direction \n \"\"\"\n def __init__(self, config, current_user_id):\n self.web = config.getboolean('web', 'web')\n self.config = config\n self.current_user_id = current_user_id\n\n def __call__(self, name):\n if self.web:\n # bytes_image = io.BytesIO()\n # plt.savefig(bytes_image, format='png')\n # bytes_image.seek(0)\n # r.set(str(self.current_user_id) + name, bytes_image.read())\n if 'rms' in name:\n return \n\n fig = plt.gcf()\n plot_string = mpld3.fig_to_html(fig, d3_url=None, mpld3_url=None, no_extras=False, template_type='general', figid=None, use_http=False)\n r.set('user_' + str(self.current_user_id) + '_plot_' + name, plot_string)\n else:\n # save to local dir\n plt.savefig(output_file_name(self.config) + name + '.png', dpi=300, bbox_inches='tight')\n plt.close()\n\nclass Plot_res():\n \"\"\"\n The class will both plot structure and RMS\n depend on the shape of the eps data and electric field data, it plots the corresponding correct graphes\n \"\"\"\n\n def __init__(self, result_manager, Simulation, current_user_id):\n self.ez_data = result_manager.ez_data.transpose()\n self.eps=result_manager.eps.transpose()\n self.eps_rock = result_manager.eps_rock.transpose()\n self.ez_data_particle_only = result_manager.ez_data_particle_only.transpose()\n self.config = result_manager.config\n self.Simulation=Simulation\n self.current_user_id = current_user_id\n self.cell_size = conf.get_array('Geometry', 'cell_size', self.config)\n\n self.ez_dim = len(self.ez_data.shape)\n self.eps_dim = len(self.eps.shape)\n r.set('user_' + str(self.current_user_id) + '_plot_eps', json.dumps(self.eps.tolist()))\n \n self.get_configs()\n self.get_eps_edge()\n self.get_conture_axis()\n self.save_img = Save_img(self.config, current_user_id)\n r.set('user_' + str(self.current_user_id) + '_plot_rms_particle_only', json.dumps(self.ez_data_particle_only.tolist()))\n r.set('user_' + str(self.current_user_id) + '_plot_rms_particle_only_log', json.dumps(np.log(self.ez_data_particle_only).tolist()))\n # r.set('user_' + str(self.current_user_id) + '_plot_rms_eps', json.dumps(self.eps.tolist()))\n\n def get_eps_edge(self):\n if np.max(self.eps) - np.min(self.eps) > 0.1:\n self.translate = Translate(np.min(self.eps), np.max(self.eps), 0, 254)\n vtrans = np.vectorize(self.translate)\n self.eps = vtrans(self.eps).astype(np.uint8)\n self.eps_edges = cv2.Canny(self.eps, 50, 150).astype(np.bool)\n # r.set('user_' + str(self.current_user_id) + '_plot_rms_eps_edge', json.dumps(self.eps_edges.tolist()))\n else:\n self.eps_edges = None\n \n def get_configs(self):\n self.web = self.config.getboolean('web', 'web')\n self.res = self.config.getfloat('Simulation', 'resolution')\n self.out_every = self.config.getfloat('Simulation', 'out_every')\n self.time_sim = self.config.getfloat('Simulation', 'time')\n self.cbar_scale = get_array('Visualization', 'cbar_scale', self.config)\n self.view_only_particles = self.config.getboolean('Visualization', 'view_only_particles')\n\n def get_conture_axis(self):\n X = np.arange(-self.cell_size[0]/2, self.cell_size[0]/2, 1/self.res)\n Y = np.arange(-self.cell_size[1]/2, self.cell_size[1]/2, 1/self.res)\n self.X, self.Y = np.meshgrid(X, Y)\n r.set('user_' + str(self.current_user_id) + '_plot_rms_xy', json.dumps([self.X[0, :].tolist(), self.Y[:, 0].tolist()]))\n\n def structure_plot(self):\n sim_dim = self.config.getint('Simulation', 'dimension')\n if sim_dim == 2:\n plt.figure(figsize=[4,4])\n self.Simulation.plot2D()\n 
else:\n offset, offset_index = get_offset(self.eps)\n plot_3d(self.eps, offset, offset_index, self.config)\n\n def transient_3d(self):\n ez_trans = self.ez_trans\n ez_trans = np.moveaxis(ez_trans, -1, 0)\n my_animate(ez_trans, window=1)\n\n def static_3d(self):\n offset, offset_index = get_offset(self.ez_data)\n plot_3d(self.ez_data, offset, offset_index, self.config)\n\n def transient_2d(self):\n start = int(self.cell_size[0]*2/self.out_every*3)\n\n # 3 is to ensure the slower wave in the medium fully propogate\n end = len(self.ez_data) - 1\n if start >= end:\n print('Time interval is not sufficient')\n start = end - 20\n\n print('Time period for RMS: ', [start, end])\n self.ez_data[-2, self.eps_edges] = np.max(self.ez_data)*len(self.ez_data)/20\n my_rms_plot(self.ez_data, 0, 'rms', [start, end])\n\n def add_particle_edge(self):\n if self.eps_edges is not None:\n self.ez_data[self.eps_edges] = 5\n\n\n def static_2d(self, ez_data):\n pass\n # fig = plt.figure(figsize=(4,4))\n # ax = plt.axes()\n # graph = plt.pcolor(self.X, self.Y, ez_data, vmin=0, vmax=0.01)\n # cb = fig.colorbar(graph, ax=ax)\n # cb.set_label(label='E^2 (V/m)^2', size='xx-large', weight='bold')\n # cb.ax.tick_params(labelsize=20) \n # ax.tick_params(axis='both', which='major', labelsize=20)\n # plt.title('Fill Factor is ' + self.config.get('Geometry','fill_factor'), fontsize=20)\n\n def static_2d_all(self):\n r.set('user_' + str(self.current_user_id) + '_plot_rms_block', json.dumps(self.ez_data.tolist()))\n r.set('user_' + str(self.current_user_id) + '_plot_rms_block_log', json.dumps(np.log(self.ez_data).tolist()))\n\n def static_2d_particle(self):\n pass\n # r.set('user_' + str(self.current_user_id) + '_plot_rms_particle_only', json.dumps(self.ez_data_particle_only.tolist()))\n\n def static_2d_particle_contour(self):\n fig = plt.figure(figsize=(7, 6))\n\n self.cbar_scale /= 6\n ax = fig.gca(projection='3d')\n trans = self.translate(-self.cell_size[0]/2,\n self.cell_size[0]/2, 0, self.ez_data_particle_only.shape[0])\n ax_lim = [-1, 1, -2, 2]\n ax_index_lim = [int(trans(ele)) for ele in ax_lim]\n\n graph = ax.plot_surface(self.X[ax_index_lim[0]:ax_index_lim[1], ax_index_lim[2]:ax_index_lim[3]], self.Y[ax_index_lim[0]:ax_index_lim[1], ax_index_lim[2] :ax_index_lim[3]], self.ez_data_particle_only[ax_index_lim[0]:ax_index_lim[1], ax_index_lim[2]:ax_index_lim[3]], cmap=cm.coolwarm, linewidth=0, antialiased=False) \n ax.tick_params(axis='both', which='major', labelsize=20)\n plt.title('Fill Factor is ' + self.config.get('Geometry','fill_factor'), fontsize=20)\n\n def __call__(self):\n if self.config.getboolean('Visualization', 'structure'):\n self.structure_plot()\n self.save_img('structure')\n \n ez_dim = self.ez_dim\n eps_dim = self.eps_dim\n\n if ez_dim == 4 and eps_dim == 3:\n print('trans 3d')\n self.transient_3d()\n elif ez_dim == 3 and eps_dim == 3:\n print('static 3d')\n self.static_3d()\n elif ez_dim == 3 and eps_dim == 2:\n print('trans 2d')\n self.transient_2d()\n elif ez_dim == 2 and eps_dim == 2:\n self.add_particle_edge()\n self.static_2d_particle()\n self.static_2d_all()\n\n self.save_img('rms')\n\n","sub_path":"my_meep/my_meep/visulization.py","file_name":"visulization.py","file_ext":"py","file_size_in_byte":8403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"296314658","text":"# -*- coding: utf-8 -*-\nimport random\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom parser_n import parse\n\n\ndef insert_sort(n):\n array = 
list(range(1, n + 1))\n random.shuffle(array)\n fig = plt.figure()\n ims = []\n left = range(1, len(array) + 1)\n for i in range(1, len(array) + 1):\n if i >= len(array):\n break\n temp = array[i]\n for j in reversed(range(i)):\n height = array\n print(array)\n im = plt.bar(left, height, color=\"#66cdaa\")\n ims.append(im)\n if temp > array[j]:\n # temp2 = array[j + 1]\n # array[j + 1] = array[j]\n # array[j] = temp2\n array[j + 1], array[j] = array[j], array[j + 1]\n\n else:\n # array.pop(j + 1)\n # array.insert(j + 1, temp)\n break\n\n ani = animation.ArtistAnimation(fig, ims, interval=30)\n plt.show(block=False)\n input(\"Enter to close\")\n plt.close()\n\n\ndef main():\n n = parse()\n insert_sort(n)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"revisions/insert_sort1.py","file_name":"insert_sort1.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427661009","text":"import pytest\nfrom asynctest import call, ANY\n\nfrom kopf.k8s.events import post_event\n\n\ndef test_posting(client_mock):\n result = object()\n apicls_mock = client_mock.CoreV1Api\n apicls_mock.return_value.create_namespaced_event.return_value = result\n postfn_mock = apicls_mock.return_value.create_namespaced_event\n\n obj = {'apiVersion': 'group/version',\n 'kind': 'kind',\n 'metadata': {'namespace': 'ns',\n 'name': 'name',\n 'uid': 'uid'}}\n post_event(obj=obj, type='type', reason='reason', message='message')\n\n assert postfn_mock.called\n assert postfn_mock.call_count == 1\n assert postfn_mock.call_args_list == [call(\n namespace='ns', # same as the object's namespace\n body=ANY,\n )]\n\n event = postfn_mock.call_args_list[0][1]['body']\n assert event.type == 'type'\n assert event.reason == 'reason'\n assert event.message == 'message'\n assert event.source.component == 'kopf'\n assert event.involved_object['apiVersion'] == 'group/version'\n assert event.involved_object['kind'] == 'kind'\n assert event.involved_object['namespace'] == 'ns'\n assert event.involved_object['name'] == 'name'\n assert event.involved_object['uid'] == 'uid'\n\n\ndef test_type_is_v1_not_v1beta1(client_mock):\n apicls_mock = client_mock.CoreV1Api\n postfn_mock = apicls_mock.return_value.create_namespaced_event\n\n obj = {'apiVersion': 'group/version',\n 'kind': 'kind',\n 'metadata': {'namespace': 'ns',\n 'name': 'name',\n 'uid': 'uid'}}\n post_event(obj=obj, type='type', reason='reason', message='message')\n\n event = postfn_mock.call_args_list[0][1]['body']\n assert isinstance(event, client_mock.V1Event)\n assert not isinstance(event, client_mock.V1beta1Event)\n\n\ndef test_api_errors_logged_but_suppressed(client_mock, assert_logs):\n error = client_mock.rest.ApiException('boo!')\n apicls_mock = client_mock.CoreV1Api\n apicls_mock.return_value.create_namespaced_event.side_effect = error\n postfn_mock = apicls_mock.return_value.create_namespaced_event\n\n obj = {'apiVersion': 'group/version',\n 'kind': 'kind',\n 'metadata': {'namespace': 'ns',\n 'name': 'name',\n 'uid': 'uid'}}\n post_event(obj=obj, type='type', reason='reason', message='message')\n\n assert postfn_mock.called\n assert_logs([\n \"Failed to post an event.*boo!\",\n ])\n\n\ndef test_regular_errors_escalate(client_mock):\n error = Exception('boo!')\n apicls_mock = client_mock.CoreV1Api\n apicls_mock.return_value.create_namespaced_event.side_effect = error\n\n obj = {'apiVersion': 'group/version',\n 'kind': 'kind',\n 'metadata': {'namespace': 'ns',\n 'name': 'name',\n 'uid': 
'uid'}}\n\n    with pytest.raises(Exception) as excinfo:\n        post_event(obj=obj, type='type', reason='reason', message='message')\n\n    assert excinfo.value is error\n\n\ndef test_message_is_cut_to_max_length(client_mock):\n    result = object()\n    apicls_mock = client_mock.CoreV1Api\n    apicls_mock.return_value.create_namespaced_event.return_value = result\n    postfn_mock = apicls_mock.return_value.create_namespaced_event\n\n    obj = {'apiVersion': 'group/version',\n           'kind': 'kind',\n           'metadata': {'namespace': 'ns',\n                        'name': 'name',\n                        'uid': 'uid'}}\n    message = 'start' + ('x' * 2048) + 'end'\n    post_event(obj=obj, type='type', reason='reason', message=message)\n\n    event = postfn_mock.call_args_list[0][1]['body']\n    assert len(event.message) <= 1024  # max supported API message length\n    assert '...' in event.message\n    assert event.message.startswith('start')\n    assert event.message.endswith('end')\n","sub_path":"tests/k8s/test_events.py","file_name":"test_events.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275419507","text":"testdata = [2 , 5 , 3 , 7, 1 ,6 ,-3 ,12]\ndef BubbleSort(l):\n    length = len(l)\n    \n    swapped = True \n    while (swapped == True):\n        swapped = False\n        for x in range(1, length):\n            if l[x-1] > l[x]:\n                temp = l[x]\n                l[x] = l[x-1]\n                l[x-1] = temp\n                swapped = True\nprint(testdata)\nBubbleSort(testdata)\nprint(testdata)\n\n\n    ","sub_path":"BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"475242944","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import get_user_model\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\n# Create your models here.\n\n# This ensures our code will continue to work even if we change our default user model\nUser = get_user_model()\n\nclass Settings(models.Model):\n\tuser = models.OneToOneField(User, related_name='settings', on_delete=models.CASCADE)\n\n\t# More Secure Phone number fields are possible to implement in the future\n\tphone_number = models.CharField(max_length=12, blank=True, null=True)\n\tlocation = models.CharField(max_length=140, blank=True, null=True)\n\tage = models.IntegerField(blank=True, null=True)\n\tstudent_status = models.BooleanField(blank=True, null=True)\n\n\t# If we have a means of storing static files, this will be a more valid method of storing images\n\t# icon = models.ImageField(upload_to='profile_image', blank=True)\n\ticon_url = models.URLField(blank=True, null=True)\n\n\tdef __str__(self):\n\t\treturn f\"Profile Settings for {self.user.username}\"\n\n\nclass BudgetList(models.Model):\n\tuser = models.OneToOneField(User, related_name='budget', on_delete=models.CASCADE)\n\n\t# Set (null = True) so that fields are allowed to be blank\n\tbalance = models.FloatField(default = 0.0)\n\tsavings_goal = models.FloatField(default = 0.0)\n\tlast_updated = models.DateField(auto_now = True, null = True)\n\n\nclass CashFlow(models.Model):\n\t# django doesn't naturally have a one to many field, so instead use the ForeignKey which\n\t# represents a Many to One. 
It points all of our cashflows to our budget\n\tuser = models.ForeignKey(User,on_delete=models.CASCADE, null=True)\n\n\t# Name and date of cashflow\n\tname = models.CharField(max_length = 26, null = True)\n\tdate = models.DateField(null = True)\n\n\n\tpayment_choices = [(\"Payment\", \"Payment\"), (\"Income\", \"Income\")]\n\ttype = models.CharField(max_length = 26, choices = payment_choices, default = 'Payment', null = True)\n\trecurring_choices = ((True,\"Yes\"),(False,\"No\"))\n\trecurring = models.BooleanField(choices = recurring_choices, default = False, null = True)\n\n\tpayment_categories = [(\"Housing\", \"Housing\"), (\"Utilities\", \"Utilities\"), (\"Transportation\", \"Transportation\"), (\"Food/Groceries\", \"Food/Groceries\"), (\"Shopping & Entertainment\", \"Shopping & Entertainment\"), (\"Subscriptions\", \"Subscriptions\"), (\"Health\", \"Health\"), (\"Savings Contribution\", \"Savings Contribution\"), (\"Other\", \"Other\")]\n\tcategory = models.CharField(max_length = 26, choices = payment_categories, default = 'Food/Groceries', null = True)\n\t# Currently assumes US dollars\n\tamount = models.FloatField(null = True)\n\tdescription = models.TextField(null = True)\n\t#category = models.CharField(max_length=140)\n","sub_path":"Project_Directory/dashboard/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"367106291","text":"from nose.tools import eq_\nfrom ece365lib import train\nimport nose\nimport nltk\nfrom nltk.lm import Laplace\nfrom nltk.lm.preprocessing import padded_everygram_pipeline\nfrom ece365lib import evaluate\n\ndef setup_module():\n global food_corpus, natr_corpus\n \n food = ['barley', 'castor-oil', 'cocoa', 'coconut', 'coconut-oil', 'coffee', 'copra-cake''grain', 'groundnut', 'groundnut-oil', 'potato''soy-meal', 'soy-oil', 'soybean', 'sugar', 'sun-meal', 'sun-oil', 'sunseed', 'tea', 'veg-oil', 'wheat']\n natural_resources = ['alum', 'fuel', 'gas', 'gold', 'iron-steel', 'lead', 'nat-gas', 'palladium', 'propane', 'tin', 'zinc']\n\n corpus = nltk.corpus.reuters\n food_corpus = corpus.raw(categories=food)\n natr_corpus = corpus.raw(categories=natural_resources)\n \n \ndef test_d1_1_tk():\n global food_corpus, natr_corpus\n \n food_corpus_tk = train.tokenize_corpus(food_corpus)\n natr_corpus_tk = train.tokenize_corpus(natr_corpus)\n \n eq_(food_corpus_tk[25][5],'Monday')\n eq_(natr_corpus_tk[25][5],'are')\n \n \ndef test_d1_2_pad():\n global food_corpus, natr_corpus\n \n food_corpus_tk = train.tokenize_corpus(food_corpus)\n natr_corpus_tk = train.tokenize_corpus(natr_corpus)\n \n food_corpus_tk_pd = train.pad_corpus(food_corpus_tk)\n natr_corpus_tk_pd = train.pad_corpus(natr_corpus_tk)\n \n eq_(food_corpus_tk_pd[35][0], '')\n eq_(natr_corpus_tk_pd[35][-1], '')\n eq_(len(food_corpus_tk_pd[45]), 14)\n eq_(len(natr_corpus_tk_pd[45]), 19)\n eq_(len(food_corpus_tk_pd[45]) - len(food_corpus_tk[45]), 2)\n \n \ndef test_d1_3_spc():\n global food_corpus, natr_corpus\n \n food_corpus_tk = train.tokenize_corpus(food_corpus)\n natr_corpus_tk = train.tokenize_corpus(natr_corpus)\n \n food_corpus_tk_pd = train.pad_corpus(food_corpus_tk)\n natr_corpus_tk_pd = train.pad_corpus(natr_corpus_tk)\n \n food_corpus_tr, food_corpus_te = train.split_corpus(food_corpus_tk_pd)\n natr_corpus_tr, natr_corpus_te = train.split_corpus(natr_corpus_tk_pd)\n \n eq_(len(food_corpus_tr), 4888)\n eq_(len(food_corpus_te), 1222)\n eq_(len(natr_corpus_tr), 2610)\n 
eq_(len(natr_corpus_te), 653)\n eq_(food_corpus_te[3][5], 'by')\n eq_(natr_corpus_te[1][2], 'Project')\n \n \ndef test_d1_4_cn():\n global food_corpus, natr_corpus\n \n food_corpus_tk = train.tokenize_corpus(food_corpus)\n natr_corpus_tk = train.tokenize_corpus(natr_corpus)\n \n food_corpus_tk_pd = train.pad_corpus(food_corpus_tk)\n natr_corpus_tk_pd = train.pad_corpus(natr_corpus_tk)\n \n food_corpus_tr, food_corpus_te = train.split_corpus(food_corpus_tk_pd)\n natr_corpus_tr, natr_corpus_te = train.split_corpus(natr_corpus_tk_pd)\n \n food_ngrams, food_vocab_man = train.count_ngrams(food_corpus_tr, 3)\n natr_ngrams, natr_vocab_man = train.count_ngrams(natr_corpus_tr, 3)\n \n eq_(len(food_ngrams.keys()), 181387)\n eq_(len(natr_ngrams.keys()), 105612)\n eq_(food_ngrams[('sold', 'the')], 2)\n eq_(natr_ngrams[('extracting', 'the')], 2)\n eq_(len(food_vocab_man), 12728)\n eq_(len(natr_vocab_man), 8972)\n eq_(sorted(food_vocab_man)[3200], 'ANALYSTS')\n eq_(sorted(natr_vocab_man)[3210], 'NGX')\n \n \ndef test_d1_5_es():\n global food_corpus, natr_corpus\n \n food_corpus_tk = train.tokenize_corpus(food_corpus)\n natr_corpus_tk = train.tokenize_corpus(natr_corpus)\n \n food_corpus_tk_pd = train.pad_corpus(food_corpus_tk)\n natr_corpus_tk_pd = train.pad_corpus(natr_corpus_tk)\n \n food_corpus_tr, food_corpus_te = train.split_corpus(food_corpus_tk_pd)\n natr_corpus_tr, natr_corpus_te = train.split_corpus(natr_corpus_tk_pd)\n \n food_ngrams, food_vocab_man = train.count_ngrams(food_corpus_tr, 3)\n natr_ngrams, natr_vocab_man = train.count_ngrams(natr_corpus_tr, 3)\n \n eq_(train.estimate(food_ngrams, ['palm'], ['producer', 'of']), 0.25)\n eq_(train.estimate(natr_ngrams, ['basis'], ['tested', 'the']), 0.5)\n\n\ndef test_d2_1_gp():\n global food_corpus, natr_corpus\n \n food_corpus_tk = train.tokenize_corpus(food_corpus)\n natr_corpus_tk = train.tokenize_corpus(natr_corpus)\n \n food_train, food_vocab = padded_everygram_pipeline(3, food_corpus_tk[:int(0.8*len(food_corpus_tk))])\n natr_train, natr_vocab = padded_everygram_pipeline(3, natr_corpus_tk[:int(0.8*len(natr_corpus_tk))])\n\n food_test = sum([[''] + x + [''] for x in food_corpus_tk[int(0.8*len(food_corpus_tk)):]],[])\n natr_test = sum([[''] + x + [''] for x in natr_corpus_tk[int(0.8*len(natr_corpus_tk)):]],[])\n\n food_lm = Laplace(3)\n natr_lm = Laplace(3)\n\n food_lm.fit(food_train, food_vocab)\n natr_lm.fit(natr_train, natr_vocab)\n \n eq_(int(evaluate.get_perplexity(food_lm, food_test[:2500])), 7318)\n eq_(int(evaluate.get_perplexity(food_lm, natr_test[:2500])), 7309)\n eq_(int(evaluate.get_perplexity(natr_lm, natr_test[:2500])), 5222)\n eq_(int(evaluate.get_perplexity(natr_lm, food_test[:2500])), 5354)\n\n \ndef test_d3_1_vary():\n global food_corpus, natr_corpus\n \n food_corpus_tk = train.tokenize_corpus(food_corpus)\n \n n_gram_orders = [2, 3]\n \n train_corpus = food_corpus_tk[:int(0.8*len(food_corpus_tk))]\n test_corpus = food_corpus_tk[int(0.8*len(food_corpus_tk)): int(0.85*len(food_corpus_tk))]\n\n results = train.vary_ngram(train_corpus, test_corpus, n_gram_orders)\n \n eq_(int(results[2]), 7387)\n eq_(int(results[3]), 7428)\n\n","sub_path":"NLP_Lab3/punit2/NLPLab3/tests/test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"22926279","text":"import pandas as pd \r\nimport numpy as np\r\nimport os\r\n\r\ndef gene_celltype_df_builder(gene_list,df_braincell):\r\n\t\"\"\"This function is building a gene 
dictionary\"\"\"\r\n\tgene_dict = {}\r\n\tfor gene in gene_list:\r\n\t\tcell_type_list =[]\r\n\t\tfor i in list(range(len(df_braincell.columns))):\r\n\t\t\tif gene in df_braincell.iloc[:,i].unique():\r\n\t\t\t\tcell_type = df_braincell.columns[i]\r\n\t\t\t\tcell_type_list.append(cell_type)\r\n\t\t\t\tgene_dict[gene] = list(dict.fromkeys(cell_type_list))\r\n\t\t\telse: \r\n\t\t\t\tcell_type_list.append('')\r\n\t\t\t\tgene_dict[gene] = list(dict.fromkeys(cell_type_list))\t\t\r\n\t\r\n\t#middle step to make one column\r\n\t\"\"\"This function combines the values of a dictionary, if the values are \r\n\ta list. So the list becomes one string\"\"\"\r\n\tfor gene, cell_type in gene_dict.items():\r\n\t\t# For example 'CRYAB' is in the loop \r\n\t\tgene_dict[gene]=', '.join(cell_type)\r\n\r\n\t\"\"\"Now building a dataframe to be able to use it as Excel\"\"\"\t\r\n\tdf = pd.DataFrame.from_dict(gene_dict,\r\n\t\torient='index',\r\n\t\tcolumns=['Cell type'])\r\n\r\n\t#genes had to be repeated because the genes are indexed for the dataframe\r\n\tmanupulate_dict={}\r\n\tfor gene in gene_list:\r\n\t\tmanupulate_dict[gene] = gene\r\n\r\n\tdf_gene_manupulated = pd.DataFrame.from_dict(manupulate_dict,\r\n\t\torient='index',\r\n\t\tcolumns=['Gene'])\r\n\t#This had to be made in order to use the concat function with the same index\r\n\r\n\tdf_all = pd.concat([df_gene_manupulated,df],axis=1)\r\n\r\n\treturn df_all\r\n\r\ndef data_joiner(gene_list,df_cluster,df_cluster_spfc):\r\n\t\"\"\"This function joins the rows with the given columns above\"\"\"\r\n\tappended_data = []\r\n\tgene_not_cluster = []\r\n\r\n\tfor gene in gene_list:\r\n\t\tif gene not in df_cluster['Gene'].unique():\r\n\t\t\tgene_not_cluster.append(gene)\r\n\r\n\tdf_not_in_cluster = pd.DataFrame({'Gene':gene_not_cluster}, \r\n\t\t\tcolumns=['Gene'], \r\n\t\t\tindex=list(range(len(gene_not_cluster))))\r\n\r\n\tfor gene in gene_list: \r\n\t\tdf_kunt_gene = df_cluster_spfc[df_cluster_spfc['Gene'] == gene]\r\n\t\tappended_data.append(df_kunt_gene) \r\n\r\n\tif appended_data != []: \r\n\t\tjoined_df = pd.concat(appended_data).drop_duplicates().append(df_not_in_cluster)\r\n\telse: \r\n\t\tjoined_df = pd.DataFrame()\r\n\t\r\n\treturn joined_df\r\n\r\ndef df_generator_expended(gene_list, file_list):\r\n\t\r\n\t# Create an empty dictionary \r\n\tmanupulate_dict={} \r\n\tfor gene in gene_list: # same structure as the other dataframe \r\n\t\tmanupulate_dict[gene] = [] #empty list to append the wanted cell types\r\n\r\n\tfor excel_file in file_list: #excel file list is stored in the same dir\r\n\t\tdf_celltype_expended = pd.read_excel(excel_file)\r\n\r\n\t\tdata = {'Cell Type': df_celltype_expended.iloc[1:,0],\r\n\t\t\t\t\t'Gene': df_celltype_expended.iloc[1:,1]}\r\n\r\n\t\tdf = pd.DataFrame(data) #Create a new dataframe with the correct structure\r\n\r\n\t\tgene_list_expended = df['Gene'].unique() #make a list from the df\r\n\r\n\t\tgene_dict_expended={} #make a dictionary and split the cell types \r\n\t\tfor i in range(len(df['Gene'].unique())):\r\n\t\t\tgene_row_list = gene_list_expended[i].replace(\",\",\"\").split()\r\n\t\t\tfor gene in gene_row_list:\r\n\t\t\t\tgene_dict_expended[gene]=str(df.iloc[i,0])\r\n\r\n\t\tdetected_genes_dict={} #some genes will be detected, now new dictionary\r\n\t\tfor gene, cell_type in gene_dict_expended.items():\r\n\t\t\tif gene in gene_list:\r\n\t\t\t\tdetected_genes_dict[gene]=cell_type\r\n\r\n\t\tfor gene in gene_list:\r\n\t\t\tif gene in 
detected_genes_dict.keys():\r\n\t\t\t\tmanupulate_dict[gene].append(detected_genes_dict[gene])\r\n\r\n\t# Now making the dictionary into df, combining the list back to one cell\r\n\t# in excel \r\n\tfor gene, cell_type_list in manupulate_dict.items():\r\n\t\tmanupulate_dict[gene]= ', '.join(cell_type_list)\r\n\r\n\t# building the dataframe from the dictionary\r\n\r\n\tdf = pd.DataFrame.from_dict(manupulate_dict,\r\n\t\torient='index',\r\n\t\tcolumns=['Cell type expanded'])\r\n\r\n\t#genes had to be repeated because the genes are indexed for the dataframe\r\n\texp_dict_only_genes={}\r\n\tfor gene in gene_list:\r\n\t\texp_dict_only_genes[gene] = gene\r\n\r\n\tdf_only_gene = pd.DataFrame.from_dict(exp_dict_only_genes,\r\n\t\torient='index',\r\n\t\tcolumns=['gene'])\r\n\t#This had to be made in order to use the concat function with the same index\r\n\r\n\tdf_all = pd.concat([df_only_gene,df],axis=1)\r\n\r\n\treturn df_all \r\n\r\n\r\n\r\n\r\n","sub_path":"kunt_df_generator.py","file_name":"kunt_df_generator.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"614805242","text":"\r\ndef tempCovert():\r\n val = input(\"Enter a temperature value with its unit symbol (e.g. 32C): \") # input() returns a string by default in Python\r\n while val[-1] not in ['N', 'n']: # if the last character is not N or n\r\n if val[-1] in ['C', 'c']: # index -1 is the last character\r\n f = 1.8 * eval(val[0:-1]) + 32 # slicing is left-closed, right-open\r\n print(\"The converted temperature is: %.2fF\" % f)\r\n elif val[-1] in ['F', 'f']:\r\n c = (eval(val[0:-1]) - 32) / 1.8\r\n print(\"The converted temperature is: %.2fC\" % c)\r\n else:\r\n print(\"Invalid input\")\r\n val = input(\"Play again? (enter N or n to quit)\")\r\n\r\ntempCovert()\r\n","sub_path":"pythontest/day02-28-56-01.py","file_name":"day02-28-56-01.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574547708","text":"import cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread(\"image.jpg\")\r\n\r\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n\r\n# range of color given\r\nmask = cv2.inRange(hsv, (36, 25, 25), (70, 255,255))\r\n\r\n# green mask to slice out that part of the image\r\nimask = mask>0\r\ngreen = np.zeros_like(img, np.uint8)\r\ngreen[imask] = img[imask]\r\n\r\n#displaying in different windows\r\ncv2.imshow(\"green-masked\", green)\r\ncv2.imshow(\"original\", img)\r\n","sub_path":"Python-GreenMask-OpenCV-main/Python-GreenMask-OpenCV-main/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506551119","text":"# -*- coding: utf-8 -*-\n\nimport cv2\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n img = cv2.imread('./ikuta.jpg')\n colors = ['blue', 'green', 'red']\n\n for index, color in enumerate(colors):\n hist = cv2.calcHist([img], [index], None, [256], [0, 256])\n plt.plot(hist, color = color)\n\n plt.savefig('./ikuta_hist.jpg')\n\n","sub_path":"python/udemy/opencv/session3/histgram.py","file_name":"histgram.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"277777803","text":"import random\nimport json\nimport matplotlib.pyplot as plt\n\n\ndef plotGini(giniValues):\n handles = []\n x = 1\n for i in giniValues:\n graph = plt.plot(i, label='Run '+ str(x))\n x +=1\n plt.legend()\n plt.xlabel('Customers')\n plt.ylabel('Gini coefficient G')\n plt.show()\n\n\ndef calculateGini(tables, 
customers):\n numTables = len(tables)\n marketshares=[]\n for table, guests in enumerate(tables):\n marketshares.append(guests/customers)\n\n sharesSum = 0;\n for i in range(numTables):\n for j in range(numTables):\n sharesSum += abs(marketshares[i]-marketshares[j])\n\n gini = (((1/numTables)*sharesSum)/2)\n return gini\n\n\ndef generateChineseRestaurant(customers):\n # First customer always sits at the first table\n tables = [1]\n gini = [0.0]\n # for all other customers do\n for cust in range(2, customers + 1):\n # rand between 0 and 1\n rand = random.random()\n # Total probability to sit at a table\n prob = 0\n # No table found yet\n table_found = False\n # Iterate over tables\n for table, guests in enumerate(tables):\n # calc probability for actual table an add it to total probability\n prob += guests / (cust)\n # If rand is smaller than the current total prob., customer will sit down at current table\n if rand < prob:\n # incr. #customers for that table\n tables[table] += 1\n # customer has found table\n table_found = True\n # no more tables need to be iterated, break out for loop\n break\n # If table iteration is over and no table was found, open new table\n if not table_found:\n tables.append(1)\n\n gini.append(calculateGini(tables, cust))\n return tables, gini\n\n\ncustomers = 1000\nnetwork = []\nginiValues=[]\n\nfor i in range(0, 5):\n tables, gini = generateChineseRestaurant(customers)\n network.append(tables)\n giniValues.append(gini)\n\nwith open('network_' + str(customers) + '.json', 'w') as out:\n json.dump(network, out)\n\nplotGini(giniValues)","sub_path":"assignment10/chinese_restaurant.py","file_name":"chinese_restaurant.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"616237130","text":"# import the necessary packages\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport numpy as np\nimport time\nimport cv2\n\n# IMPORTANT FOR DEBUG PURPOSES\n# Pretty obvs, but make this true to have debug output\ndebug = False\n\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\ncamera_resolution_x = 640\ncamera_resolution_y = 480\ncamera.resolution = (camera_resolution_x, camera_resolution_y)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=(camera_resolution_x, camera_resolution_y))\n\n\nif (debug):\n # Create a Video Caputure object\n cap = cv2.VideoCapture(0)\n # Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.\n out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (camera_resolution_x,camera_resolution_y))\n\n\n# allow the camera to warmup\ntime.sleep(0.1)\n\n# Set up the tracker_type\ntracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']\ntracker_type = tracker_types[2] # option 2 = TrackerKCF_create\n\nif tracker_type == 'BOOSTING':\n tracker = cv2.TrackerBoosting_create()\nif tracker_type == 'MIL':\n tracker = cv2.TrackerMIL_create()\nif tracker_type == 'KCF':\n tracker = cv2.TrackerKCF_create()\nif tracker_type == 'TLD':\n tracker = cv2.TrackerTLD_create()\nif tracker_type == 'MEDIANFLOW':\n tracker = cv2.TrackerMedianFlow_create()\nif tracker_type == 'GOTURN':\n tracker = cv2.TrackerGOTURN_create()\nif tracker_type == 'MOSSE':\n tracker = cv2.TrackerMOSSE_create()\nif tracker_type == \"CSRT\":\n tracker = cv2.TrackerCSRT_create()\n\n# grab an image from the camera\ncamera.capture(rawCapture, 
format=\"bgr\")\nframe = cv2.flip(rawCapture.array, -1)\n\n# Select bounding box\nbbox = cv2.selectROI(frame, False)\n\n# Initialize tracker with first frame and bounding box\nok = tracker.init(frame, bbox)\n\n# Calculate center of image and store in a tuple\n# This represents the theoretical line of fire\nframe_width = camera_resolution_x\nframe_height = camera_resolution_y\nvideo_center = (int(frame_width/2.0), int(frame_height/2.0))\n\n# clear buffer in anticipation of main loop\nrawCapture.truncate(0)\n\n# capture frames from the camera\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n # grab the raw NumPy array representing the image, then initialize the timestamp\n # and occupied/unoccupied text\n image = cv2.flip(np.array(frame.array), -1)\n\n # Start timer\n timer = cv2.getTickCount()\n\n # Update tracker\n ok, bbox = tracker.update(image)\n\n # Calculate Frames per second (FPS)\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);\n\n # radius should be scaled with target_center\n reticule_scalar = 5.0\n radius = (((frame_height / 2.0) * reticule_scalar) / 100.0)\n radius = int(radius)\n\n # gun reticule (light blue)\n cv2.circle(image, video_center, radius, (255,255,0), thickness=2, lineType=8, shift=0)\n\n # Draw bounding box\n if ok:\n # Tracking success\n p1 = (int(bbox[0]), int(bbox[1]))\n p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\n\n # note: colors are blue, green, red (lmao why)\n\n # rectangle around target (blue)\n cv2.rectangle(image, p1, p2, (255,0,0), 2, 1)\n\n # from this information we can then derive distance from center and angle of correction using some fancy math.\n target_center = (int(p1[0]+(bbox[2]/2.0)),int(p1[1]+(bbox[3]/2.0)))\n\n # target correction arrow (green)\n cv2.arrowedLine(image, video_center, target_center, (0,255,0), 3)\n\n # target reticule (red)\n cv2.circle(image, target_center, radius, (0,0,255), thickness=2, lineType=8, shift=0)\n\n cv2.putText(image, \"Tracking Successful\", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)\n else :\n # Tracking failure\n cv2.putText(image, \"Tracking failure detected\", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)\n\n # Display tracker type on frame\n # cv2.putText(image, tracker_type + \" Tracker\", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);\n\n # Display FPS on frame\n # cv2.putText(image, \"FPS: \" + str(int(fps)), (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);\n\n # check for debug output and save if needed\n if (debug):\n # Write the frame into the file 'output.avi'\n out.write(image)\n\n # Display result\n cv2.namedWindow(\"can finder\", cv2.WND_PROP_FULLSCREEN)\n cv2.setWindowProperty(\"can finder\",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\n cv2.imshow(\"can finder\", image)\n key = cv2.waitKey(1) & 0xFF\n\n # clear the stream in preparation for the next frame\n rawCapture.truncate(0)\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n \tbreak\n\n# When everything done, release the video capture and video writer objects\n# (these only exist in debug mode; PiRGBArray itself has no release())\nif (debug):\n cap.release()\n out.release()\n\n# Closes all the frames\ncv2.destroyAllWindows()\n","sub_path":"mainLiveView.py","file_name":"mainLiveView.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333187304","text":"import time\nfrom disk import load_inventory, update_inventory\nfrom gas_core import calc_money as dinero\nfrom gas_core import 
calc_gallon as gal\nfrom gas_core import price_of, final_message\nfrom gas_core import other_message\n\n\ndef gas_function():\n print(\n \"Hello, welcome to O's station.\\n\\nHere is the type of gas we have today.\\n\"\n )\n inventory = load_inventory()\n print(inventory)\n type_gas = input(\"What type of gas would you like? \\n\\n\").lower().strip()\n print(\"\\nIf you wish to exit press 'Q'\\n\")\n gas_price = price_of(inventory, type_gas)\n if not gas_price:\n print('That gas does not exist')\n exit()\n\n print(\"We have two different ways you can pay.\\n\")\n print(\"Prepay\\n\\nor\\n\\nAfter filling up\")\n pay_type = input(\"How would you like to pay? \\n\")\n if pay_type.lower() == \"prepay\":\n print(\"Thanks for choosing Prepay.\")\n money = input(\"How much money would you like to spend? \\n\")\n gallons = float(money) / float(gas_price)\n print(\"Press Enter to start filling\")\n input()\n time.sleep(1.5)\n print(\n final_message(type_gas,\n float(money), float(money) / float(gas_price)))\n elif pay_type.lower() == 'q':\n exit()\n else:\n print(\"Thanks for choosing after filling up.\\n\")\n gallons = input(\"How many gallons would you want? \\n\")\n money = float(gallons) * float(gas_price)\n print(\"Press Enter to start filling\")\n input()\n time.sleep(1.5)\n print(\n other_message(type_gas,\n float(gallons), float(gallons) * float(gas_price)))\n update_inventory(type_gas, gallons, money)\n\n\ndef main():\n gas_function()\n\n\nif __name__ == '__main__':\n main()","sub_path":"gas_shell.py","file_name":"gas_shell.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481781152","text":"def _gen_impl(ctx):\n args = ctx.actions.args()\n args.add(ctx.outputs.out)\n ctx.actions.run(\n outputs = [ctx.outputs.out],\n inputs = [],\n arguments = [args],\n executable = ctx.executable._generator,\n )\n\nnewgen = rule(\n implementation = _gen_impl,\n attrs = {\"_generator\": attr.label(default=Label(\"//tools:newgen\"),\n executable=True,\n cfg=\"host\"),\n },\n outputs = {\"out\": \"%{name}.txt\"},\n)\n","sub_path":"tools/newgen.bzl","file_name":"newgen.bzl","file_ext":"bzl","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300897256","text":"import requests\nimport re\nfrom .vjudge import VJudge\nfrom .config import nyoj as oj\nfrom .config import user \nclass Judge(VJudge):\n def __init__(self):\n self.login_url = oj[\"login_url\"] \n self.submit_url = oj[\"submit_url\"] \n self.status_url = oj[\"status_url\"]\n self.session = requests.Session()\n self.login_post[\"userid\"] = user[self.uid][\"name\"]\n self.login_post[\"password\"] = user[self.uid][\"password\"]\n self.login_post[\"btn_submit\"] = \"登录\" \n def login(self):\n print(self.login_post)\n self.session.post(self.login_url,data=self.login_post,headers=self.headers)\n def submit(self,problemid,langid=\"43\",code=None):\n self.code = code\n self.langid = langid\n self.problemid = problemid\n self.submit_post[\"language\"] = langid\n self.submit_post[\"btn_submit\"] = \"提交\"\n self.submit_post[\"code\"] = self.code\n self.submit_url+=self.problemid\n self.login()\n print(self.submit_url)\n self.session.post(self.submit_url,data=self.submit_post,headers=self.headers)\n 
self.status=\"Yes\"\n","sub_path":"virtualjudge/nyoj.py","file_name":"nyoj.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610024980","text":"from sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom tensorflow.keras.callbacks import LambdaCallback\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM, Embedding\nfrom tensorflow.keras.optimizers import Adamax\nfrom tensorflow.keras.optimizers import RMSprop\n\nimport numpy as np\nimport pandas as pd\nimport random\nimport requests\nimport sys\nimport re\n\n\n\n# Bring in the data. Right now I'm just doing train. Will add validate later.\nlines = \"\"\nwith open('train.txt', 'r', encoding='iso-8859-1') as dialogues:\n for line in dialogues:\n lines += line\n\n\n# Original words will be the y. They retain punctuation and capitalization.\norig_words = lines.replace(\"\\n\", \" \\n \").replace(\" -\", \"-\").split(\" \")\norig_words = list(filter(lambda a: a != '', orig_words))\n\n\n# New words will be the X. They don't have capitalization and punctuation.\nnew_words = []\nfor word in orig_words:\n w = re.sub(r'[^\\w\\s]','',word) #remove everything except words, space\n w = re.sub(r'\\_','',w) # And underscore\n new_words.append(w.lower())\n\n\n# Create Encoder/Decoder\ntext = orig_words + new_words\ntext.append('')\n\nwords = sorted(list(set(text)))\nword_int = dict((c, i) for i, c in enumerate(words))\nint_word = dict((i, c) for i, c in enumerate(words))\n\n\n# Create the X and y more formally.\nmaxlen = 11\nsentences = [] #X\npreds = [] #Y\n\n\n# This creates chunks that are 11 tokens long with the \n# middle token being the target word. 
\nfor i in range(len(orig_words)):\n if i == 0:\n x1 = [''] * 5\n x2 = [new_words[0]]\n x3 = new_words[1:6]\n sentences.append(x1 + x2 + x3)\n preds.append(orig_words[i])\n \n elif i < 5:\n x1 = [''] * (5-i)\n x2 = new_words[:i]\n x3 = new_words[i: i+6]\n sentences.append(x1 + x2 + x3)\n preds.append(orig_words[i])\n\n elif i == len(orig_words):\n x1 = new_words[i-5:]\n x2 = [''] * 5\n sentences.append(x1 + x2)\n preds.append(orig_words[i])\n\n elif i > len(orig_words) - 6:\n x1 = new_words[i-5:]\n x2 = [''] * (6 - (len(orig_words) - i))\n sentences.append(x1 + x2)\n preds.append(orig_words[i])\n \n else:\n sentences.append(new_words[i-5: i+6])\n preds.append(orig_words[i])\n\n\n# Make x and y computer readable.\nx = np.zeros((len(sentences), maxlen, len(words)), dtype=np.bool)\ny = np.zeros((len(sentences), len(words)), dtype=np.bool)\n\nfor i, sentence in enumerate(sentences):\n for t, word in enumerate(sentence):\n x[i, t, word_int[word]] = 1\n y[i, word_int[preds[i]]] = 1\n\n# Early Stopping Requirements\nstop = EarlyStopping(monitor='loss', min_delta=0.05, patience=2, mode='auto')\n\n# Build Model\nmodel = Sequential()\nmodel.add(LSTM(300, input_shape=(maxlen, len(words))))\nmodel.add(Dense(900, activation='relu'))\nmodel.add(Dense(len(words), activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adamax')\n\n# Fit Model\nmodel.fit(x, y,\n batch_size=32,\n epochs=200, \n callbacks=[stop])\n\n# Save Model\nmodel.save(f'punctuation')\n\n\n\ndef gen_data(sentences):\n \"\"\"A function to generate predictions for the sixth word\n of every chunk in sentences and then returns them\n as a list of words.\"\"\"\n\n # With more time I would refine it to make it able to handle one\n # large string that's a whole document and then return the whole\n # document with capitalization and punctuation fixed.\n\n data = []\n for sentence in sentences:\n senten = sentence.split(\" \")\n x_pred = np.zeros((1, maxlen, len(words)))\n for t, word in enumerate(senten):\n x_pred[0, t, word_int[word]] = 1\n preds = model.predict(x_pred, verbose=0)[0]\n data.append(int_word[np.argmax(preds)])\n\n return data\n\n# This is just an example of a few chunks whose sixth words\n# make at least part of a sentence.\nsentences = ['thats the kind of guy she likes pretty ones \\n who', 'the kind of guy she likes pretty ones \\n who knows', 'kind of guy she likes pretty ones \\n who knows all', 'of guy she likes pretty ones \\n who knows all ive', 'guy she likes pretty ones \\n who knows all ive ever', 'she likes pretty ones \\n who knows all ive ever heard', 'likes pretty ones \\n who knows all ive ever heard her', 'pretty ones \\n who knows all ive ever heard her say', 'ones \\n who knows all ive ever heard her say is', '\\n who knows all ive ever heard her say is that']\nprint(gen_data(sentences))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"572297016","text":"import curses\nimport signal\nimport sys\nfrom collections import OrderedDict\n\nclass CursesLog:\n def __init__(self):\n self._rows = OrderedDict()\n self._screen = curses.initscr()\n curses.def_shell_mode()\n curses.start_color()\n curses.use_default_colors()\n curses.init_pair(1, curses.COLOR_GREEN, -1)\n curses.init_pair(2, curses.COLOR_RED, -1)\n curses.cbreak()\n curses.noecho()\n\n def _remove_first(self):\n self._rows.pop(list(self._rows)[0])\n for k, v in self._rows.items():\n v[\"y\"] 
-= 1\n\n def add_str(self, marker, message, append=False, *args, **kwargs):\n if marker in self._rows:\n d = self._rows[marker]\n if not append:\n self._screen.addstr(d[\"y\"], d[\"x\"] , message, *args, **kwargs)\n else:\n self._screen.addstr(d[\"y\"], d[\"end\"], message, *args, **kwargs)\n else:\n if self._screen.getmaxyx()[0] <= len(self._rows):\n self._screen.move(0, 0)\n self._screen.deleteln()\n self._screen.refresh()\n self._remove_first()\n\n self._screen.addstr(len(self._rows), 0, message, *args, **kwargs)\n\n y, x = self._screen.getyx()\n self._rows[marker] = {\n \"y\": y,\n \"x\": x - len(message),\n \"end\": x\n }\n self._screen.refresh()\n\n @staticmethod\n def exit():\n curses.echo()\n curses.nocbreak()\n curses.reset_shell_mode()\n curses.endwin()\n\n\ndef signal_handler(signal, frame):\n CursesLog.exit()\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n","sub_path":"scripts/curses_log.py","file_name":"curses_log.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"327185383","text":"import random\nimport itertools\n\nfrom solariat_bottle.tests.base import UICase, setup_agent_schema\n\nfrom solariat_bottle.db.schema_based import (\n KEY_IS_ID, KEY_NAME, KEY_TYPE, KEY_EXPRESSION, TYPE_INTEGER,\n TYPE_STRING, TYPE_BOOLEAN, TYPE_LIST, TYPE_DICT)\n\nclass CustomerAgentBaseCase(UICase):\n\n def setUp(self):\n super(CustomerAgentBaseCase, self).setUp()\n self.login()\n\n schema = list()\n schema.append({\n KEY_NAME: 'first_name',\n KEY_TYPE: TYPE_STRING,\n # KEY_EXPRESSION: 'last name',\n })\n schema.append({\n KEY_NAME: 'last_name',\n KEY_TYPE: TYPE_STRING,\n # KEY_EXPRESSION: 'last name',\n })\n schema.append({\n KEY_NAME: 'skills',\n KEY_TYPE: TYPE_DICT,\n # KEY_EXPRESSION: 'skills',\n })\n schema.append({\n KEY_NAME: 'sex',\n KEY_TYPE: TYPE_STRING,\n # KEY_EXPRESSION: 'sex',\n })\n schema.append({\n KEY_NAME: 'skillset',\n KEY_TYPE: TYPE_LIST,\n # KEY_EXPRESSION: 'skillset',\n })\n schema.append({\n KEY_NAME: 'seniority',\n KEY_TYPE: TYPE_STRING,\n # KEY_EXPRESSION: 'seniority',\n })\n schema.append({\n KEY_NAME: 'products',\n KEY_TYPE: TYPE_LIST,\n # KEY_EXPRESSION: 'products',\n })\n schema.append({\n KEY_NAME: 'age',\n KEY_TYPE: TYPE_INTEGER,\n # KEY_EXPRESSION: 'age',\n })\n schema.append({\n KEY_NAME: 'occupancy',\n KEY_TYPE: TYPE_INTEGER,\n # KEY_EXPRESSION: 'occupancy',\n })\n schema.append({\n KEY_NAME: 'english_fluency',\n KEY_TYPE: TYPE_STRING,\n # KEY_EXPRESSION: 'english_fluency',\n })\n \n setup_agent_schema(self.user, extra_schema=schema)\n \n\n def choose_many(self, items, min_=0, max_=None):\n if max_ is None:\n max_ = len(items)\n n_samples = random.randint(min_, max_)\n return random.sample(items, n_samples)\n\n def groupby(self, resp, group_by):\n key = lambda d: d[group_by]\n _sorted = sorted(resp['list'], key=key)\n return itertools.groupby(_sorted, key=key)\n\n def count_distribution(self, resp):\n return {elm['label']: elm['data'][0][-1] for elm in resp['list']}\n\n def combinations(self, groups, max_for_each_combination=1):\n r = []\n for i in xrange(len(groups)+1):\n all_combinations = list(itertools.combinations(groups, i))\n n_samples = min(max_for_each_combination, len(all_combinations))\n r.extend(random.sample(all_combinations, n_samples))\n return r\n\n def categorize_age_groups(self, resp):\n for each in resp['list']:\n if each['age'] < 16:\n raise Exception(\"Age less than 16 should not be there\")\n elif each['age'] <= 25:\n 
each['age'] = (16, 25)\n elif each['age'] <= 35:\n each['age'] = (26, 35)\n elif each['age'] <= 45:\n each['age'] = (36, 45)\n else:\n each['age'] = (46, 100)\n\n def _create(self, **kw):\n raise NotImplemented\n\n def _fetch(self, **kw):\n raise NotImplemented\n","sub_path":"tests/journeys/customers_agents/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"439793249","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = 'game_board'\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^new/$', views.new_room, name='new_room'),\n url(r'^(?P